From 08ca41a4638425100cba5c860398d07ddc1e837e Mon Sep 17 00:00:00 2001 From: "Harper, Jason M" Date: Thu, 4 Dec 2025 06:02:03 -0800 Subject: [PATCH 1/6] refactor helpers Signed-off-by: Harper, Jason M --- internal/report/accelerator.go | 151 ++ internal/report/accelerator_defs.go | 61 - ...elpers_benchmarking.go => benchmarking.go} | 0 .../{table_helpers_cache.go => cache.go} | 0 ...le_helpers_cache_test.go => cache_test.go} | 0 internal/report/cpu.go | 249 +++ internal/report/cpu_test.go | 267 +++ .../report/{table_helpers_dimm.go => dimm.go} | 0 ...able_helpers_frequency.go => frequency.go} | 0 internal/report/frequency_test.go | 209 +++ internal/report/gpu.go | 305 ++++ internal/report/gpu_defs.go | 125 -- internal/report/isa.go | 63 + internal/report/nic.go | 258 +++ internal/report/nic_test.go | 627 +++++++ internal/report/power.go | 258 +++ .../{prefetcher_defs.go => prefetcher.go} | 116 +- internal/report/security.go | 48 + .../{table_helpers_stacks.go => stacks.go} | 135 ++ internal/report/stacks_test.go | 157 ++ internal/report/storage.go | 143 ++ internal/report/system.go | 99 + internal/report/table_helpers.go | 1589 +---------------- .../table_helpers_nic_integration_test.go | 204 --- internal/report/table_helpers_nic_test.go | 104 -- internal/report/table_helpers_test.go | 1014 ----------- ...able_helpers_turbostat.go => turbostat.go} | 0 ...rs_turbostat_test.go => turbostat_test.go} | 0 28 files changed, 3083 insertions(+), 3099 deletions(-) create mode 100644 internal/report/accelerator.go delete mode 100644 internal/report/accelerator_defs.go rename internal/report/{table_helpers_benchmarking.go => benchmarking.go} (100%) rename internal/report/{table_helpers_cache.go => cache.go} (100%) rename internal/report/{table_helpers_cache_test.go => cache_test.go} (100%) create mode 100644 internal/report/cpu.go create mode 100644 internal/report/cpu_test.go rename internal/report/{table_helpers_dimm.go => dimm.go} (100%) rename internal/report/{table_helpers_frequency.go => frequency.go} (100%) create mode 100644 internal/report/frequency_test.go create mode 100644 internal/report/gpu.go delete mode 100644 internal/report/gpu_defs.go create mode 100644 internal/report/isa.go create mode 100644 internal/report/nic.go create mode 100644 internal/report/nic_test.go create mode 100644 internal/report/power.go rename internal/report/{prefetcher_defs.go => prefetcher.go} (56%) create mode 100644 internal/report/security.go rename internal/report/{table_helpers_stacks.go => stacks.go} (50%) create mode 100644 internal/report/stacks_test.go create mode 100644 internal/report/storage.go create mode 100644 internal/report/system.go delete mode 100644 internal/report/table_helpers_nic_integration_test.go delete mode 100644 internal/report/table_helpers_nic_test.go delete mode 100644 internal/report/table_helpers_test.go rename internal/report/{table_helpers_turbostat.go => turbostat.go} (100%) rename internal/report/{table_helpers_turbostat_test.go => turbostat_test.go} (100%) diff --git a/internal/report/accelerator.go b/internal/report/accelerator.go new file mode 100644 index 00000000..f9ca55b0 --- /dev/null +++ b/internal/report/accelerator.go @@ -0,0 +1,151 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package report + +import ( + "fmt" + "regexp" + "strings" + + "perfspect/internal/script" +) + +// Intel Accelerators (sorted by devid) +// references: +// https://pci-ids.ucw.cz/read/PC/8086 + +type AcceleratorDefinition struct { + MfgID 
string + DevID string + Name string + FullName string + Description string +} + +var acceleratorDefinitions = []AcceleratorDefinition{ + { + MfgID: "8086", + DevID: "(2710|2714)", + Name: "DLB", + FullName: "Intel Dynamic Load Balancer", + Description: "hardware managed system of queues and arbiters connecting producers and consumers", + }, + { + MfgID: "8086", + DevID: "B25", + Name: "DSA", + FullName: "Intel Data Streaming Accelerator", + Description: "a high-performance data copy and transformation accelerator", + }, + { + MfgID: "8086", + DevID: "CFE", + Name: "IAA", + FullName: "Intel Analytics Accelerator", + Description: "accelerates compression and decompression for big data applications and in-memory analytic databases", + }, + { + MfgID: "8086", + DevID: "(4940|4942|4944)", + Name: "QAT (on CPU)", + FullName: "Intel Quick Assist Technology", + Description: "accelerates data encryption and compression for applications from networking to enterprise, cloud to storage, and content delivery to database", + }, + { + MfgID: "8086", + DevID: "37C8", + Name: "QAT (on chipset)", + FullName: "Intel Quick Assist Technology", + Description: "accelerates data encryption and compression for applications from networking to enterprise, cloud to storage, and content delivery to database", + }, + { + MfgID: "8086", + DevID: "57C2", + Name: "vRAN Boost", + FullName: "Intel vRAN Boost", + Description: "accelerates vRAN workloads", + }, +} + +func acceleratorNames() []string { + var names []string + for _, accel := range acceleratorDefinitions { + names = append(names, accel.Name) + } + return names +} + +func acceleratorCountsFromOutput(outputs map[string]script.ScriptOutput) []string { + var counts []string + lshw := outputs[script.LshwScriptName].Stdout + for _, accel := range acceleratorDefinitions { + regex := fmt.Sprintf("%s:%s", accel.MfgID, accel.DevID) + re := regexp.MustCompile(regex) + count := len(re.FindAllString(lshw, -1)) + counts = append(counts, fmt.Sprintf("%d", count)) + } + return counts +} + +func acceleratorWorkQueuesFromOutput(outputs map[string]script.ScriptOutput) []string { + var queues []string + for _, accel := range acceleratorDefinitions { + if accel.Name == "IAA" || accel.Name == "DSA" { + var scriptName string + if accel.Name == "IAA" { + scriptName = script.IaaDevicesScriptName + } else { + scriptName = script.DsaDevicesScriptName + } + devices := outputs[scriptName].Stdout + lines := strings.Split(devices, "\n") + // get non-empty lines + var nonEmptyLines []string + for _, line := range lines { + if strings.TrimSpace(line) != "" { + nonEmptyLines = append(nonEmptyLines, line) + } + } + if len(nonEmptyLines) == 0 { + queues = append(queues, "None") + } else { + queues = append(queues, strings.Join(nonEmptyLines, ", ")) + } + } else { + queues = append(queues, "N/A") + } + } + return queues +} + +func acceleratorFullNamesFromYaml() []string { + var fullNames []string + for _, accel := range acceleratorDefinitions { + fullNames = append(fullNames, accel.FullName) + } + return fullNames +} + +func acceleratorDescriptionsFromYaml() []string { + var descriptions []string + for _, accel := range acceleratorDefinitions { + descriptions = append(descriptions, accel.Description) + } + return descriptions +} + +func acceleratorSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + var summary []string + accelerators := acceleratorNames() + counts := acceleratorCountsFromOutput(outputs) + for i, name := range accelerators { + if strings.Contains(name, "chipset") { 
// skip "QAT (on chipset)" in this table + continue + } else if strings.Contains(name, "CPU") { // rename "QAT (on CPU) to simply "QAT" + name = "QAT" + } + summary = append(summary, fmt.Sprintf("%s %s [0]", name, counts[i])) + } + return strings.Join(summary, ", ") +} diff --git a/internal/report/accelerator_defs.go b/internal/report/accelerator_defs.go deleted file mode 100644 index 531f33c3..00000000 --- a/internal/report/accelerator_defs.go +++ /dev/null @@ -1,61 +0,0 @@ -package report - -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -// Intel Accelerators (sorted by devid) -// references: -// https://pci-ids.ucw.cz/read/PC/8086 - -type AcceleratorDefinition struct { - MfgID string - DevID string - Name string - FullName string - Description string -} - -var acceleratorDefinitions = []AcceleratorDefinition{ - { - MfgID: "8086", - DevID: "(2710|2714)", - Name: "DLB", - FullName: "Intel Dynamic Load Balancer", - Description: "hardware managed system of queues and arbiters connecting producers and consumers", - }, - { - MfgID: "8086", - DevID: "B25", - Name: "DSA", - FullName: "Intel Data Streaming Accelerator", - Description: "a high-performance data copy and transformation accelerator", - }, - { - MfgID: "8086", - DevID: "CFE", - Name: "IAA", - FullName: "Intel Analytics Accelerator", - Description: "accelerates compression and decompression for big data applications and in-memory analytic databases", - }, - { - MfgID: "8086", - DevID: "(4940|4942|4944)", - Name: "QAT (on CPU)", - FullName: "Intel Quick Assist Technology", - Description: "accelerates data encryption and compression for applications from networking to enterprise, cloud to storage, and content delivery to database", - }, - { - MfgID: "8086", - DevID: "37C8", - Name: "QAT (on chipset)", - FullName: "Intel Quick Assist Technology", - Description: "accelerates data encryption and compression for applications from networking to enterprise, cloud to storage, and content delivery to database", - }, - { - MfgID: "8086", - DevID: "57C2", - Name: "vRAN Boost", - FullName: "Intel vRAN Boost", - Description: "accelerates vRAN workloads", - }, -} diff --git a/internal/report/table_helpers_benchmarking.go b/internal/report/benchmarking.go similarity index 100% rename from internal/report/table_helpers_benchmarking.go rename to internal/report/benchmarking.go diff --git a/internal/report/table_helpers_cache.go b/internal/report/cache.go similarity index 100% rename from internal/report/table_helpers_cache.go rename to internal/report/cache.go diff --git a/internal/report/table_helpers_cache_test.go b/internal/report/cache_test.go similarity index 100% rename from internal/report/table_helpers_cache_test.go rename to internal/report/cache_test.go diff --git a/internal/report/cpu.go b/internal/report/cpu.go new file mode 100644 index 00000000..d1943119 --- /dev/null +++ b/internal/report/cpu.go @@ -0,0 +1,249 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package report + +import ( + "fmt" + "log/slog" + "strconv" + "strings" + + "perfspect/internal/cpus" + "perfspect/internal/script" + "perfspect/internal/util" +) + +// UarchFromOutput returns the architecture of the CPU that matches family, model, stepping, +// capid4, and devices information from the output or an empty string, if no match is found. 
+func UarchFromOutput(outputs map[string]script.ScriptOutput) string { + family := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) + model := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) + stepping := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) + capid4 := valFromRegexSubmatch(outputs[script.LspciBitsScriptName].Stdout, `^([0-9a-fA-F]+)`) + devices := valFromRegexSubmatch(outputs[script.LspciDevicesScriptName].Stdout, `^([0-9]+)`) + cpu, err := cpus.GetCPUExtended(family, model, stepping, capid4, devices) + if err == nil { + return cpu.MicroArchitecture + } + return "" +} + +func hyperthreadingFromOutput(outputs map[string]script.ScriptOutput) string { + family := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) + model := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) + stepping := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) + sockets := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) + coresPerSocket := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`) + cpuCount := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(.*:\s*(.+?)$`) + onlineCpus := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^On-line CPU\(s\) list:\s*(.+)$`) + threadsPerCore := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Thread\(s\) per core:\s*(.+)$`) + + numCPUs, err := strconv.Atoi(cpuCount) // logical CPUs + if err != nil { + slog.Error("error parsing cpus from lscpu") + return "" + } + onlineCpusList, err := util.SelectiveIntRangeToIntList(onlineCpus) // logical online CPUs + numOnlineCpus := len(onlineCpusList) + if err != nil { + slog.Error("error parsing online cpus from lscpu") + numOnlineCpus = 0 // set to 0 to indicate parsing failed, will use numCPUs instead + } + numThreadsPerCore, err := strconv.Atoi(threadsPerCore) // logical threads per core + if err != nil { + slog.Error("error parsing threads per core from lscpu") + numThreadsPerCore = 0 + } + numSockets, err := strconv.Atoi(sockets) + if err != nil { + slog.Error("error parsing sockets from lscpu") + return "" + } + numCoresPerSocket, err := strconv.Atoi(coresPerSocket) // physical cores + if err != nil { + slog.Error("error parsing cores per sockets from lscpu") + return "" + } + cpu, err := cpus.GetCPUExtended(family, model, stepping, "", "") + if err != nil { + return "" + } + if numOnlineCpus > 0 && numOnlineCpus < numCPUs { + // if online CPUs list is available, use it to determine the number of CPUs + // supersedes lscpu output of numCPUs which counts CPUs on the system, not online CPUs + numCPUs = numOnlineCpus + } + if cpu.LogicalThreadCount < 2 { + return "N/A" + } else if numThreadsPerCore == 1 { + // if threads per core is 1, hyperthreading is disabled + return "Disabled" + } else if numThreadsPerCore >= 2 { + // if threads per core is greater than or equal to 2, hyperthreading is enabled + return "Enabled" + } else if numCPUs > numCoresPerSocket*numSockets { + // if the threads per core attribute is not available, we can still check if hyperthreading is enabled + // by checking if the number of logical CPUs is greater than the number of physical cores + return "Enabled" + } else { + return "Disabled" + } +} + +func numaCPUListFromOutput(outputs map[string]script.ScriptOutput) string { + 
nodeCPUs := valsFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node[0-9] CPU\(.*:\s*(.+?)$`) + return strings.Join(nodeCPUs, " :: ") +} + +func ppinsFromOutput(outputs map[string]script.ScriptOutput) string { + uniquePpins := []string{} + for line := range strings.SplitSeq(outputs[script.PPINName].Stdout, "\n") { + parts := strings.Split(line, ":") + if len(parts) < 2 { + continue + } + ppin := strings.TrimSpace(parts[1]) + found := false + for _, p := range uniquePpins { + if string(p) == ppin { + found = true + break + } + } + if !found && ppin != "" { + uniquePpins = append(uniquePpins, ppin) + } + } + return strings.Join(uniquePpins, ", ") +} + +func channelsFromOutput(outputs map[string]script.ScriptOutput) string { + family := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) + model := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) + stepping := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) + capid4 := valFromRegexSubmatch(outputs[script.LspciBitsScriptName].Stdout, `^([0-9a-fA-F]+)`) + devices := valFromRegexSubmatch(outputs[script.LspciDevicesScriptName].Stdout, `^([0-9]+)`) + cpu, err := cpus.GetCPUExtended(family, model, stepping, capid4, devices) + if err != nil { + slog.Error("error getting CPU from CPUdb", slog.String("error", err.Error())) + return "" + } + return fmt.Sprintf("%d", cpu.MemoryChannelCount) +} + +func turboEnabledFromOutput(outputs map[string]script.ScriptOutput) string { + vendor := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Vendor ID:\s*(.+)$`) + switch vendor { + case cpus.IntelVendor: + val := valFromRegexSubmatch(outputs[script.CpuidScriptName].Stdout, `^Intel Turbo Boost Technology\s*= (.+?)$`) + if val == "true" { + return "Enabled" + } + if val == "false" { + return "Disabled" + } + return "" // unknown value + case cpus.AMDVendor: + val := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Frequency boost.*:\s*(.+?)$`) + if val != "" { + return val + " (AMD Frequency Boost)" + } + } + return "" +} + +func tdpFromOutput(outputs map[string]script.ScriptOutput) string { + msrHex := strings.TrimSpace(outputs[script.PackagePowerLimitName].Stdout) + msr, err := strconv.ParseInt(msrHex, 16, 0) + if err != nil || msr == 0 { + return "" + } + return fmt.Sprint(msr/8) + "W" +} + +func chaCountFromOutput(outputs map[string]script.ScriptOutput) string { + // output is the result of three rdmsr calls + // - client cha count + // - cha count + // - spr cha count + // stop when we find a non-zero value + // note: rdmsr writes to stderr on error so we will likely have fewer than 3 lines in stdout + for hexCount := range strings.SplitSeq(outputs[script.ChaCountScriptName].Stdout, "\n") { + if hexCount != "" && hexCount != "0" { + count, err := strconv.ParseInt(hexCount, 16, 64) + if err == nil { + return fmt.Sprintf("%d", count) + } + } + } + return "" +} + +func numaBalancingFromOutput(outputs map[string]script.ScriptOutput) string { + if strings.Contains(outputs[script.NumaBalancingScriptName].Stdout, "1") { + return "Enabled" + } else if strings.Contains(outputs[script.NumaBalancingScriptName].Stdout, "0") { + return "Disabled" + } + return "" +} + +func clusteringModeFromOutput(outputs map[string]script.ScriptOutput) string { + uarch := UarchFromOutput(outputs) + sockets := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) + nodes := 
valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`) + if uarch == "" || sockets == "" || nodes == "" { + return "" + } + socketCount, err := strconv.Atoi(sockets) + if err != nil { + slog.Error("failed to parse socket count", slog.String("error", err.Error())) + return "" + } + nodeCount, err := strconv.Atoi(nodes) + if err != nil { + slog.Error("failed to parse node count", slog.String("error", err.Error())) + return "" + } + if nodeCount == 0 || socketCount == 0 { + slog.Error("node count or socket count is zero") + return "" + } + nodesPerSocket := nodeCount / socketCount + switch uarch { + case "GNR_X1": + return "All2All" + case "GNR_X2": + switch nodesPerSocket { + case 1: + return "UMA 4 (Quad)" + case 2: + return "SNC 2" + } + case "GNR_X3": + switch nodesPerSocket { + case 1: + return "UMA 6 (Hex)" + case 3: + return "SNC 3" + } + case "SRF_SP": + return "UMA 2 (Hemi)" + case "SRF_AP": + switch nodesPerSocket { + case 1: + return "UMA 4 (Quad)" + case 2: + return "SNC 2" + } + case "CWF": + switch nodesPerSocket { + case 1: + return "UMA 6 (Hex)" + case 3: + return "SNC 3" + } + } + return "" +} diff --git a/internal/report/cpu_test.go b/internal/report/cpu_test.go new file mode 100644 index 00000000..78b4d281 --- /dev/null +++ b/internal/report/cpu_test.go @@ -0,0 +1,267 @@ +package report + +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +import ( + "perfspect/internal/script" + "testing" +) + +func TestHyperthreadingFromOutput(t *testing.T) { + tests := []struct { + name string + lscpuOutput string + wantResult string + }{ + { + name: "Hyperthreading enabled - 2 threads per core", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 1 +Core(s) per socket: 8 +CPU(s): 16 +Thread(s) per core: 2 +On-line CPU(s) list: 0-15 +`, + wantResult: "Enabled", + }, + { + name: "Hyperthreading disabled - 1 thread per core", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 1 +Core(s) per socket: 8 +CPU(s): 8 +Thread(s) per core: 1 +On-line CPU(s) list: 0-7 +`, + wantResult: "Disabled", + }, + { + name: "Hyperthreading enabled - detected by CPU count vs core count", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 2 +Core(s) per socket: 8 +CPU(s): 32 +On-line CPU(s) list: 0-31 +`, + wantResult: "Enabled", + }, + { + name: "Hyperthreading disabled - CPU count equals core count", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 2 +Core(s) per socket: 8 +CPU(s): 16 +On-line CPU(s) list: 0-15 +`, + wantResult: "Disabled", + }, + { + name: "Online CPUs less than total CPUs - use online count", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 1 +Core(s) per socket: 8 +CPU(s): 16 +Thread(s) per core: 2 +On-line CPU(s) list: 0-7 +`, + wantResult: "Enabled", + }, + { + name: "Missing threads per core - fallback to CPU vs core comparison", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 1 +Core(s) per socket: 8 +CPU(s): 16 +On-line CPU(s) list: 0-15 +`, + wantResult: "Enabled", + }, + { + name: "Error parsing CPU count", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 1 +Core(s) per socket: 8 +CPU(s): invalid +Thread(s) per core: 2 +On-line CPU(s) list: 0-15 +`, + wantResult: "", + }, + { + name: "Error parsing socket count", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): invalid +Core(s) per socket: 8 +CPU(s): 16 +Thread(s) per core: 2 +On-line CPU(s) list: 0-15 +`, + 
wantResult: "", + }, + { + name: "Error parsing cores per socket", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 1 +Core(s) per socket: invalid +CPU(s): 16 +Thread(s) per core: 2 +On-line CPU(s) list: 0-15 +`, + wantResult: "", + }, + { + name: "Invalid online CPU list - should continue with total CPU count", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 1 +Core(s) per socket: 8 +CPU(s): 16 +Thread(s) per core: 2 +On-line CPU(s) list: invalid-range +`, + wantResult: "Enabled", + }, + { + name: "Single core CPU - disabled result", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 1 +Core(s) per socket: 1 +CPU(s): 1 +Thread(s) per core: 1 +On-line CPU(s) list: 0 +`, + wantResult: "Disabled", + }, + { + name: "4 threads per core - enabled", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 1 +Core(s) per socket: 8 +CPU(s): 32 +Thread(s) per core: 4 +On-line CPU(s) list: 0-31 +`, + wantResult: "Enabled", + }, + { + name: "Missing CPU family - getCPUExtended will fail", + lscpuOutput: ` +Model: 143 +Stepping: 8 +Socket(s): 1 +Core(s) per socket: 8 +CPU(s): 16 +Thread(s) per core: 2 +On-line CPU(s) list: 0-15 +`, + wantResult: "", + }, + { + name: "Dual socket system with hyperthreading", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 2 +Core(s) per socket: 16 +CPU(s): 64 +Thread(s) per core: 2 +On-line CPU(s) list: 0-63 +`, + wantResult: "Enabled", + }, + { + name: "Quad socket system without hyperthreading", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 4 +Core(s) per socket: 12 +CPU(s): 48 +Thread(s) per core: 1 +On-line CPU(s) list: 0-47 +`, + wantResult: "Disabled", + }, + { + name: "Offlined cores with hyperthreading disabled and no threads per core", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 1 +Core(s) per socket: 8 +CPU(s): 64 +On-line CPU(s) list: 0-7 +`, + wantResult: "Disabled", + }, + { + name: "Offlined cores with hyperthreading enabled and no threads per core", + lscpuOutput: ` +CPU family: 6 +Model: 143 +Stepping: 8 +Socket(s): 1 +Core(s) per socket: 8 +CPU(s): 64 +On-line CPU(s) list: 0-7,32-39 +`, + wantResult: "Enabled", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + outputs := map[string]script.ScriptOutput{ + script.LscpuScriptName: { + Stdout: tt.lscpuOutput, + Stderr: "", + Exitcode: 0, + }, + } + + result := hyperthreadingFromOutput(outputs) + if result != tt.wantResult { + t.Errorf("hyperthreadingFromOutput() = %q, want %q", result, tt.wantResult) + } + }) + } +} diff --git a/internal/report/table_helpers_dimm.go b/internal/report/dimm.go similarity index 100% rename from internal/report/table_helpers_dimm.go rename to internal/report/dimm.go diff --git a/internal/report/table_helpers_frequency.go b/internal/report/frequency.go similarity index 100% rename from internal/report/table_helpers_frequency.go rename to internal/report/frequency.go diff --git a/internal/report/frequency_test.go b/internal/report/frequency_test.go new file mode 100644 index 00000000..8b6328d2 --- /dev/null +++ b/internal/report/frequency_test.go @@ -0,0 +1,209 @@ +package report + +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +import ( + "reflect" + "testing" +) + +func TestGetFrequenciesFromMSR(t *testing.T) { + tests := []struct { + name string + msr string + want []int + expectErr bool + }{ + { + name: "Valid MSR with multiple frequencies", + msr: "0x1A2B3C4D", + 
want: []int{0x4D, 0x3C, 0x2B, 0x1A}, + expectErr: false, + }, + { + name: "Valid MSR with single frequency", + msr: "0x1A", + want: []int{0x1A}, + expectErr: false, + }, + { + name: "Empty MSR string", + msr: "", + want: nil, + expectErr: true, + }, + { + name: "Invalid MSR string", + msr: "invalid_hex", + want: nil, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := getFrequenciesFromHex(tt.msr) + if (err != nil) != tt.expectErr { + t.Errorf("getFrequenciesFromMSR() error = %v, expectErr %v", err, tt.expectErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("getFrequenciesFromMSR() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetBucketSizesFromMSR(t *testing.T) { + tests := []struct { + name string + msr string + want []int + expectErr bool + }{ + { + name: "Valid MSR with 8 bucket sizes", + msr: "0x0102030405060708", + want: []int{8, 7, 6, 5, 4, 3, 2, 1}, + expectErr: false, + }, + { + name: "Valid MSR with reversed order", + msr: "0x0807060504030201", + want: []int{1, 2, 3, 4, 5, 6, 7, 8}, + expectErr: false, + }, + { + name: "Invalid MSR string", + msr: "invalid_hex", + want: nil, + expectErr: true, + }, + { + name: "MSR with less than 8 bucket sizes", + msr: "0x01020304", + want: nil, + expectErr: true, + }, + { + name: "MSR with more than 8 bucket sizes", + msr: "0x010203040506070809", + want: nil, + expectErr: true, + }, + { + name: "Empty MSR string", + msr: "", + want: nil, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := getBucketSizesFromHex(tt.msr) + if (err != nil) != tt.expectErr { + t.Errorf("getBucketSizesFromMSR() error = %v, expectErr %v", err, tt.expectErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("getBucketSizesFromMSR() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestExpandTurboFrequencies(t *testing.T) { + tests := []struct { + name string + buckets [][]string + isa string + want []string + expectErr bool + }{ + { + name: "Valid input with single bucket", + buckets: [][]string{ + {"Cores", "SSE", "AVX2"}, + {"1-4", "3.5", "3.2"}, + }, + isa: "SSE", + want: []string{"3.5", "3.5", "3.5", "3.5"}, + expectErr: false, + }, + { + name: "Valid input with multiple buckets", + buckets: [][]string{ + {"Cores", "SSE", "AVX2"}, + {"1-2", "3.5", "3.2"}, + {"3-4", "3.6", "3.3"}, + }, + isa: "SSE", + want: []string{"3.5", "3.5", "3.6", "3.6"}, + expectErr: false, + }, + { + name: "ISA column not found", + buckets: [][]string{ + {"Cores", "SSE", "AVX2"}, + {"1-4", "3.5", "3.2"}, + }, + isa: "AVX512", + want: nil, + expectErr: true, + }, + { + name: "Empty buckets", + buckets: [][]string{ + {}, + }, + isa: "SSE", + want: nil, + expectErr: true, + }, + { + name: "Invalid bucket range", + buckets: [][]string{ + {"Cores", "SSE", "AVX2"}, + {"1-", "3.5", "3.2"}, + }, + isa: "SSE", + want: nil, + expectErr: true, + }, + { + name: "Empty frequency value", + buckets: [][]string{ + {"Cores", "SSE", "AVX2"}, + {"1-4", "", "3.2"}, + }, + isa: "SSE", + want: nil, + expectErr: true, + }, + { + name: "Whitespace in bucket range", + buckets: [][]string{ + {"Cores", "SSE", "AVX2"}, + {" 1-4 ", "3.5", "3.2"}, + }, + isa: "SSE", + want: []string{"3.5", "3.5", "3.5", "3.5"}, + expectErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := expandTurboFrequencies(tt.buckets, tt.isa) + if (err != nil) != tt.expectErr { + t.Errorf("expandTurboFrequencies() error = %v, expectErr 
%v", err, tt.expectErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("expandTurboFrequencies() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/report/gpu.go b/internal/report/gpu.go new file mode 100644 index 00000000..60cb632d --- /dev/null +++ b/internal/report/gpu.go @@ -0,0 +1,305 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package report + +import ( + "log/slog" + "regexp" + "sort" + "strconv" + "strings" + + "perfspect/internal/script" +) + +// Intel Discrete GPUs (sorted by devid) +// references: +// https://pci-ids.ucw.cz/read/PC/8086 +// https://dgpu-docs.intel.com/devices/hardware-table.html +// +// The devid field will be interpreted as a regular expression. + +type GPUDefinition struct { + Model string + MfgID string + DevID string +} + +var gpuDefinitions = []GPUDefinition{ + { + Model: "ATS-P", + MfgID: "8086", + DevID: "201", + }, + { + Model: "Ponte Vecchio 2T", + MfgID: "8086", + DevID: "BD0", + }, + { + Model: "Ponte Vecchio 1T", + MfgID: "8086", + DevID: "BD5", + }, + { + Model: "Intel® Iris® Xe MAX Graphics (DG1)", + MfgID: "8086", + DevID: "4905", + }, + { + Model: "Intel® Iris® Xe Pod (DG1)", + MfgID: "8086", + DevID: "4906", + }, + { + Model: "SG1", + MfgID: "8086", + DevID: "4907", + }, + { + Model: "Intel® Iris® Xe Graphics (DG1)", + MfgID: "8086", + DevID: "4908", + }, + { + Model: "Intel® Iris® Xe MAX 100 (DG1)", + MfgID: "8086", + DevID: "4909", + }, + { + Model: "DG2", + MfgID: "8086", + DevID: "(4F80|4F81|4F82)", + }, + { + Model: "Intel® Arc ™ A770M Graphics", + MfgID: "8086", + DevID: "5690", + }, + { + Model: "Intel® Arc ™ A730M Graphics (Alchemist)", + MfgID: "8086", + DevID: "5691", + }, + { + Model: "Intel® Arc ™ A550M Graphics (Alchemist)", + MfgID: "8086", + DevID: "5692", + }, + { + Model: "Intel® Arc ™ A370M Graphics (Alchemist)", + MfgID: "8086", + DevID: "5693", + }, + { + Model: "Intel® Arc ™ A350M Graphics (Alchemist)", + MfgID: "8086", + DevID: "5694", + }, + { + Model: "Intel® Arc ™ A770 Graphics", + MfgID: "8086", + DevID: "56A0", + }, + { + Model: "Intel® Arc ™ A750 Graphics (Alchemist)", + MfgID: "8086", + DevID: "56A1", + }, + { + Model: "Intel® Arc ™ A380 Graphics (Alchemist)", + MfgID: "8086", + DevID: "56A5", + }, + { + Model: "Intel® Arc ™ A310 Graphics (Alchemist)", + MfgID: "8086", + DevID: "56A6", + }, + { + Model: "Intel® Data Center GPU Flex 170", + MfgID: "8086", + DevID: "56C0", + }, + { + Model: "Intel® Data Center GPU Flex 140", + MfgID: "8086", + DevID: "56C1", + }, + { + Model: "Intel® Data Center GPU Flex 170V", + MfgID: "8086", + DevID: "56C2", + }, +} + +type GPU struct { + Manufacturer string + Model string + PCIID string +} + +func gpuInfoFromOutput(outputs map[string]script.ScriptOutput) []GPU { + gpus := []GPU{} + gpusLshw := valsArrayFromRegexSubmatch(outputs[script.LshwScriptName].Stdout, `^pci.*?\s+display\s+(\w+).*?\s+\[(\w+):(\w+)]$`) + idxMfgName := 0 + idxMfgID := 1 + idxDevID := 2 + for _, gpu := range gpusLshw { + // Find GPU in GPU defs, note the model + var model string + for _, intelGPU := range gpuDefinitions { + if gpu[idxMfgID] == intelGPU.MfgID { + model = intelGPU.Model + break + } + re := regexp.MustCompile(intelGPU.DevID) + if re.FindString(gpu[idxDevID]) != "" { + model = intelGPU.Model + break + } + } + if model == "" { + if gpu[idxMfgID] == "8086" { + model = "Unknown Intel" + } else { + model = "Unknown" + } + } + gpus = append(gpus, GPU{Manufacturer: gpu[idxMfgName], Model: model, PCIID: gpu[idxMfgID] + ":" 
+ gpu[idxDevID]}) + } + return gpus +} + +type Gaudi struct { + ModuleID string + Microarchitecture string + SerialNumber string + BusID string + DriverVersion string + EROM string + CPLD string + SPI string + NUMA string +} + +func gaudiInfoFromOutput(outputs map[string]script.ScriptOutput) []Gaudi { + gaudis := []Gaudi{} + for i, line := range strings.Split(outputs[script.GaudiInfoScriptName].Stdout, "\n") { + if line == "" || i == 0 { // skip blank lines and header + continue + } + fields := strings.Split(line, ", ") + if len(fields) != 4 { + slog.Error("unexpected number of fields in gaudi info output", slog.String("line", line)) + continue + } + gaudis = append(gaudis, Gaudi{ModuleID: fields[0], SerialNumber: fields[1], BusID: fields[2], DriverVersion: fields[3]}) + } + // sort the gaudis by module ID + sort.Slice(gaudis, func(i, j int) bool { + return gaudis[i].ModuleID < gaudis[j].ModuleID + }) + // set microarchitecture (assumes same arch for all gaudi devices) + for i := range gaudis { + gaudis[i].Microarchitecture = strings.TrimSpace(outputs[script.GaudiArchitectureScriptName].Stdout) + } + // get NUMA affinity + numaAffinities := valsArrayFromRegexSubmatch(outputs[script.GaudiNumaScriptName].Stdout, `^(\d+)\s+(\d+)\s+$`) + if len(numaAffinities) != len(gaudis) { + slog.Error("number of gaudis in gaudi info and numa output do not match", slog.Int("gaudis", len(gaudis)), slog.Int("numaAffinities", len(numaAffinities))) + return nil + } + for i, numaAffinity := range numaAffinities { + gaudis[i].NUMA = numaAffinity[1] + } + // get firmware versions + reDevice := regexp.MustCompile(`^\[(\d+)] AIP \(accel\d+\) (.*)$`) + reErom := regexp.MustCompile(`\s+erom$`) + reCpld := regexp.MustCompile(`\s+cpld$`) + rePreboot := regexp.MustCompile(`\s+preboot$`) + reComponent := regexp.MustCompile(`^\s+component\s+:\s+hl-gaudi\d-(.*)-sec-\d+`) + reCpldComponent := regexp.MustCompile(`^\s+component\s+:\s+(0x[0-9a-fA-F]+\.[0-9a-fA-F]+)$`) + deviceIdx := -1 + state := -1 + for line := range strings.SplitSeq(outputs[script.GaudiFirmwareScriptName].Stdout, "\n") { + if line == "" { + continue + } + match := reDevice.FindStringSubmatch(line) + if match != nil { + var err error + deviceIdx, err = strconv.Atoi(match[1]) + if err != nil { + slog.Error("failed to parse device index", slog.String("deviceIdx", match[1])) + return nil + } + if deviceIdx >= len(gaudis) { + slog.Error("device index out of range", slog.Int("deviceIdx", deviceIdx), slog.Int("gaudis", len(gaudis))) + return nil + } + continue + } + if deviceIdx == -1 { + continue + } + if reErom.FindString(line) != "" { + state = 0 + continue + } + if reCpld.FindString(line) != "" { + state = 1 + continue + } + if rePreboot.FindString(line) != "" { + state = 2 + continue + } + if state != -1 { + switch state { + case 0: + match := reComponent.FindStringSubmatch(line) + if match != nil { + gaudis[deviceIdx].EROM = match[1] + } + case 1: + match := reCpldComponent.FindStringSubmatch(line) + if match != nil { + gaudis[deviceIdx].CPLD = match[1] + } + case 2: + match := reComponent.FindStringSubmatch(line) + if match != nil { + gaudis[deviceIdx].SPI = match[1] + } + } + state = -1 + } + } + return gaudis +} + +// return all PCI Devices of specified class +func getPCIDevices(class string, outputs map[string]script.ScriptOutput) (devices []map[string]string) { + device := make(map[string]string) + re := regexp.MustCompile(`^(\w+):\s+(.*)$`) + for line := range strings.SplitSeq(outputs[script.LspciVmmScriptName].Stdout, "\n") { + if line == "" { // end 
of device + if devClass, ok := device["Class"]; ok { + if devClass == class { + devices = append(devices, device) + } + } + device = make(map[string]string) + continue + } + match := re.FindStringSubmatch(line) + if len(match) > 0 { + key := match[1] + value := match[2] + device[key] = value + } + } + return +} diff --git a/internal/report/gpu_defs.go b/internal/report/gpu_defs.go deleted file mode 100644 index 3104d2bd..00000000 --- a/internal/report/gpu_defs.go +++ /dev/null @@ -1,125 +0,0 @@ -package report - -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -// Intel Discrete GPUs (sorted by devid) -// references: -// https://pci-ids.ucw.cz/read/PC/8086 -// https://dgpu-docs.intel.com/devices/hardware-table.html -// -// The devid field will be interpreted as a regular expression. - -type GPUDefinition struct { - Model string - MfgID string - DevID string -} - -var gpuDefinitions = []GPUDefinition{ - { - Model: "ATS-P", - MfgID: "8086", - DevID: "201", - }, - { - Model: "Ponte Vecchio 2T", - MfgID: "8086", - DevID: "BD0", - }, - { - Model: "Ponte Vecchio 1T", - MfgID: "8086", - DevID: "BD5", - }, - { - Model: "Intel® Iris® Xe MAX Graphics (DG1)", - MfgID: "8086", - DevID: "4905", - }, - { - Model: "Intel® Iris® Xe Pod (DG1)", - MfgID: "8086", - DevID: "4906", - }, - { - Model: "SG1", - MfgID: "8086", - DevID: "4907", - }, - { - Model: "Intel® Iris® Xe Graphics (DG1)", - MfgID: "8086", - DevID: "4908", - }, - { - Model: "Intel® Iris® Xe MAX 100 (DG1)", - MfgID: "8086", - DevID: "4909", - }, - { - Model: "DG2", - MfgID: "8086", - DevID: "(4F80|4F81|4F82)", - }, - { - Model: "Intel® Arc ™ A770M Graphics", - MfgID: "8086", - DevID: "5690", - }, - { - Model: "Intel® Arc ™ A730M Graphics (Alchemist)", - MfgID: "8086", - DevID: "5691", - }, - { - Model: "Intel® Arc ™ A550M Graphics (Alchemist)", - MfgID: "8086", - DevID: "5692", - }, - { - Model: "Intel® Arc ™ A370M Graphics (Alchemist)", - MfgID: "8086", - DevID: "5693", - }, - { - Model: "Intel® Arc ™ A350M Graphics (Alchemist)", - MfgID: "8086", - DevID: "5694", - }, - { - Model: "Intel® Arc ™ A770 Graphics", - MfgID: "8086", - DevID: "56A0", - }, - { - Model: "Intel® Arc ™ A750 Graphics (Alchemist)", - MfgID: "8086", - DevID: "56A1", - }, - { - Model: "Intel® Arc ™ A380 Graphics (Alchemist)", - MfgID: "8086", - DevID: "56A5", - }, - { - Model: "Intel® Arc ™ A310 Graphics (Alchemist)", - MfgID: "8086", - DevID: "56A6", - }, - { - Model: "Intel® Data Center GPU Flex 170", - MfgID: "8086", - DevID: "56C0", - }, - { - Model: "Intel® Data Center GPU Flex 140", - MfgID: "8086", - DevID: "56C1", - }, - { - Model: "Intel® Data Center GPU Flex 170V", - MfgID: "8086", - DevID: "56C2", - }, -} diff --git a/internal/report/isa.go b/internal/report/isa.go new file mode 100644 index 00000000..b40cee33 --- /dev/null +++ b/internal/report/isa.go @@ -0,0 +1,63 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package report + +import ( + "perfspect/internal/script" +) + +type ISA struct { + Name string + FullName string + CPUID string +} + +var isas = []ISA{ + {"AES", "Advanced Encryption Standard New Instructions (AES-NI)", "AES instruction"}, + {"AMX", "Advanced Matrix Extensions (AMX)", "AMX-BF16: tile bfloat16 support"}, + {"AMX-COMPLEX", "AMX-COMPLEX Instruction", "AMX-COMPLEX instructions"}, + {"AMX-FP16", "AMX-FP16 Instruction", "AMX-FP16: FP16 tile operations"}, + {"AVX-IFMA", "AVX-IFMA Instruction", "AVX-IFMA: integer fused multiply add"}, + {"AVX-NE-CONVERT", 
"AVX-NE-CONVERT Instruction", "AVX-NE-CONVERT instructions"}, + {"AVX-VNNI-INT8", "AVX-VNNI-INT8 Instruction", "AVX-VNNI-INT8 instructions"}, + {"AVX512F", "AVX-512 Foundation", "AVX512F: AVX-512 foundation instructions"}, + {"AVX512_BF16", "Vector Neural Network Instructions (AVX512_BF16)", "AVX512_BF16: bfloat16 instructions"}, + {"AVX512_FP16", "Advanced Vector Extensions (AVX512_FP16)", "AVX512_FP16: fp16 support"}, + {"AVX512_VNNI", "Vector Neural Network Instructions (AVX512_VNNI)", "AVX512_VNNI: neural network instructions"}, + {"CLDEMOTE", "Cache Line Demote (CLDEMOTE)", "CLDEMOTE supports cache line demote"}, + {"CMPCCXADD", "Compare and Add if Condition is Met (CMPCCXADD)", "CMPccXADD instructions"}, + {"ENQCMD", "Enqueue Command Instruction (ENQCMD)", "ENQCMD instruction"}, + {"MOVDIRI", "Move Doubleword as Direct Store (MOVDIRI)", "MOVDIRI instruction"}, + {"MOVDIR64B", "Move 64 Bytes as Direct Store (MOVDIR64B)", "MOVDIR64B instruction"}, + {"PREFETCHIT0/1", "PREFETCHIT0/1 Instruction", "PREFETCHIT0, PREFETCHIT1 instructions"}, + {"SERIALIZE", "SERIALIZE Instruction", "SERIALIZE instruction"}, + {"SHA_NI", "SHA1/SHA256 Instruction Extensions (SHA_NI)", "SHA instructions"}, + {"TSXLDTRK", "Transactional Synchronization Extensions (TSXLDTRK)", "TSXLDTRK: TSX suspend load addr tracking"}, + {"VAES", "Vector AES", "VAES instructions"}, + {"WAITPKG", "UMONITOR, UMWAIT, TPAUSE Instructions", "WAITPKG instructions"}, +} + +func isaFullNames() []string { + var names []string + for _, isa := range isas { + names = append(names, isa.FullName) + } + return names +} + +func yesIfTrue(val string) string { + if val == "true" { + return "Yes" + } + return "No" +} + +func isaSupportedFromOutput(outputs map[string]script.ScriptOutput) []string { + var supported []string + for _, isa := range isas { + oneSupported := yesIfTrue(valFromRegexSubmatch(outputs[script.CpuidScriptName].Stdout, isa.CPUID+`\s*= (.+?)$`)) + supported = append(supported, oneSupported) + } + return supported +} diff --git a/internal/report/nic.go b/internal/report/nic.go new file mode 100644 index 00000000..e0148023 --- /dev/null +++ b/internal/report/nic.go @@ -0,0 +1,258 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package report + +import ( + "fmt" + "math/big" + "sort" + "strconv" + "strings" + + "perfspect/internal/script" +) + +type nicInfo struct { + Name string + Vendor string + VendorID string + Model string + ModelID string + Speed string + Link string + Bus string + Driver string + DriverVersion string + FirmwareVersion string + MACAddress string + NUMANode string + CPUAffinity string + AdaptiveRX string + AdaptiveTX string + RxUsecs string + TxUsecs string + Card string + Port string + MTU string + IsVirtual bool + TXQueues string + RXQueues string + XPSCPUs map[string]string + RPSCPUs map[string]string +} + +func parseNicInfo(scriptOutput string) []nicInfo { + var nics []nicInfo + for nicOutput := range strings.SplitSeq(scriptOutput, "----------------------------------------") { + if strings.TrimSpace(nicOutput) == "" { + continue + } + var nic nicInfo + nic.XPSCPUs = make(map[string]string) + nic.RPSCPUs = make(map[string]string) + // Map of prefixes to field pointers + fieldMap := map[string]*string{ + "Interface: ": &nic.Name, + "Vendor: ": &nic.Vendor, + "Vendor ID: ": &nic.VendorID, + "Model: ": &nic.Model, + "Model ID: ": &nic.ModelID, + "Speed: ": &nic.Speed, + "Link detected: ": &nic.Link, + "bus-info: ": &nic.Bus, + "driver: ": &nic.Driver, + "version: 
": &nic.DriverVersion, + "firmware-version: ": &nic.FirmwareVersion, + "MAC Address: ": &nic.MACAddress, + "NUMA Node: ": &nic.NUMANode, + "CPU Affinity: ": &nic.CPUAffinity, + "rx-usecs: ": &nic.RxUsecs, + "tx-usecs: ": &nic.TxUsecs, + "MTU: ": &nic.MTU, + "TX Queues: ": &nic.TXQueues, + "RX Queues: ": &nic.RXQueues, + } + for line := range strings.SplitSeq(nicOutput, "\n") { + line = strings.TrimSpace(line) + // Special parsing for "Adaptive RX: off TX: off" format + if strings.HasPrefix(line, "Adaptive RX: ") { + parts := strings.Split(line, "TX: ") + if len(parts) == 2 { + nic.AdaptiveRX = strings.TrimSpace(strings.TrimPrefix(parts[0], "Adaptive RX: ")) + nic.AdaptiveTX = strings.TrimSpace(parts[1]) + } + continue + } + // Check if this is a virtual function + if value, ok := strings.CutPrefix(line, "Virtual Function: "); ok { + nic.IsVirtual = (strings.TrimSpace(value) == "yes") + continue + } + // Special parsing for xps_cpus and rps_cpus + if strings.HasPrefix(line, "xps_cpus tx-") { + parts := strings.SplitN(line, ": ", 2) + if len(parts) == 2 { + queue := strings.TrimPrefix(parts[0], "xps_cpus ") + nic.XPSCPUs[queue] = hexBitmapToCPUList(parts[1]) + } + continue + } + if strings.HasPrefix(line, "rps_cpus rx-") { + parts := strings.SplitN(line, ": ", 2) + if len(parts) == 2 { + queue := strings.TrimPrefix(parts[0], "rps_cpus ") + nic.RPSCPUs[queue] = hexBitmapToCPUList(parts[1]) + } + continue + } + for prefix, fieldPtr := range fieldMap { + if after, ok := strings.CutPrefix(line, prefix); ok { + *fieldPtr = after + break + } + } + } + // special case for model as it sometimes has additional information in parentheses + nic.Model = strings.TrimSpace(strings.Split(nic.Model, "(")[0]) + nics = append(nics, nic) + } + // Assign card and port information + assignCardAndPort(nics) + return nics +} + +func hexBitmapToCPUList(hexBitmap string) string { + if hexBitmap == "" { + return "" + } + + // Remove commas to form a single continuous hex string. + // This assumes the comma-separated parts are in big-endian order. + fullHexBitmap := strings.ReplaceAll(hexBitmap, ",", "") + + i := new(big.Int) + // The string is a hex string, so the base is 16. + if _, success := i.SetString(fullHexBitmap, 16); !success { + // If parsing fails, it might not be a hex string. Return as is. + return hexBitmap + } + + var cpus []string + // Iterate through the bits of the big integer. 
+ for bit := 0; bit < i.BitLen(); bit++ { + if i.Bit(bit) == 1 { + cpus = append(cpus, fmt.Sprintf("%d", bit)) + } + } + if len(cpus) == 0 { + return "" + } + return strings.Join(cpus, ",") +} + +// assignCardAndPort assigns card and port numbers to NICs based on their PCI addresses +func assignCardAndPort(nics []nicInfo) { + if len(nics) == 0 { + return + } + + // Map to store card identifiers (domain:bus:device) to card numbers + cardMap := make(map[string]int) + // Map to track ports within each card + portMap := make(map[string][]int) // card identifier -> list of indices in nics slice + cardCounter := 1 + + // First pass: identify cards and group NICs by card + for i := range nics { + if nics[i].Bus == "" { + continue + } + // PCI address format: domain:bus:device.function (e.g., 0000:32:00.0) + // Extract domain:bus:device as the card identifier + parts := strings.Split(nics[i].Bus, ":") + if len(parts) != 3 { + continue + } + // Further split the last part to separate device from function + deviceFunc := strings.Split(parts[2], ".") + if len(deviceFunc) != 2 { + continue + } + // Card identifier is domain:bus:device + cardID := parts[0] + ":" + parts[1] + ":" + deviceFunc[0] + + // Assign card number if not already assigned + if _, exists := cardMap[cardID]; !exists { + cardMap[cardID] = cardCounter + cardCounter++ + } + // Add this NIC index to the card's port list + portMap[cardID] = append(portMap[cardID], i) + } + + // Second pass: assign card and port numbers + for cardID, nicIndices := range portMap { + cardNum := cardMap[cardID] + // Sort NICs within a card by their function number + sort.Slice(nicIndices, func(i, j int) bool { + // Extract function numbers + funcI := extractFunction(nics[nicIndices[i]].Bus) + funcJ := extractFunction(nics[nicIndices[j]].Bus) + return funcI < funcJ + }) + // Assign port numbers + for portNum, nicIdx := range nicIndices { + nics[nicIdx].Card = fmt.Sprintf("%d", cardNum) + nics[nicIdx].Port = fmt.Sprintf("%d", portNum+1) + } + } +} + +// extractFunction extracts the function number from a PCI address +func extractFunction(busAddr string) int { + parts := strings.Split(busAddr, ".") + if len(parts) != 2 { + return 0 + } + funcNum, err := strconv.Atoi(parts[1]) + if err != nil { + return 0 + } + return funcNum +} + +func nicIRQMappingsFromOutput(outputs map[string]script.ScriptOutput) [][]string { + nics := parseNicInfo(outputs[script.NicInfoScriptName].Stdout) + if len(nics) == 0 { + return nil + } + nicIRQMappings := [][]string{} + for _, nic := range nics { + if nic.CPUAffinity == "" { + continue // skip NICs without CPU affinity + } + affinities := strings.Split(strings.TrimSuffix(nic.CPUAffinity, ";"), ";") + nicIRQMappings = append(nicIRQMappings, []string{nic.Name, strings.Join(affinities, " | ")}) + } + return nicIRQMappings +} + +func nicSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + nics := parseNicInfo(outputs[script.NicInfoScriptName].Stdout) + if len(nics) == 0 { + return "N/A" + } + modelCount := make(map[string]int) + for _, nic := range nics { + modelCount[nic.Model]++ + } + var summary []string + for model, count := range modelCount { + if model == "" { + model = "Unknown NIC" + } + summary = append(summary, fmt.Sprintf("%dx %s", count, model)) + } + return strings.Join(summary, ", ") +} diff --git a/internal/report/nic_test.go b/internal/report/nic_test.go new file mode 100644 index 00000000..19f2998f --- /dev/null +++ b/internal/report/nic_test.go @@ -0,0 +1,627 @@ +package report + +// Copyright (C) 
2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +import ( + "perfspect/internal/script" + "testing" +) + +func TestAssignCardAndPort(t *testing.T) { + tests := []struct { + name string + nics []nicInfo + expected map[string]string // map of NIC name to expected "Card / Port" + }{ + { + name: "Two cards with two ports each", + nics: []nicInfo{ + {Name: "eth2", Bus: "0000:32:00.0"}, + {Name: "eth3", Bus: "0000:32:00.1"}, + {Name: "eth0", Bus: "0000:c0:00.0"}, + {Name: "eth1", Bus: "0000:c0:00.1"}, + }, + expected: map[string]string{ + "eth2": "1 / 1", + "eth3": "1 / 2", + "eth0": "2 / 1", + "eth1": "2 / 2", + }, + }, + { + name: "Single card with four ports", + nics: []nicInfo{ + {Name: "eth0", Bus: "0000:19:00.0"}, + {Name: "eth1", Bus: "0000:19:00.1"}, + {Name: "eth2", Bus: "0000:19:00.2"}, + {Name: "eth3", Bus: "0000:19:00.3"}, + }, + expected: map[string]string{ + "eth0": "1 / 1", + "eth1": "1 / 2", + "eth2": "1 / 3", + "eth3": "1 / 4", + }, + }, + { + name: "Three different cards", + nics: []nicInfo{ + {Name: "eth0", Bus: "0000:19:00.0"}, + {Name: "eth1", Bus: "0000:1a:00.0"}, + {Name: "eth2", Bus: "0000:1b:00.0"}, + }, + expected: map[string]string{ + "eth0": "1 / 1", + "eth1": "2 / 1", + "eth2": "3 / 1", + }, + }, + { + name: "Empty bus address should not assign card/port", + nics: []nicInfo{ + {Name: "eth0", Bus: ""}, + }, + expected: map[string]string{ + "eth0": " / ", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assignCardAndPort(tt.nics) + for _, nic := range tt.nics { + expected := tt.expected[nic.Name] + actual := nic.Card + " / " + nic.Port + if actual != expected { + t.Errorf("NIC %s: expected %q, got %q", nic.Name, expected, actual) + } + } + }) + } +} + +func TestExtractFunction(t *testing.T) { + tests := []struct { + busAddr string + expected int + }{ + {"0000:32:00.0", 0}, + {"0000:32:00.1", 1}, + {"0000:32:00.3", 3}, + {"invalid", 0}, + {"", 0}, + } + + for _, tt := range tests { + t.Run(tt.busAddr, func(t *testing.T) { + result := extractFunction(tt.busAddr) + if result != tt.expected { + t.Errorf("expected %d, got %d", tt.expected, result) + } + }) + } +} + +func TestParseNicInfoWithCardPort(t *testing.T) { + // Sample output simulating the scenario from the issue + sampleOutput := `Interface: eth2 +Vendor ID: 8086 +Model ID: 1593 +Vendor: Intel Corporation +Model: Ethernet Controller 10G X550T +Speed: 1000Mb/s +Link detected: yes +bus-info: 0000:32:00.0 +driver: ixgbe +version: 5.1.0-k +firmware-version: 0x800009e0 +MAC Address: aa:bb:cc:dd:ee:00 +NUMA Node: 0 +CPU Affinity: +IRQ Balance: Enabled +rx-usecs: 1 +tx-usecs: 1 +Adaptive RX: off TX: off +---------------------------------------- +Interface: eth3 +Vendor ID: 8086 +Model ID: 1593 +Vendor: Intel Corporation +Model: Ethernet Controller 10G X550T +Speed: Unknown! 
+Link detected: no +bus-info: 0000:32:00.1 +driver: ixgbe +version: 5.1.0-k +firmware-version: 0x800009e0 +MAC Address: aa:bb:cc:dd:ee:01 +NUMA Node: 0 +CPU Affinity: +IRQ Balance: Enabled +rx-usecs: 1 +tx-usecs: 1 +Adaptive RX: off TX: off +---------------------------------------- +Interface: eth0 +Vendor ID: 8086 +Model ID: 37d2 +Vendor: Intel Corporation +Model: Ethernet Controller E810-C for QSFP +Speed: 100000Mb/s +Link detected: yes +bus-info: 0000:c0:00.0 +driver: ice +version: K_5.19.0-41-generic_5.1.9 +firmware-version: 4.40 0x8001c967 1.3534.0 +MAC Address: aa:bb:cc:dd:ee:82 +NUMA Node: 1 +CPU Affinity: +IRQ Balance: Enabled +rx-usecs: 1 +tx-usecs: 1 +Adaptive RX: off TX: off +---------------------------------------- +Interface: eth1 +Vendor ID: 8086 +Model ID: 37d2 +Vendor: Intel Corporation +Model: Ethernet Controller E810-C for QSFP +Speed: 100000Mb/s +Link detected: yes +bus-info: 0000:c0:00.1 +driver: ice +version: K_5.19.0-41-generic_5.1.9 +firmware-version: 4.40 0x8001c967 1.3534.0 +MAC Address: aa:bb:cc:dd:ee:83 +NUMA Node: 1 +CPU Affinity: +IRQ Balance: Enabled +rx-usecs: 1 +tx-usecs: 1 +Adaptive RX: off TX: off +----------------------------------------` + + nics := parseNicInfo(sampleOutput) + + if len(nics) != 4 { + t.Fatalf("Expected 4 NICs, got %d", len(nics)) + } + + // Expected card/port assignments based on the issue example + expectedCardPort := map[string]struct { + card string + port string + }{ + "eth2": {"1", "1"}, // 0000:32:00.0 + "eth3": {"1", "2"}, // 0000:32:00.1 + "eth0": {"2", "1"}, // 0000:c0:00.0 + "eth1": {"2", "2"}, // 0000:c0:00.1 + } + + for _, nic := range nics { + expected, exists := expectedCardPort[nic.Name] + if !exists { + t.Errorf("Unexpected NIC name: %s", nic.Name) + continue + } + if nic.Card != expected.card { + t.Errorf("NIC %s: expected card %s, got %s", nic.Name, expected.card, nic.Card) + } + if nic.Port != expected.port { + t.Errorf("NIC %s: expected port %s, got %s", nic.Name, expected.port, nic.Port) + } + } +} + +func TestNicTableValuesWithCardPort(t *testing.T) { + // Sample output simulating the scenario from the issue + sampleOutput := `Interface: eth2 +bus-info: 0000:32:00.0 +Vendor: Intel Corporation +Model: Ethernet Controller 10G X550T +Speed: 1000Mb/s +Link detected: yes +---------------------------------------- +Interface: eth3 +bus-info: 0000:32:00.1 +Vendor: Intel Corporation +Model: Ethernet Controller 10G X550T +Speed: Unknown! 
+Link detected: no +---------------------------------------- +Interface: eth0 +bus-info: 0000:c0:00.0 +Vendor: Intel Corporation +Model: Ethernet Controller E810-C for QSFP +Speed: 100000Mb/s +Link detected: yes +---------------------------------------- +Interface: eth1 +bus-info: 0000:c0:00.1 +Vendor: Intel Corporation +Model: Ethernet Controller E810-C for QSFP +Speed: 100000Mb/s +Link detected: yes +----------------------------------------` + + outputs := map[string]script.ScriptOutput{ + script.NicInfoScriptName: {Stdout: sampleOutput}, + } + + fields := nicTableValues(outputs) + + // Find the "Card / Port" field + var cardPortField Field + found := false + for _, field := range fields { + if field.Name == "Card / Port" { + cardPortField = field + found = true + break + } + } + + if !found { + t.Fatal("Card / Port field not found in NIC table") + } + + // Verify we have 4 entries + if len(cardPortField.Values) != 4 { + t.Fatalf("Expected 4 Card / Port values, got %d", len(cardPortField.Values)) + } + + // Find the Name field to match values + var nameField Field + for _, field := range fields { + if field.Name == "Name" { + nameField = field + break + } + } + + // Verify card/port assignments + expectedCardPort := map[string]string{ + "eth2": "1 / 1", + "eth3": "1 / 2", + "eth0": "2 / 1", + "eth1": "2 / 2", + } + + for i, name := range nameField.Values { + expected := expectedCardPort[name] + actual := cardPortField.Values[i] + if actual != expected { + t.Errorf("NIC %s: expected Card / Port %q, got %q", name, expected, actual) + } + } +} + +func TestParseNicInfo(t *testing.T) { + nics := parseNicInfo(nicinfo) + if len(nics) != 3 { + t.Errorf("expected 3 NICs, got %d", len(nics)) + } + + // Test first NIC + first := nics[0] + if first.Name != "ens7f0np0" { + t.Errorf("expected Name 'ens7f0np0', got '%s'", first.Name) + } + if first.Vendor != "Broadcom Inc. and subsidiaries" { + t.Errorf("expected Vendor 'Broadcom Inc. 
and subsidiaries', got '%s'", first.Vendor) + } + if first.Model == "" { + t.Errorf("expected non-empty Model") + } + if first.Speed != "1000Mb/s" { + t.Errorf("expected Speed '1000Mb/s', got '%s'", first.Speed) + } + if first.Link != "yes" { + t.Errorf("expected Link 'yes', got '%s'", first.Link) + } + if first.Bus != "0000:4c:00.0" { + t.Errorf("expected Bus '0000:4c:00.0', got '%s'", first.Bus) + } + if first.Driver != "bnxt_en" { + t.Errorf("expected Driver 'bnxt_en', got '%s'", first.Driver) + } + if first.DriverVersion == "" { + t.Errorf("expected non-empty DriverVersion") + } + if first.FirmwareVersion == "" { + t.Errorf("expected non-empty FirmwareVersion") + } + if first.MACAddress != "04:32:01:f3:e1:a4" { + t.Errorf("expected MACAddress '04:32:01:f3:e1:a4', got '%s'", first.MACAddress) + } + if first.NUMANode != "0" { + t.Errorf("expected NUMANode '0', got '%s'", first.NUMANode) + } + if first.CPUAffinity == "" { + t.Errorf("expected non-empty CPUAffinity") + } + if first.AdaptiveRX != "off" { + t.Errorf("expected AdaptiveRX 'off', got '%s'", first.AdaptiveRX) + } + if first.AdaptiveTX != "off" { + t.Errorf("expected AdaptiveTX 'off', got '%s'", first.AdaptiveTX) + } + if first.RxUsecs != "200" { + t.Errorf("expected RxUsecs '200', got '%s'", first.RxUsecs) + } + if first.TxUsecs != "150" { + t.Errorf("expected TxUsecs '150', got '%s'", first.TxUsecs) + } + if first.IsVirtual { + t.Errorf("expected IsVirtual to be false for first NIC") + } + + // Spot check second NIC + second := nics[1] + if second.Name != "ens7f1np1" { + t.Errorf("expected Name 'ens7f1np1', got '%s'", second.Name) + } + if second.Model != "BCM57416 NetXtreme-E Dual-Media 10G RDMA Ethernet Controller" { + t.Errorf("expected Model 'BCM57416 NetXtreme-E Dual-Media 10G RDMA Ethernet Controller', got '%s'", second.Model) + } + if second.Link != "no" { + t.Errorf("expected Link 'no', got '%s'", second.Link) + } + if second.AdaptiveRX != "on" { + t.Errorf("expected AdaptiveRX 'on', got '%s'", second.AdaptiveRX) + } + if second.AdaptiveTX != "on" { + t.Errorf("expected AdaptiveTX 'on', got '%s'", second.AdaptiveTX) + } + if second.RxUsecs != "100" { + t.Errorf("expected RxUsecs '100', got '%s'", second.RxUsecs) + } + if second.TxUsecs != "100" { + t.Errorf("expected TxUsecs '100', got '%s'", second.TxUsecs) + } + + // Spot check third NIC + third := nics[2] + if third.Name != "enx2aecf92702ac" { + t.Errorf("expected Name 'enx2aecf92702ac', got '%s'", third.Name) + } + if third.Vendor != "Netchip Technology, Inc." 
{ + t.Errorf("expected Vendor 'Netchip Technology, Inc.', got '%s'", third.Vendor) + } +} + +func TestParseNicInfoWithVirtualFunction(t *testing.T) { + nicinfoWithVF := ` +Interface: eth0 +Vendor: Intel Corporation +Vendor ID: 8086 +Model: Ethernet Adaptive Virtual Function +Model ID: 1889 +Speed: 10000Mb/s +Link detected: yes +driver: iavf +version: 6.13.7-061307-generic +firmware-version: N/A +bus-info: 0000:c0:11.0 +MAC Address: 00:11:22:33:44:55 +NUMA Node: 1 +Virtual Function: yes +CPU Affinity: 100:0-63; +IRQ Balance: Enabled +Adaptive RX: on TX: on +rx-usecs: 100 +tx-usecs: 100 +---------------------------------------- +Interface: eth1 +Vendor: Intel Corporation +Vendor ID: 8086 +Model: Ethernet Controller E810-C +Model ID: 1592 +Speed: 25000Mb/s +Link detected: yes +driver: ice +version: 6.13.7-061307-generic +firmware-version: 4.20 +bus-info: 0000:c0:00.0 +MAC Address: aa:bb:cc:dd:ee:ff +NUMA Node: 1 +Virtual Function: no +CPU Affinity: 200:0-63; +IRQ Balance: Enabled +Adaptive RX: off TX: off +rx-usecs: 50 +tx-usecs: 50 +---------------------------------------- +` + nics := parseNicInfo(nicinfoWithVF) + if len(nics) != 2 { + t.Fatalf("expected 2 NICs, got %d", len(nics)) + } + + // Test virtual function + vf := nics[0] + if vf.Name != "eth0" { + t.Errorf("expected Name 'eth0', got '%s'", vf.Name) + } + if !vf.IsVirtual { + t.Errorf("expected IsVirtual to be true for eth0") + } + if vf.Model != "Ethernet Adaptive Virtual Function" { + t.Errorf("expected Model 'Ethernet Adaptive Virtual Function', got '%s'", vf.Model) + } + + // Test physical function + pf := nics[1] + if pf.Name != "eth1" { + t.Errorf("expected Name 'eth1', got '%s'", pf.Name) + } + if pf.IsVirtual { + t.Errorf("expected IsVirtual to be false for eth1") + } + if pf.Model != "Ethernet Controller E810-C" { + t.Errorf("expected Model 'Ethernet Controller E810-C', got '%s'", pf.Model) + } +} + +var nicinfo = ` +Interface: ens7f0np0 +Vendor: Broadcom Inc. 
and subsidiaries +Model: BCM57416 NetXtreme-E Dual-Media 10G RDMA Ethernet Controller (NetXtreme-E Dual-port 10GBASE-T Ethernet OCP 3.0 Adapter (BCM957416N4160C)) +Settings for ens7f0np0: + Supported ports: [ TP ] + Supported link modes: 1000baseT/Full + 10000baseT/Full + Supported pause frame use: Symmetric Receive-only + Supports auto-negotiation: Yes + Supported FEC modes: Not reported + Advertised link modes: 1000baseT/Full + 10000baseT/Full + Advertised pause frame use: No + Advertised auto-negotiation: Yes + Advertised FEC modes: Not reported + Speed: 1000Mb/s + Lanes: 1 + Duplex: Full + Auto-negotiation: on + Port: Twisted Pair + PHYAD: 12 + Transceiver: internal + MDI-X: Unknown + Supports Wake-on: g + Wake-on: g + Current message level: 0x00002081 (8321) + drv tx_err hw + Link detected: yes +driver: bnxt_en +version: 6.13.7-061307-generic +firmware-version: 227.0.134.0/pkg 227.1.111.0 +expansion-rom-version: +bus-info: 0000:4c:00.0 +supports-statistics: yes +supports-test: yes +supports-eeprom-access: yes +supports-register-dump: yes +supports-priv-flags: no +Coalesce parameters for ens7f0np0: +Adaptive RX: off TX: off +stats-block-usecs: 0 +sample-interval: 0 +pkt-rate-low: 0 +pkt-rate-high: 0 + +rx-usecs: 200 +rx-frames: 0 +rx-usecs-irq: 0 +rx-frames-irq: 0 + +tx-usecs: 150 +tx-frames: 0 +tx-usecs-irq: 0 +tx-frames-irq: 0 +MAC Address: 04:32:01:f3:e1:a4 +NUMA Node: 0 +Virtual Function: no +CPU Affinity: 124:0-143;125:0-143;126:0-143;127:0-143;128:0-143;129:0-143;130:0-143;131:0-143;132:0-143;133:0-143;134:0-143;135:0-143;136:0-143;137:0-143;138:0-143;139:0-143;140:0-143;141:0-143;142:0-143;143:0-143;144:0-143;145:0-143;146:0-143;147:0-143;148:0-143;149:0-143;150:0-143;151:0-143;152:0-143;153:0-143;154:0-143;155:0-143;156:0-143;157:0-143;158:0-143;159:0-143;160:0-143;161:0-143;162:0-143;163:0-143;164:0-143;165:0-143;166:0-143;167:0-143;168:0-143;169:0-143;170:0-143;171:0-143;172:0-143;173:0-143;174:0-143;175:0-143;176:0-143;177:0-143;178:0-143;179:0-143;180:0-143;181:0-143;182:0-143;184:0-143;185:0-143;186:0-143;187:0-143;188:0-143;189:0-143;190:0-143;191:0-143;192:0-143;193:0-143;194:0-143;195:0-143;196:0-143;197:0-143;198:0-143; +IRQ Balance: Disabled +---------------------------------------- +Interface: ens7f1np1 +Vendor: Broadcom Inc. and subsidiaries +Model: BCM57416 NetXtreme-E Dual-Media 10G RDMA Ethernet Controller (NetXtreme-E Dual-port 10GBASE-T Ethernet OCP 3.0 Adapter (BCM957416N4160C)) +Settings for ens7f1np1: + Supported ports: [ TP ] + Supported link modes: 1000baseT/Full + 10000baseT/Full + Supported pause frame use: Symmetric Receive-only + Supports auto-negotiation: Yes + Supported FEC modes: Not reported + Advertised link modes: 1000baseT/Full + 10000baseT/Full + Advertised pause frame use: Symmetric + Advertised auto-negotiation: Yes + Advertised FEC modes: Not reported + Speed: Unknown! + Duplex: Unknown! 
(255) + Auto-negotiation: on + Port: Twisted Pair + PHYAD: 13 + Transceiver: internal + MDI-X: Unknown + Supports Wake-on: g + Wake-on: g + Current message level: 0x00002081 (8321) + drv tx_err hw + Link detected: no +driver: bnxt_en +version: 6.13.7-061307-generic +firmware-version: 227.0.134.0/pkg 227.1.111.0 +expansion-rom-version: +bus-info: 0000:4c:00.1 +supports-statistics: yes +supports-test: yes +supports-eeprom-access: yes +supports-register-dump: yes +supports-priv-flags: no +Coalesce parameters for ens7f1np1: +Adaptive RX: on TX: on +stats-block-usecs: 0 +sample-interval: 0 +pkt-rate-low: 0 +pkt-rate-high: 0 + +rx-usecs: 100 +rx-frames: 0 +rx-usecs-irq: 0 +rx-frames-irq: 0 + +tx-usecs: 100 +tx-frames: 0 +tx-usecs-irq: 0 +tx-frames-irq: 0 +MAC Address: 04:32:01:f3:e1:a5 +NUMA Node: 0 +Virtual Function: no +CPU Affinity: 454:0-143;455:0-143;456:0-143;457:0-143;458:0-143;459:0-143;460:0-143;461:0-143;462:0-143;463:0-143;464:0-143;465:0-143;466:0-143;467:0-143;468:0-143;469:0-143;470:0-143;471:0-143;472:0-143;473:0-143;474:0-143;475:0-143;476:0-143;477:0-143;478:0-143;479:0-143;480:0-143;481:0-143;482:0-143;483:0-143;484:0-143;485:0-143;486:0-143;487:0-143;488:0-143;489:0-143;490:0-143;491:0-143;492:0-143;493:0-143;494:0-143;495:0-143;496:0-143;497:0-143;498:0-143;499:0-143;500:0-143;501:0-143;502:0-143;503:0-143;504:0-143;505:0-143;506:0-143;507:0-143;508:0-143;509:0-143;510:0-143;511:0-143;512:0-143;513:0-143;514:0-143;515:0-143;516:0-143;517:0-143;518:0-143;519:0-143;520:0-143;521:0-143;522:0-143;523:0-143;524:0-143;525:0-143;526:0-143;527:0-143; +IRQ Balance: Disabled +---------------------------------------- +Interface: enx2aecf92702ac +Vendor: Netchip Technology, Inc. +Model: Linux-USB Ethernet/RNDIS Gadget +Settings for enx2aecf92702ac: + Supported ports: [ ] + Supported link modes: Not reported + Supported pause frame use: No + Supports auto-negotiation: No + Supported FEC modes: Not reported + Advertised link modes: Not reported + Advertised pause frame use: No + Advertised auto-negotiation: No + Advertised FEC modes: Not reported + Speed: 425Mb/s + Duplex: Half + Auto-negotiation: off + Port: Twisted Pair + PHYAD: 0 + Transceiver: internal + MDI-X: Unknown + Current message level: 0x00000007 (7) + drv probe link + Link detected: yes +driver: cdc_ether +version: 6.13.7-061307-generic +firmware-version: CDC Ethernet Device +expansion-rom-version: +bus-info: usb-0000:2c:00.0-3.1 +supports-statistics: no +supports-test: no +supports-eeprom-access: no +supports-register-dump: no +supports-priv-flags: no +MAC Address: 2a:ec:f9:27:02:ac +NUMA Node: +Virtual Function: no +CPU Affinity: +IRQ Balance: Disabled +---------------------------------------- +` diff --git a/internal/report/power.go b/internal/report/power.go new file mode 100644 index 00000000..4d05a74e --- /dev/null +++ b/internal/report/power.go @@ -0,0 +1,258 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package report + +import ( + "encoding/csv" + "fmt" + "log/slog" + "strconv" + "strings" + + "perfspect/internal/script" +) + +func elcFieldValuesFromOutput(outputs map[string]script.ScriptOutput) (fieldValues []Field) { + if outputs[script.ElcScriptName].Stdout == "" { + return + } + r := csv.NewReader(strings.NewReader(outputs[script.ElcScriptName].Stdout)) + rows, err := r.ReadAll() + if err != nil { + return + } + if len(rows) < 2 { + return + } + // first row is headers + for fieldNamesIndex, fieldName := range rows[0] { + values := []string{} + // value rows + for _, 
row := range rows[1:] { + values = append(values, row[fieldNamesIndex]) + } + fieldValues = append(fieldValues, Field{Name: fieldName, Values: values}) + } + + // let's add an interpretation of the values in an additional column + values := []string{} + // value rows + for _, row := range rows[1:] { + var mode string + if row[2] == "IO" { + if row[5] == "0" && row[6] == "0" && row[7] == "0" { + mode = "Latency Optimized" + } else if row[5] == "800" && row[6] == "10" && row[7] == "94" { + mode = "Default" + } else { + mode = "Custom" + } + } else { // COMPUTE + switch row[5] { + case "0": + mode = "Latency Optimized" + case "1200": + mode = "Default" + default: + mode = "Custom" + } + } + values = append(values, mode) + } + fieldValues = append(fieldValues, Field{Name: "Mode", Values: values}) + return +} + +func elcSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + fieldValues := elcFieldValuesFromOutput(outputs) + if len(fieldValues) == 0 { + return "" + } + if len(fieldValues) < 10 { + return "" + } + if len(fieldValues[9].Values) == 0 { + return "" + } + summary := fieldValues[9].Values[0] + for _, value := range fieldValues[9].Values[1:] { + if value != summary { + return "mixed" + } + } + return summary +} + +// epbFromOutput gets EPB value from script outputs +func epbFromOutput(outputs map[string]script.ScriptOutput) string { + if outputs[script.EpbScriptName].Exitcode != 0 || len(outputs[script.EpbScriptName].Stdout) == 0 { + slog.Warn("EPB scripts failed or produced no output") + return "" + } + epb := strings.TrimSpace(outputs[script.EpbScriptName].Stdout) + msr, err := strconv.ParseInt(epb, 16, 0) + if err != nil { + slog.Error("failed to parse EPB value", slog.String("error", err.Error()), slog.String("epb", epb)) + return "" + } + return epbValToLabel(int(msr)) +} + +func epbValToLabel(msr int) string { + var val string + if msr >= 0 && msr <= 3 { + val = "Performance" + } else if msr >= 4 && msr <= 7 { + val = "Balanced Performance" + } else if msr >= 8 && msr <= 11 { + val = "Balanced Energy" + } else if msr >= 12 { + val = "Energy Efficient" + } + return fmt.Sprintf("%s (%d)", val, msr) +} + +func eppValToLabel(msr int) string { + var val string + if msr == 128 { + val = "Normal" + } else if msr < 128 && msr > 64 { + val = "Balanced Performance" + } else if msr <= 64 { + val = "Performance" + } else if msr > 128 && msr < 192 { + val = "Balanced Powersave" + } else { + val = "Powersave" + } + return fmt.Sprintf("%s (%d)", val, msr) +} + +// eppFromOutput gets EPP value from script outputs +// IF 0x774[42] is '1' AND 0x774[60] is '0' +// THEN +// +// get EPP from 0x772 (package) +// +// ELSE +// +// get EPP from 0x774 (per core) +func eppFromOutput(outputs map[string]script.ScriptOutput) string { + // if we couldn't get the EPP values, return empty string + if outputs[script.EppValidScriptName].Exitcode != 0 || len(outputs[script.EppValidScriptName].Stdout) == 0 || + outputs[script.EppPackageControlScriptName].Exitcode != 0 || len(outputs[script.EppPackageControlScriptName].Stdout) == 0 || + outputs[script.EppPackageScriptName].Exitcode != 0 || len(outputs[script.EppPackageScriptName].Stdout) == 0 { + slog.Warn("EPP scripts failed or produced no output") + return "" + } + // check if the epp valid bit is set and consistent across all cores + var eppValid string + for i, line := range strings.Split(outputs[script.EppValidScriptName].Stdout, "\n") { // MSR 0x774, bit 60 + if line == "" { + continue + } + currentEpbValid := strings.TrimSpace(strings.Split(line, 
":")[1]) + if i == 0 { + eppValid = currentEpbValid + continue + } + if currentEpbValid != eppValid { + slog.Warn("EPP valid bit is inconsistent across cores") + return "inconsistent" + } + } + // check if epp package control bit is set and consistent across all cores + var eppPkgCtrl string + for i, line := range strings.Split(outputs[script.EppPackageControlScriptName].Stdout, "\n") { // MSR 0x774, bit 42 + if line == "" { + continue + } + currentEppPkgCtrl := strings.TrimSpace(strings.Split(line, ":")[1]) + if i == 0 { + eppPkgCtrl = currentEppPkgCtrl + continue + } + if currentEppPkgCtrl != eppPkgCtrl { + slog.Warn("EPP package control bit is inconsistent across cores") + return "inconsistent" + } + } + if eppPkgCtrl == "1" && eppValid == "0" { + eppPackage := strings.TrimSpace(outputs[script.EppPackageScriptName].Stdout) // MSR 0x772, bits 24-31 (package) + msr, err := strconv.ParseInt(eppPackage, 16, 0) + if err != nil { + slog.Error("failed to parse EPP package value", slog.String("error", err.Error()), slog.String("epp", eppPackage)) + return "" + } + return eppValToLabel(int(msr)) + } else { + var epp string + for i, line := range strings.Split(outputs[script.EppScriptName].Stdout, "\n") { // MSR 0x774, bits 24-31 (per-core) + if line == "" { + continue + } + currentEpp := strings.TrimSpace(strings.Split(line, ":")[1]) + if i == 0 { + epp = currentEpp + continue + } + if currentEpp != epp { + slog.Warn("EPP is inconsistent across cores") + return "inconsistent" + } + } + msr, err := strconv.ParseInt(epp, 16, 0) + if err != nil { + slog.Error("failed to parse EPP value", slog.String("error", err.Error()), slog.String("epp", epp)) + return "" + } + return eppValToLabel(int(msr)) + } +} + +type cstateInfo struct { + Name string + Status string +} + +func c6FromOutput(outputs map[string]script.ScriptOutput) string { + cstatesInfo := cstatesFromOutput(outputs) + if cstatesInfo == nil { + return "" + } + for _, cstateInfo := range cstatesInfo { + if cstateInfo.Name == "C6" { + return cstateInfo.Status + } + } + return "" +} + +func cstatesFromOutput(outputs map[string]script.ScriptOutput) []cstateInfo { + var cstatesInfo []cstateInfo + output := outputs[script.CstatesScriptName].Stdout + for line := range strings.SplitSeq(output, "\n") { + if line == "" { + continue + } + parts := strings.Split(line, ",") + if len(parts) != 2 { + return nil + } + cstatesInfo = append(cstatesInfo, cstateInfo{Name: parts[0], Status: parts[1]}) + } + return cstatesInfo +} + +func cstatesSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + cstatesInfo := cstatesFromOutput(outputs) + if cstatesInfo == nil { + return "" + } + summaryParts := []string{} + for _, cstateInfo := range cstatesInfo { + summaryParts = append(summaryParts, fmt.Sprintf("%s: %s", cstateInfo.Name, cstateInfo.Status)) + } + return strings.Join(summaryParts, ", ") +} diff --git a/internal/report/prefetcher_defs.go b/internal/report/prefetcher.go similarity index 56% rename from internal/report/prefetcher_defs.go rename to internal/report/prefetcher.go index b083d735..a1f9c3ef 100644 --- a/internal/report/prefetcher_defs.go +++ b/internal/report/prefetcher.go @@ -1,11 +1,17 @@ -package report - -import "fmt" - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -// prefetcher_defs.go +package report + +import ( + "fmt" + "log/slog" + "perfspect/internal/script" + "slices" + "strconv" + "strings" +) + // prefetchers are enabled when associated bit in msr is 0 type PrefetcherDefinition struct 
{ @@ -131,3 +137,103 @@ func GetPrefetcherDefByName(name string) (PrefetcherDefinition, error) { func GetPrefetcherDefinitions() []PrefetcherDefinition { return prefetcherDefinitions } + +func isPrefetcherEnabled(msrValue string, bit int) (bool, error) { + if msrValue == "" { + return false, fmt.Errorf("msrValue is empty") + } + msrInt, err := strconv.ParseInt(msrValue, 16, 64) + if err != nil { + return false, fmt.Errorf("failed to parse msrValue: %s, %v", msrValue, err) + } + bitMask := int64(1) << bit + // enabled if bit is zero + return bitMask&msrInt == 0, nil +} + +func prefetchersFromOutput(outputs map[string]script.ScriptOutput) [][]string { + out := make([][]string, 0) + uarch := UarchFromOutput(outputs) + if uarch == "" { + // uarch is required + return [][]string{} + } + for _, pf := range prefetcherDefinitions { + if slices.Contains(pf.Uarchs, "all") || slices.Contains(pf.Uarchs, uarch[:3]) { + var scriptName string + switch pf.Msr { + case MsrPrefetchControl: + scriptName = script.PrefetchControlName + case MsrPrefetchers: + scriptName = script.PrefetchersName + case MsrAtomPrefTuning1: + scriptName = script.PrefetchersAtomName + default: + slog.Error("unknown msr for prefetcher", slog.String("msr", fmt.Sprintf("0x%x", pf.Msr))) + continue + } + msrVal := valFromRegexSubmatch(outputs[scriptName].Stdout, `^([0-9a-fA-F]+)`) + if msrVal == "" { + continue + } + var enabledDisabled string + enabled, err := isPrefetcherEnabled(msrVal, pf.Bit) + if err != nil { + slog.Warn("error checking prefetcher enabled status", slog.String("error", err.Error())) + continue + } + if enabled { + enabledDisabled = "Enabled" + } else { + enabledDisabled = "Disabled" + } + out = append(out, []string{pf.ShortName, pf.Description, fmt.Sprintf("0x%04X", pf.Msr), strconv.Itoa(pf.Bit), enabledDisabled}) + } + } + return out +} + +func prefetchersSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + uarch := UarchFromOutput(outputs) + if uarch == "" { + // uarch is required + return "" + } + var prefList []string + for _, pf := range prefetcherDefinitions { + if slices.Contains(pf.Uarchs, "all") || slices.Contains(pf.Uarchs, uarch[:3]) { + var scriptName string + switch pf.Msr { + case MsrPrefetchControl: + scriptName = script.PrefetchControlName + case MsrPrefetchers: + scriptName = script.PrefetchersName + case MsrAtomPrefTuning1: + scriptName = script.PrefetchersAtomName + default: + slog.Error("unknown msr for prefetcher", slog.String("msr", fmt.Sprintf("0x%x", pf.Msr))) + continue + } + msrVal := valFromRegexSubmatch(outputs[scriptName].Stdout, `^([0-9a-fA-F]+)`) + if msrVal == "" { + continue + } + var enabledDisabled string + enabled, err := isPrefetcherEnabled(msrVal, pf.Bit) + if err != nil { + slog.Warn("error checking prefetcher enabled status", slog.String("error", err.Error())) + continue + } + if enabled { + enabledDisabled = "Enabled" + } else { + enabledDisabled = "Disabled" + } + prefList = append(prefList, fmt.Sprintf("%s: %s", pf.ShortName, enabledDisabled)) + } + } + if len(prefList) > 0 { + return strings.Join(prefList, ", ") + } + return "None" +} diff --git a/internal/report/security.go b/internal/report/security.go new file mode 100644 index 00000000..74a96f99 --- /dev/null +++ b/internal/report/security.go @@ -0,0 +1,48 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package report + +import ( + "fmt" + "sort" + "strings" + + "perfspect/internal/script" +) + +func cveInfoFromOutput(outputs map[string]script.ScriptOutput) 
[][]string { + vulns := make(map[string]string) + // from spectre-meltdown-checker + for _, pair := range valsArrayFromRegexSubmatch(outputs[script.CveScriptName].Stdout, `(CVE-\d+-\d+): (.+)`) { + vulns[pair[0]] = pair[1] + } + // sort the vulnerabilities by CVE ID + var ids []string + for id := range vulns { + ids = append(ids, id) + } + sort.Strings(ids) + cves := make([][]string, 0) + for _, id := range ids { + cves = append(cves, []string{id, vulns[id]}) + } + return cves +} + +func cveSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + cves := cveInfoFromOutput(outputs) + if len(cves) == 0 { + return "" + } + var numOK int + var numVuln int + for _, cve := range cves { + if strings.HasPrefix(cve[1], "OK") { + numOK++ + } else { + numVuln++ + } + } + return fmt.Sprintf("%d OK, %d Vulnerable", numOK, numVuln) +} diff --git a/internal/report/table_helpers_stacks.go b/internal/report/stacks.go similarity index 50% rename from internal/report/table_helpers_stacks.go rename to internal/report/stacks.go index 2b9554eb..dddc1ba9 100644 --- a/internal/report/table_helpers_stacks.go +++ b/internal/report/stacks.go @@ -5,12 +5,147 @@ package report import ( "fmt" + "log/slog" "math" + "perfspect/internal/script" "regexp" "strconv" "strings" ) +// getSectionsFromOutput parses output into sections, where the section name +// is the key in a map and the section content is the value +// sections are delimited by lines of the form ##########
<section name> ##########
+// example:
+// ########## <section name> ##########
+// <content>
+// <content>
+// ########## <another section name> ##########
+// <content>
+// +// returns a map of section name to section content +// if the output is empty or contains no section headers, returns an empty map +// if a section contains no content, the value for that section is an empty string +func getSectionsFromOutput(output string) map[string]string { + sections := make(map[string]string) + re := regexp.MustCompile(`^########## (.+?) ##########$`) + var sectionName string + for line := range strings.SplitSeq(output, "\n") { + // check if the line is a section header + match := re.FindStringSubmatch(line) + if match != nil { + // if the section name isn't in the map yet, add it + if _, ok := sections[match[1]]; !ok { + sections[match[1]] = "" + } + // save the section name + sectionName = match[1] + continue + } + if sectionName != "" { + sections[sectionName] += line + "\n" + } + } + return sections +} + +// sectionValueFromOutput returns the content of a section from the output +// if the section doesn't exist, returns an empty string +// if the section exists but has no content, returns an empty string +func sectionValueFromOutput(output string, sectionName string) string { + sections := getSectionsFromOutput(output) + if len(sections) == 0 { + slog.Warn("no sections in output") + return "" + } + if _, ok := sections[sectionName]; !ok { + slog.Warn("section not found in output", slog.String("section", sectionName)) + return "" + } + if sections[sectionName] == "" { + slog.Warn("No content for section:", slog.String("section", sectionName)) + return "" + } + return sections[sectionName] +} + +func javaFoldedFromOutput(outputs map[string]script.ScriptOutput) string { + sections := getSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) + if len(sections) == 0 { + slog.Warn("no sections in collapsed call stack output") + return "" + } + javaFolded := make(map[string]string) + re := regexp.MustCompile(`^async-profiler (\d+) (.*)$`) + for header, stacks := range sections { + match := re.FindStringSubmatch(header) + if match == nil { + continue + } + pid := match[1] + processName := match[2] + if stacks == "" { + slog.Warn("no stacks for java process", slog.String("header", header)) + continue + } + if strings.HasPrefix(stacks, "Failed to inject profiler") { + slog.Error("profiling data error", slog.String("header", header)) + continue + } + _, ok := javaFolded[processName] + if processName == "" { + processName = "java (" + pid + ")" + } else if ok { + processName = processName + " (" + pid + ")" + } + javaFolded[processName] = stacks + } + folded, err := mergeJavaFolded(javaFolded) + if err != nil { + slog.Error("failed to merge java stacks", slog.String("error", err.Error())) + } + return folded +} + +func nativeFoldedFromOutput(outputs map[string]script.ScriptOutput) string { + sections := getSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) + if len(sections) == 0 { + slog.Warn("no sections in collapsed call stack output") + return "" + } + var dwarfFolded, fpFolded string + for header, content := range sections { + switch header { + case "perf_dwarf": + dwarfFolded = content + case "perf_fp": + fpFolded = content + } + } + if dwarfFolded == "" && fpFolded == "" { + return "" + } + folded, err := mergeSystemFolded(fpFolded, dwarfFolded) + if err != nil { + slog.Error("failed to merge native stacks", slog.String("error", err.Error())) + } + return folded +} + +func maxRenderDepthFromOutput(outputs map[string]script.ScriptOutput) string { + sections := 
getSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) + if len(sections) == 0 { + slog.Warn("no sections in collapsed call stack output") + return "" + } + for header, content := range sections { + if header == "maximum depth" { + return content + } + } + return "" +} + // ProcessStacks ... // [processName][callStack]=count type ProcessStacks map[string]Stacks diff --git a/internal/report/stacks_test.go b/internal/report/stacks_test.go new file mode 100644 index 00000000..aae2cb5e --- /dev/null +++ b/internal/report/stacks_test.go @@ -0,0 +1,157 @@ +package report + +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +import ( + "reflect" + "testing" +) + +func TestGetSectionsFromOutput(t *testing.T) { + tests := []struct { + name string + output string + want map[string]string + }{ + { + name: "Valid sections with content", + output: `########## Section A ########## +Content A1 +Content A2 +########## Section B ########## +Content B1 +Content B2 +########## Section C ########## +Content C1`, + want: map[string]string{ + "Section A": "Content A1\nContent A2\n", + "Section B": "Content B1\nContent B2\n", + "Section C": "Content C1\n", + }, + }, + { + name: "Valid sections with empty content", + output: `########## Section A ########## +########## Section B ########## +########## Section C ##########`, + want: map[string]string{ + "Section A": "", + "Section B": "", + "Section C": "", + }, + }, + { + name: "No sections", + output: "No section headers here", + want: map[string]string{}, + }, + { + name: "Empty output", + output: ``, + want: map[string]string{}, + }, + { + name: "Empty lines in output", + output: "\n\n\n", + want: map[string]string{}, + }, + { + name: "Section with trailing newlines", + output: `########## Section A ########## + +Content A1 + +########## Section B ########## +Content B1`, + want: map[string]string{ + "Section A": "\nContent A1\n\n", + "Section B": "Content B1\n", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := getSectionsFromOutput(tt.output) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("getSectionsFromOutput() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestSectionValueFromOutput(t *testing.T) { + tests := []struct { + name string + output string + sectionName string + want string + }{ + { + name: "Section A exists with content", + output: `########## Section A ########## +Content A1 +Content A2 +########## Section B ########## +Content B1 +Content B2`, + sectionName: "Section A", + want: "Content A1\nContent A2\n", + }, + { + name: "Section B exists with content", + output: `########## Section A ########## +Content A1 +Content A2 +########## Section B ########## +Content B1 +Content B2`, + sectionName: "Section B", + want: "Content B1\nContent B2\n", + }, + { + name: "Section exists with no content", + output: `########## Section A ########## +########## Section B ########## +Content B1`, + sectionName: "Section A", + want: "", + }, + { + name: "Section does not exist", + output: `########## Section A ########## +Content A1 +########## Section B ########## +Content B1`, + sectionName: "Section C", + want: "", + }, + { + name: "Empty output", + output: "", + sectionName: "Section A", + want: "", + }, + { + name: "Section with trailing newlines", + output: `########## Section A ########## + +Content A1 + +########## Section B ########## +Content B1`, + sectionName: "Section A", + want: "\nContent A1\n\n", + }, + } + + for _, tt := range tests 
{ + t.Run(tt.name, func(t *testing.T) { + got := sectionValueFromOutput(tt.output, tt.sectionName) + if got != tt.want { + t.Errorf("sectionValueFromOutput() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/report/storage.go b/internal/report/storage.go new file mode 100644 index 00000000..afa007f0 --- /dev/null +++ b/internal/report/storage.go @@ -0,0 +1,143 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package report + +import ( + "fmt" + "log/slog" + "regexp" + "strings" + + "perfspect/internal/script" +) + +type diskInfo struct { + Name string + Model string + Size string + MountPoint string + Type string + RequestQueueSize string + MinIOSize string + FirmwareVersion string + PCIeAddress string + NUMANode string + LinkSpeed string + LinkWidth string + MaxLinkSpeed string + MaxLinkWidth string +} + +func diskInfoFromOutput(outputs map[string]script.ScriptOutput) []diskInfo { + diskInfos := []diskInfo{} + for i, line := range strings.Split(outputs[script.DiskInfoScriptName].Stdout, "\n") { + // first line is the header + if i == 0 { + continue + } + if line == "" { + continue + } + fields := strings.Split(line, "|") + if len(fields) != 14 { + slog.Error("unexpected number of fields in disk info output", slog.String("line", line)) + return nil + } + // clean up the model name + fields[1] = strings.TrimSpace(fields[1]) + // if we don't have a firmware version, try to get it from another source + if fields[7] == "" { + reFwRev := regexp.MustCompile(`FwRev=(\w+)`) + reDev := regexp.MustCompile(fmt.Sprintf(`/dev/%s:`, fields[0])) + devFound := false + for line := range strings.SplitSeq(outputs[script.HdparmScriptName].Stdout, "\n") { + if !devFound { + if reDev.FindString(line) != "" { + devFound = true + continue + } + } else { + match := reFwRev.FindStringSubmatch(line) + if match != nil { + fields[7] = match[1] + break + } + } + } + } + diskInfos = append(diskInfos, diskInfo{fields[0], fields[1], fields[2], fields[3], fields[4], fields[5], fields[6], fields[7], fields[8], fields[9], fields[10], fields[11], fields[12], fields[13]}) + } + return diskInfos +} + +func filesystemFieldValuesFromOutput(outputs map[string]script.ScriptOutput) []Field { + fieldValues := []Field{} + reFindmnt := regexp.MustCompile(`(.*)\s(.*)\s(.*)\s(.*)`) + for i, line := range strings.Split(outputs[script.DfScriptName].Stdout, "\n") { + if line == "" { + continue + } + fields := strings.Fields(line) + // "Mounted On" gets split into two fields, rejoin + if i == 0 && fields[len(fields)-2] == "Mounted" && fields[len(fields)-1] == "on" { + fields[len(fields)-2] = "Mounted on" + fields = fields[:len(fields)-1] + for _, field := range fields { + fieldValues = append(fieldValues, Field{Name: field, Values: []string{}}) + } + // add an additional field + fieldValues = append(fieldValues, Field{Name: "Mount Options", Values: []string{}}) + continue + } + if len(fields) != len(fieldValues)-1 { + slog.Error("unexpected number of fields in df output", slog.String("line", line)) + return nil + } + for i, field := range fields { + fieldValues[i].Values = append(fieldValues[i].Values, field) + } + // get mount options for the current file system + var options string + for i, line := range strings.Split(outputs[script.FindMntScriptName].Stdout, "\n") { + if i == 0 { + continue + } + match := reFindmnt.FindStringSubmatch(line) + if match != nil { + target := match[1] + source := match[2] + if fields[0] == source && fields[5] == target { + options = match[4] + 
break + } + } + } + fieldValues[len(fieldValues)-1].Values = append(fieldValues[len(fieldValues)-1].Values, options) + } + return fieldValues +} + +func diskSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + disks := diskInfoFromOutput(outputs) + if len(disks) == 0 { + return "N/A" + } + type ModelSize struct { + model string + size string + } + modelSizeCount := make(map[ModelSize]int) + for _, disk := range disks { + if disk.Model == "" { + continue + } + modelSize := ModelSize{model: disk.Model, size: disk.Size} + modelSizeCount[modelSize]++ + } + var summary []string + for modelSize, count := range modelSizeCount { + summary = append(summary, fmt.Sprintf("%dx %s %s", count, modelSize.size, modelSize.model)) + } + return strings.Join(summary, ", ") +} diff --git a/internal/report/system.go b/internal/report/system.go new file mode 100644 index 00000000..ea5fde44 --- /dev/null +++ b/internal/report/system.go @@ -0,0 +1,99 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package report + +import ( + "fmt" + "strings" + "time" + + "perfspect/internal/cpus" + "perfspect/internal/script" +) + +func operatingSystemFromOutput(outputs map[string]script.ScriptOutput) string { + os := valFromRegexSubmatch(outputs[script.EtcReleaseScriptName].Stdout, `^PRETTY_NAME=\"(.+?)\"`) + centos := valFromRegexSubmatch(outputs[script.EtcReleaseScriptName].Stdout, `^(CentOS Linux release .*)`) + if centos != "" { + os = centos + } + return os +} + +func systemSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + // BASELINE: 1-node, 2x Intel® Xeon® , xx cores, 100W TDP, HT On/Off?, Turbo On/Off?, Total Memory xxx GB (xx slots/ xx GB/ xxxx MHz [run @ xxxx MHz] ), , , , . Test by Intel as of . + template := "1-node, %s, %sx %s, %s cores, %s TDP, %s %s, %s %s, Total Memory %s, BIOS %s, microcode %s, %s, %s, %s, %s. Test by Intel as of %s." + var systemType, socketCount, cpuModel, coreCount, tdp, htLabel, htOnOff, turboLabel, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion, date string + + // system type + systemType = valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + " " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) + // socket count + socketCount = valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(\d+)$`) + // CPU model + cpuModel = valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model name:\s*(.+?)$`) + // core count + coreCount = valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(\d+)$`) + // TDP + tdp = tdpFromOutput(outputs) + if tdp == "" { + tdp = "?" + } + vendor := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Vendor ID:\s*(.+)$`) + // hyperthreading + htLabel = "HT" + if vendor == cpus.AMDVendor { + htLabel = "SMT" + } + htOnOff = hyperthreadingFromOutput(outputs) + switch htOnOff { + case "Enabled": + htOnOff = "On" + case "Disabled": + htOnOff = "Off" + case "N/A": + htOnOff = "N/A" + default: + htOnOff = "?" + } + // turbo + turboLabel = "Turbo" + if vendor == cpus.AMDVendor { + turboLabel = "Boost" + } + turboOnOff = turboEnabledFromOutput(outputs) + if strings.Contains(strings.ToLower(turboOnOff), "enabled") { + turboOnOff = "On" + } else if strings.Contains(strings.ToLower(turboOnOff), "disabled") { + turboOnOff = "Off" + } else { + turboOnOff = "?" 
+ } + // memory + installedMem = installedMemoryFromOutput(outputs) + // BIOS + biosVersion = valFromRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, `^Version:\s*(.+?)$`) + // microcode + uCodeVersion = valFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`) + // NICs + nics = nicSummaryFromOutput(outputs) + // disks + disks = diskSummaryFromOutput(outputs) + // OS + operatingSystem = operatingSystemFromOutput(outputs) + // kernel + kernelVersion = valFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`) + // date + date = strings.TrimSpace(outputs[script.DateScriptName].Stdout) + // parse date so that we can format it + parsedTime, err := time.Parse("Mon Jan 2 15:04:05 MST 2006", date) // without AM/PM + if err != nil { + parsedTime, err = time.Parse("Mon Jan 2 15:04:05 AM MST 2006", date) // with AM/PM + } + if err == nil { + date = parsedTime.Format("January 2 2006") + } + + // put it all together + return fmt.Sprintf(template, systemType, socketCount, cpuModel, coreCount, tdp, htLabel, htOnOff, turboLabel, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion, date) +} diff --git a/internal/report/table_helpers.go b/internal/report/table_helpers.go index 4aee27ad..535cc83b 100644 --- a/internal/report/table_helpers.go +++ b/internal/report/table_helpers.go @@ -1,25 +1,13 @@ -package report - // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -// table_helpers.go contains helper functions that are used to extract values from the output of the scripts. +// table_helpers.go contains base helper functions that are used to extract values from the output of the scripts. + +package report import ( - "encoding/csv" - "fmt" - "log/slog" - "math/big" "regexp" - "sort" - "strconv" "strings" - "time" - - "perfspect/internal/cpus" - "perfspect/internal/script" - "perfspect/internal/util" - "slices" ) // valFromRegexSubmatch searches for a regex pattern in the given output string and returns the first captured group. @@ -137,1574 +125,3 @@ func getDmiDecodeEntries(dmiDecodeOutput string, dmiType string) (entries [][]st } return } - -// UarchFromOutput returns the architecture of the CPU that matches family, model, stepping, -// capid4, and devices information from the output or an empty string, if no match is found. 
-func UarchFromOutput(outputs map[string]script.ScriptOutput) string { - family := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) - model := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) - stepping := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) - capid4 := valFromRegexSubmatch(outputs[script.LspciBitsScriptName].Stdout, `^([0-9a-fA-F]+)`) - devices := valFromRegexSubmatch(outputs[script.LspciDevicesScriptName].Stdout, `^([0-9]+)`) - cpu, err := cpus.GetCPUExtended(family, model, stepping, capid4, devices) - if err == nil { - return cpu.MicroArchitecture - } - return "" -} - -func hyperthreadingFromOutput(outputs map[string]script.ScriptOutput) string { - family := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) - model := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) - stepping := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) - sockets := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) - coresPerSocket := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`) - cpuCount := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(.*:\s*(.+?)$`) - onlineCpus := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^On-line CPU\(s\) list:\s*(.+)$`) - threadsPerCore := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Thread\(s\) per core:\s*(.+)$`) - - numCPUs, err := strconv.Atoi(cpuCount) // logical CPUs - if err != nil { - slog.Error("error parsing cpus from lscpu") - return "" - } - onlineCpusList, err := util.SelectiveIntRangeToIntList(onlineCpus) // logical online CPUs - numOnlineCpus := len(onlineCpusList) - if err != nil { - slog.Error("error parsing online cpus from lscpu") - numOnlineCpus = 0 // set to 0 to indicate parsing failed, will use numCPUs instead - } - numThreadsPerCore, err := strconv.Atoi(threadsPerCore) // logical threads per core - if err != nil { - slog.Error("error parsing threads per core from lscpu") - numThreadsPerCore = 0 - } - numSockets, err := strconv.Atoi(sockets) - if err != nil { - slog.Error("error parsing sockets from lscpu") - return "" - } - numCoresPerSocket, err := strconv.Atoi(coresPerSocket) // physical cores - if err != nil { - slog.Error("error parsing cores per sockets from lscpu") - return "" - } - cpu, err := cpus.GetCPUExtended(family, model, stepping, "", "") - if err != nil { - return "" - } - if numOnlineCpus > 0 && numOnlineCpus < numCPUs { - // if online CPUs list is available, use it to determine the number of CPUs - // supersedes lscpu output of numCPUs which counts CPUs on the system, not online CPUs - numCPUs = numOnlineCpus - } - if cpu.LogicalThreadCount < 2 { - return "N/A" - } else if numThreadsPerCore == 1 { - // if threads per core is 1, hyperthreading is disabled - return "Disabled" - } else if numThreadsPerCore >= 2 { - // if threads per core is greater than or equal to 2, hyperthreading is enabled - return "Enabled" - } else if numCPUs > numCoresPerSocket*numSockets { - // if the threads per core attribute is not available, we can still check if hyperthreading is enabled - // by checking if the number of logical CPUs is greater than the number of physical cores - return "Enabled" - } else { - return "Disabled" - } -} - -func numaCPUListFromOutput(outputs map[string]script.ScriptOutput) string { - 
nodeCPUs := valsFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node[0-9] CPU\(.*:\s*(.+?)$`) - return strings.Join(nodeCPUs, " :: ") -} - -func ppinsFromOutput(outputs map[string]script.ScriptOutput) string { - uniquePpins := []string{} - for line := range strings.SplitSeq(outputs[script.PPINName].Stdout, "\n") { - parts := strings.Split(line, ":") - if len(parts) < 2 { - continue - } - ppin := strings.TrimSpace(parts[1]) - found := false - for _, p := range uniquePpins { - if string(p) == ppin { - found = true - break - } - } - if !found && ppin != "" { - uniquePpins = append(uniquePpins, ppin) - } - } - return strings.Join(uniquePpins, ", ") -} - -func channelsFromOutput(outputs map[string]script.ScriptOutput) string { - family := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) - model := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) - stepping := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) - capid4 := valFromRegexSubmatch(outputs[script.LspciBitsScriptName].Stdout, `^([0-9a-fA-F]+)`) - devices := valFromRegexSubmatch(outputs[script.LspciDevicesScriptName].Stdout, `^([0-9]+)`) - cpu, err := cpus.GetCPUExtended(family, model, stepping, capid4, devices) - if err != nil { - slog.Error("error getting CPU from CPUdb", slog.String("error", err.Error())) - return "" - } - return fmt.Sprintf("%d", cpu.MemoryChannelCount) -} - -func turboEnabledFromOutput(outputs map[string]script.ScriptOutput) string { - vendor := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Vendor ID:\s*(.+)$`) - switch vendor { - case cpus.IntelVendor: - val := valFromRegexSubmatch(outputs[script.CpuidScriptName].Stdout, `^Intel Turbo Boost Technology\s*= (.+?)$`) - if val == "true" { - return "Enabled" - } - if val == "false" { - return "Disabled" - } - return "" // unknown value - case cpus.AMDVendor: - val := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Frequency boost.*:\s*(.+?)$`) - if val != "" { - return val + " (AMD Frequency Boost)" - } - } - return "" -} - -func isPrefetcherEnabled(msrValue string, bit int) (bool, error) { - if msrValue == "" { - return false, fmt.Errorf("msrValue is empty") - } - msrInt, err := strconv.ParseInt(msrValue, 16, 64) - if err != nil { - return false, fmt.Errorf("failed to parse msrValue: %s, %v", msrValue, err) - } - bitMask := int64(1) << bit - // enabled if bit is zero - return bitMask&msrInt == 0, nil -} - -func prefetchersFromOutput(outputs map[string]script.ScriptOutput) [][]string { - out := make([][]string, 0) - uarch := UarchFromOutput(outputs) - if uarch == "" { - // uarch is required - return [][]string{} - } - for _, pf := range prefetcherDefinitions { - if slices.Contains(pf.Uarchs, "all") || slices.Contains(pf.Uarchs, uarch[:3]) { - var scriptName string - switch pf.Msr { - case MsrPrefetchControl: - scriptName = script.PrefetchControlName - case MsrPrefetchers: - scriptName = script.PrefetchersName - case MsrAtomPrefTuning1: - scriptName = script.PrefetchersAtomName - default: - slog.Error("unknown msr for prefetcher", slog.String("msr", fmt.Sprintf("0x%x", pf.Msr))) - continue - } - msrVal := valFromRegexSubmatch(outputs[scriptName].Stdout, `^([0-9a-fA-F]+)`) - if msrVal == "" { - continue - } - var enabledDisabled string - enabled, err := isPrefetcherEnabled(msrVal, pf.Bit) - if err != nil { - slog.Warn("error checking prefetcher enabled status", slog.String("error", err.Error())) - continue - } - if enabled { 
- enabledDisabled = "Enabled" - } else { - enabledDisabled = "Disabled" - } - out = append(out, []string{pf.ShortName, pf.Description, fmt.Sprintf("0x%04X", pf.Msr), strconv.Itoa(pf.Bit), enabledDisabled}) - } - } - return out -} - -func prefetchersSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - uarch := UarchFromOutput(outputs) - if uarch == "" { - // uarch is required - return "" - } - var prefList []string - for _, pf := range prefetcherDefinitions { - if slices.Contains(pf.Uarchs, "all") || slices.Contains(pf.Uarchs, uarch[:3]) { - var scriptName string - switch pf.Msr { - case MsrPrefetchControl: - scriptName = script.PrefetchControlName - case MsrPrefetchers: - scriptName = script.PrefetchersName - case MsrAtomPrefTuning1: - scriptName = script.PrefetchersAtomName - default: - slog.Error("unknown msr for prefetcher", slog.String("msr", fmt.Sprintf("0x%x", pf.Msr))) - continue - } - msrVal := valFromRegexSubmatch(outputs[scriptName].Stdout, `^([0-9a-fA-F]+)`) - if msrVal == "" { - continue - } - var enabledDisabled string - enabled, err := isPrefetcherEnabled(msrVal, pf.Bit) - if err != nil { - slog.Warn("error checking prefetcher enabled status", slog.String("error", err.Error())) - continue - } - if enabled { - enabledDisabled = "Enabled" - } else { - enabledDisabled = "Disabled" - } - prefList = append(prefList, fmt.Sprintf("%s: %s", pf.ShortName, enabledDisabled)) - } - } - if len(prefList) > 0 { - return strings.Join(prefList, ", ") - } - return "None" -} - -func acceleratorNames() []string { - var names []string - for _, accel := range acceleratorDefinitions { - names = append(names, accel.Name) - } - return names -} - -func acceleratorCountsFromOutput(outputs map[string]script.ScriptOutput) []string { - var counts []string - lshw := outputs[script.LshwScriptName].Stdout - for _, accel := range acceleratorDefinitions { - regex := fmt.Sprintf("%s:%s", accel.MfgID, accel.DevID) - re := regexp.MustCompile(regex) - count := len(re.FindAllString(lshw, -1)) - counts = append(counts, fmt.Sprintf("%d", count)) - } - return counts -} - -func acceleratorWorkQueuesFromOutput(outputs map[string]script.ScriptOutput) []string { - var queues []string - for _, accel := range acceleratorDefinitions { - if accel.Name == "IAA" || accel.Name == "DSA" { - var scriptName string - if accel.Name == "IAA" { - scriptName = script.IaaDevicesScriptName - } else { - scriptName = script.DsaDevicesScriptName - } - devices := outputs[scriptName].Stdout - lines := strings.Split(devices, "\n") - // get non-empty lines - var nonEmptyLines []string - for _, line := range lines { - if strings.TrimSpace(line) != "" { - nonEmptyLines = append(nonEmptyLines, line) - } - } - if len(nonEmptyLines) == 0 { - queues = append(queues, "None") - } else { - queues = append(queues, strings.Join(nonEmptyLines, ", ")) - } - } else { - queues = append(queues, "N/A") - } - } - return queues -} - -func acceleratorFullNamesFromYaml() []string { - var fullNames []string - for _, accel := range acceleratorDefinitions { - fullNames = append(fullNames, accel.FullName) - } - return fullNames -} - -func acceleratorDescriptionsFromYaml() []string { - var descriptions []string - for _, accel := range acceleratorDefinitions { - descriptions = append(descriptions, accel.Description) - } - return descriptions -} - -func tdpFromOutput(outputs map[string]script.ScriptOutput) string { - msrHex := strings.TrimSpace(outputs[script.PackagePowerLimitName].Stdout) - msr, err := strconv.ParseInt(msrHex, 16, 0) - if err != nil || msr 
== 0 { - return "" - } - return fmt.Sprint(msr/8) + "W" -} - -func chaCountFromOutput(outputs map[string]script.ScriptOutput) string { - // output is the result of three rdmsr calls - // - client cha count - // - cha count - // - spr cha count - // stop when we find a non-zero value - // note: rdmsr writes to stderr on error so we will likely have fewer than 3 lines in stdout - for hexCount := range strings.SplitSeq(outputs[script.ChaCountScriptName].Stdout, "\n") { - if hexCount != "" && hexCount != "0" { - count, err := strconv.ParseInt(hexCount, 16, 64) - if err == nil { - return fmt.Sprintf("%d", count) - } - } - } - return "" -} - -func elcFieldValuesFromOutput(outputs map[string]script.ScriptOutput) (fieldValues []Field) { - if outputs[script.ElcScriptName].Stdout == "" { - return - } - r := csv.NewReader(strings.NewReader(outputs[script.ElcScriptName].Stdout)) - rows, err := r.ReadAll() - if err != nil { - return - } - if len(rows) < 2 { - return - } - // first row is headers - for fieldNamesIndex, fieldName := range rows[0] { - values := []string{} - // value rows - for _, row := range rows[1:] { - values = append(values, row[fieldNamesIndex]) - } - fieldValues = append(fieldValues, Field{Name: fieldName, Values: values}) - } - - // let's add an interpretation of the values in an additional column - values := []string{} - // value rows - for _, row := range rows[1:] { - var mode string - if row[2] == "IO" { - if row[5] == "0" && row[6] == "0" && row[7] == "0" { - mode = "Latency Optimized" - } else if row[5] == "800" && row[6] == "10" && row[7] == "94" { - mode = "Default" - } else { - mode = "Custom" - } - } else { // COMPUTE - switch row[5] { - case "0": - mode = "Latency Optimized" - case "1200": - mode = "Default" - default: - mode = "Custom" - } - } - values = append(values, mode) - } - fieldValues = append(fieldValues, Field{Name: "Mode", Values: values}) - return -} - -func elcSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - fieldValues := elcFieldValuesFromOutput(outputs) - if len(fieldValues) == 0 { - return "" - } - if len(fieldValues) < 10 { - return "" - } - if len(fieldValues[9].Values) == 0 { - return "" - } - summary := fieldValues[9].Values[0] - for _, value := range fieldValues[9].Values[1:] { - if value != summary { - return "mixed" - } - } - return summary -} - -// epbFromOutput gets EPB value from script outputs -func epbFromOutput(outputs map[string]script.ScriptOutput) string { - if outputs[script.EpbScriptName].Exitcode != 0 || len(outputs[script.EpbScriptName].Stdout) == 0 { - slog.Warn("EPB scripts failed or produced no output") - return "" - } - epb := strings.TrimSpace(outputs[script.EpbScriptName].Stdout) - msr, err := strconv.ParseInt(epb, 16, 0) - if err != nil { - slog.Error("failed to parse EPB value", slog.String("error", err.Error()), slog.String("epb", epb)) - return "" - } - return epbValToLabel(int(msr)) -} - -func epbValToLabel(msr int) string { - var val string - if msr >= 0 && msr <= 3 { - val = "Performance" - } else if msr >= 4 && msr <= 7 { - val = "Balanced Performance" - } else if msr >= 8 && msr <= 11 { - val = "Balanced Energy" - } else if msr >= 12 { - val = "Energy Efficient" - } - return fmt.Sprintf("%s (%d)", val, msr) -} - -func eppValToLabel(msr int) string { - var val string - if msr == 128 { - val = "Normal" - } else if msr < 128 && msr > 64 { - val = "Balanced Performance" - } else if msr <= 64 { - val = "Performance" - } else if msr > 128 && msr < 192 { - val = "Balanced Powersave" - } else { - val = 
"Powersave" - } - return fmt.Sprintf("%s (%d)", val, msr) -} - -// eppFromOutput gets EPP value from script outputs -// IF 0x774[42] is '1' AND 0x774[60] is '0' -// THEN -//       get EPP from 0x772 (package) -// ELSE -//       get EPP from 0x774 (per core) -func eppFromOutput(outputs map[string]script.ScriptOutput) string { - // if we couldn't get the EPP values, return empty string - if outputs[script.EppValidScriptName].Exitcode != 0 || len(outputs[script.EppValidScriptName].Stdout) == 0 || - outputs[script.EppPackageControlScriptName].Exitcode != 0 || len(outputs[script.EppPackageControlScriptName].Stdout) == 0 || - outputs[script.EppPackageScriptName].Exitcode != 0 || len(outputs[script.EppPackageScriptName].Stdout) == 0 { - slog.Warn("EPP scripts failed or produced no output") - return "" - } - // check if the epp valid bit is set and consistent across all cores - var eppValid string - for i, line := range strings.Split(outputs[script.EppValidScriptName].Stdout, "\n") { // MSR 0x774, bit 60 - if line == "" { - continue - } - currentEpbValid := strings.TrimSpace(strings.Split(line, ":")[1]) - if i == 0 { - eppValid = currentEpbValid - continue - } - if currentEpbValid != eppValid { - slog.Warn("EPP valid bit is inconsistent across cores") - return "inconsistent" - } - } - // check if epp package control bit is set and consistent across all cores - var eppPkgCtrl string - for i, line := range strings.Split(outputs[script.EppPackageControlScriptName].Stdout, "\n") { // MSR 0x774, bit 42 - if line == "" { - continue - } - currentEppPkgCtrl := strings.TrimSpace(strings.Split(line, ":")[1]) - if i == 0 { - eppPkgCtrl = currentEppPkgCtrl - continue - } - if currentEppPkgCtrl != eppPkgCtrl { - slog.Warn("EPP package control bit is inconsistent across cores") - return "inconsistent" - } - } - if eppPkgCtrl == "1" && eppValid == "0" { - eppPackage := strings.TrimSpace(outputs[script.EppPackageScriptName].Stdout) // MSR 0x772, bits 24-31 (package) - msr, err := strconv.ParseInt(eppPackage, 16, 0) - if err != nil { - slog.Error("failed to parse EPP package value", slog.String("error", err.Error()), slog.String("epp", eppPackage)) - return "" - } - return eppValToLabel(int(msr)) - } else { - var epp string - for i, line := range strings.Split(outputs[script.EppScriptName].Stdout, "\n") { // MSR 0x774, bits 24-31 (per-core) - if line == "" { - continue - } - currentEpp := strings.TrimSpace(strings.Split(line, ":")[1]) - if i == 0 { - epp = currentEpp - continue - } - if currentEpp != epp { - slog.Warn("EPP is inconsistent across cores") - return "inconsistent" - } - } - msr, err := strconv.ParseInt(epp, 16, 0) - if err != nil { - slog.Error("failed to parse EPP value", slog.String("error", err.Error()), slog.String("epp", epp)) - return "" - } - return eppValToLabel(int(msr)) - } -} - -func operatingSystemFromOutput(outputs map[string]script.ScriptOutput) string { - os := valFromRegexSubmatch(outputs[script.EtcReleaseScriptName].Stdout, `^PRETTY_NAME=\"(.+?)\"`) - centos := valFromRegexSubmatch(outputs[script.EtcReleaseScriptName].Stdout, `^(CentOS Linux release .*)`) - if centos != "" { - os = centos - } - return os -} - -type cstateInfo struct { - Name string - Status string -} - -func c6FromOutput(outputs map[string]script.ScriptOutput) string { - cstatesInfo := cstatesFromOutput(outputs) - if cstatesInfo == nil { - return "" - } - for _, cstateInfo := range cstatesInfo { - if cstateInfo.Name == "C6" { - return cstateInfo.Status - } - } - return "" -} - -func cstatesFromOutput(outputs 
map[string]script.ScriptOutput) []cstateInfo { - var cstatesInfo []cstateInfo - output := outputs[script.CstatesScriptName].Stdout - for line := range strings.SplitSeq(output, "\n") { - if line == "" { - continue - } - parts := strings.Split(line, ",") - if len(parts) != 2 { - return nil - } - cstatesInfo = append(cstatesInfo, cstateInfo{Name: parts[0], Status: parts[1]}) - } - return cstatesInfo -} - -func cstatesSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - cstatesInfo := cstatesFromOutput(outputs) - if cstatesInfo == nil { - return "" - } - summaryParts := []string{} - for _, cstateInfo := range cstatesInfo { - summaryParts = append(summaryParts, fmt.Sprintf("%s: %s", cstateInfo.Name, cstateInfo.Status)) - } - return strings.Join(summaryParts, ", ") -} - -type ISA struct { - Name string - FullName string - CPUID string -} - -var isas = []ISA{ - {"AES", "Advanced Encryption Standard New Instructions (AES-NI)", "AES instruction"}, - {"AMX", "Advanced Matrix Extensions (AMX)", "AMX-BF16: tile bfloat16 support"}, - {"AMX-COMPLEX", "AMX-COMPLEX Instruction", "AMX-COMPLEX instructions"}, - {"AMX-FP16", "AMX-FP16 Instruction", "AMX-FP16: FP16 tile operations"}, - {"AVX-IFMA", "AVX-IFMA Instruction", "AVX-IFMA: integer fused multiply add"}, - {"AVX-NE-CONVERT", "AVX-NE-CONVERT Instruction", "AVX-NE-CONVERT instructions"}, - {"AVX-VNNI-INT8", "AVX-VNNI-INT8 Instruction", "AVX-VNNI-INT8 instructions"}, - {"AVX512F", "AVX-512 Foundation", "AVX512F: AVX-512 foundation instructions"}, - {"AVX512_BF16", "Vector Neural Network Instructions (AVX512_BF16)", "AVX512_BF16: bfloat16 instructions"}, - {"AVX512_FP16", "Advanced Vector Extensions (AVX512_FP16)", "AVX512_FP16: fp16 support"}, - {"AVX512_VNNI", "Vector Neural Network Instructions (AVX512_VNNI)", "AVX512_VNNI: neural network instructions"}, - {"CLDEMOTE", "Cache Line Demote (CLDEMOTE)", "CLDEMOTE supports cache line demote"}, - {"CMPCCXADD", "Compare and Add if Condition is Met (CMPCCXADD)", "CMPccXADD instructions"}, - {"ENQCMD", "Enqueue Command Instruction (ENQCMD)", "ENQCMD instruction"}, - {"MOVDIRI", "Move Doubleword as Direct Store (MOVDIRI)", "MOVDIRI instruction"}, - {"MOVDIR64B", "Move 64 Bytes as Direct Store (MOVDIR64B)", "MOVDIR64B instruction"}, - {"PREFETCHIT0/1", "PREFETCHIT0/1 Instruction", "PREFETCHIT0, PREFETCHIT1 instructions"}, - {"SERIALIZE", "SERIALIZE Instruction", "SERIALIZE instruction"}, - {"SHA_NI", "SHA1/SHA256 Instruction Extensions (SHA_NI)", "SHA instructions"}, - {"TSXLDTRK", "Transactional Synchronization Extensions (TSXLDTRK)", "TSXLDTRK: TSX suspend load addr tracking"}, - {"VAES", "Vector AES", "VAES instructions"}, - {"WAITPKG", "UMONITOR, UMWAIT, TPAUSE Instructions", "WAITPKG instructions"}, -} - -func isaFullNames() []string { - var names []string - for _, isa := range isas { - names = append(names, isa.FullName) - } - return names -} - -func yesIfTrue(val string) string { - if val == "true" { - return "Yes" - } - return "No" -} - -func isaSupportedFromOutput(outputs map[string]script.ScriptOutput) []string { - var supported []string - for _, isa := range isas { - oneSupported := yesIfTrue(valFromRegexSubmatch(outputs[script.CpuidScriptName].Stdout, isa.CPUID+`\s*= (.+?)$`)) - supported = append(supported, oneSupported) - } - return supported -} - -func numaBalancingFromOutput(outputs map[string]script.ScriptOutput) string { - if strings.Contains(outputs[script.NumaBalancingScriptName].Stdout, "1") { - return "Enabled" - } else if 
strings.Contains(outputs[script.NumaBalancingScriptName].Stdout, "0") { - return "Disabled" - } - return "" -} - -func clusteringModeFromOutput(outputs map[string]script.ScriptOutput) string { - uarch := UarchFromOutput(outputs) - sockets := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) - nodes := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`) - if uarch == "" || sockets == "" || nodes == "" { - return "" - } - socketCount, err := strconv.Atoi(sockets) - if err != nil { - slog.Error("failed to parse socket count", slog.String("error", err.Error())) - return "" - } - nodeCount, err := strconv.Atoi(nodes) - if err != nil { - slog.Error("failed to parse node count", slog.String("error", err.Error())) - return "" - } - if nodeCount == 0 || socketCount == 0 { - slog.Error("node count or socket count is zero") - return "" - } - nodesPerSocket := nodeCount / socketCount - switch uarch { - case "GNR_X1": - return "All2All" - case "GNR_X2": - switch nodesPerSocket { - case 1: - return "UMA 4 (Quad)" - case 2: - return "SNC 2" - } - case "GNR_X3": - switch nodesPerSocket { - case 1: - return "UMA 6 (Hex)" - case 3: - return "SNC 3" - } - case "SRF_SP": - return "UMA 2 (Hemi)" - case "SRF_AP": - switch nodesPerSocket { - case 1: - return "UMA 4 (Quad)" - case 2: - return "SNC 2" - } - case "CWF": - switch nodesPerSocket { - case 1: - return "UMA 6 (Hex)" - case 3: - return "SNC 3" - } - } - return "" -} - -type nicInfo struct { - Name string - Vendor string - VendorID string - Model string - ModelID string - Speed string - Link string - Bus string - Driver string - DriverVersion string - FirmwareVersion string - MACAddress string - NUMANode string - CPUAffinity string - AdaptiveRX string - AdaptiveTX string - RxUsecs string - TxUsecs string - Card string - Port string - MTU string - IsVirtual bool - TXQueues string - RXQueues string - XPSCPUs map[string]string - RPSCPUs map[string]string -} - -func parseNicInfo(scriptOutput string) []nicInfo { - var nics []nicInfo - for nicOutput := range strings.SplitSeq(scriptOutput, "----------------------------------------") { - if strings.TrimSpace(nicOutput) == "" { - continue - } - var nic nicInfo - nic.XPSCPUs = make(map[string]string) - nic.RPSCPUs = make(map[string]string) - // Map of prefixes to field pointers - fieldMap := map[string]*string{ - "Interface: ": &nic.Name, - "Vendor: ": &nic.Vendor, - "Vendor ID: ": &nic.VendorID, - "Model: ": &nic.Model, - "Model ID: ": &nic.ModelID, - "Speed: ": &nic.Speed, - "Link detected: ": &nic.Link, - "bus-info: ": &nic.Bus, - "driver: ": &nic.Driver, - "version: ": &nic.DriverVersion, - "firmware-version: ": &nic.FirmwareVersion, - "MAC Address: ": &nic.MACAddress, - "NUMA Node: ": &nic.NUMANode, - "CPU Affinity: ": &nic.CPUAffinity, - "rx-usecs: ": &nic.RxUsecs, - "tx-usecs: ": &nic.TxUsecs, - "MTU: ": &nic.MTU, - "TX Queues: ": &nic.TXQueues, - "RX Queues: ": &nic.RXQueues, - } - for line := range strings.SplitSeq(nicOutput, "\n") { - line = strings.TrimSpace(line) - // Special parsing for "Adaptive RX: off TX: off" format - if strings.HasPrefix(line, "Adaptive RX: ") { - parts := strings.Split(line, "TX: ") - if len(parts) == 2 { - nic.AdaptiveRX = strings.TrimSpace(strings.TrimPrefix(parts[0], "Adaptive RX: ")) - nic.AdaptiveTX = strings.TrimSpace(parts[1]) - } - continue - } - // Check if this is a virtual function - if value, ok := strings.CutPrefix(line, "Virtual Function: "); ok { - nic.IsVirtual = (strings.TrimSpace(value) == 
"yes") - continue - } - // Special parsing for xps_cpus and rps_cpus - if strings.HasPrefix(line, "xps_cpus tx-") { - parts := strings.SplitN(line, ": ", 2) - if len(parts) == 2 { - queue := strings.TrimPrefix(parts[0], "xps_cpus ") - nic.XPSCPUs[queue] = hexBitmapToCPUList(parts[1]) - } - continue - } - if strings.HasPrefix(line, "rps_cpus rx-") { - parts := strings.SplitN(line, ": ", 2) - if len(parts) == 2 { - queue := strings.TrimPrefix(parts[0], "rps_cpus ") - nic.RPSCPUs[queue] = hexBitmapToCPUList(parts[1]) - } - continue - } - for prefix, fieldPtr := range fieldMap { - if after, ok := strings.CutPrefix(line, prefix); ok { - *fieldPtr = after - break - } - } - } - // special case for model as it sometimes has additional information in parentheses - nic.Model = strings.TrimSpace(strings.Split(nic.Model, "(")[0]) - nics = append(nics, nic) - } - // Assign card and port information - assignCardAndPort(nics) - return nics -} - -func hexBitmapToCPUList(hexBitmap string) string { - if hexBitmap == "" { - return "" - } - - // Remove commas to form a single continuous hex string. - // This assumes the comma-separated parts are in big-endian order. - fullHexBitmap := strings.ReplaceAll(hexBitmap, ",", "") - - i := new(big.Int) - // The string is a hex string, so the base is 16. - if _, success := i.SetString(fullHexBitmap, 16); !success { - // If parsing fails, it might not be a hex string. Return as is. - return hexBitmap - } - - var cpus []string - // Iterate through the bits of the big integer. - for bit := 0; bit < i.BitLen(); bit++ { - if i.Bit(bit) == 1 { - cpus = append(cpus, fmt.Sprintf("%d", bit)) - } - } - if len(cpus) == 0 { - return "" - } - return strings.Join(cpus, ",") -} - -// assignCardAndPort assigns card and port numbers to NICs based on their PCI addresses -func assignCardAndPort(nics []nicInfo) { - if len(nics) == 0 { - return - } - - // Map to store card identifiers (domain:bus:device) to card numbers - cardMap := make(map[string]int) - // Map to track ports within each card - portMap := make(map[string][]int) // card identifier -> list of indices in nics slice - cardCounter := 1 - - // First pass: identify cards and group NICs by card - for i := range nics { - if nics[i].Bus == "" { - continue - } - // PCI address format: domain:bus:device.function (e.g., 0000:32:00.0) - // Extract domain:bus:device as the card identifier - parts := strings.Split(nics[i].Bus, ":") - if len(parts) != 3 { - continue - } - // Further split the last part to separate device from function - deviceFunc := strings.Split(parts[2], ".") - if len(deviceFunc) != 2 { - continue - } - // Card identifier is domain:bus:device - cardID := parts[0] + ":" + parts[1] + ":" + deviceFunc[0] - - // Assign card number if not already assigned - if _, exists := cardMap[cardID]; !exists { - cardMap[cardID] = cardCounter - cardCounter++ - } - // Add this NIC index to the card's port list - portMap[cardID] = append(portMap[cardID], i) - } - - // Second pass: assign card and port numbers - for cardID, nicIndices := range portMap { - cardNum := cardMap[cardID] - // Sort NICs within a card by their function number - sort.Slice(nicIndices, func(i, j int) bool { - // Extract function numbers - funcI := extractFunction(nics[nicIndices[i]].Bus) - funcJ := extractFunction(nics[nicIndices[j]].Bus) - return funcI < funcJ - }) - // Assign port numbers - for portNum, nicIdx := range nicIndices { - nics[nicIdx].Card = fmt.Sprintf("%d", cardNum) - nics[nicIdx].Port = fmt.Sprintf("%d", portNum+1) - } - } -} - -// extractFunction 
extracts the function number from a PCI address -func extractFunction(busAddr string) int { - parts := strings.Split(busAddr, ".") - if len(parts) != 2 { - return 0 - } - funcNum, err := strconv.Atoi(parts[1]) - if err != nil { - return 0 - } - return funcNum -} - -type diskInfo struct { - Name string - Model string - Size string - MountPoint string - Type string - RequestQueueSize string - MinIOSize string - FirmwareVersion string - PCIeAddress string - NUMANode string - LinkSpeed string - LinkWidth string - MaxLinkSpeed string - MaxLinkWidth string -} - -func diskInfoFromOutput(outputs map[string]script.ScriptOutput) []diskInfo { - diskInfos := []diskInfo{} - for i, line := range strings.Split(outputs[script.DiskInfoScriptName].Stdout, "\n") { - // first line is the header - if i == 0 { - continue - } - if line == "" { - continue - } - fields := strings.Split(line, "|") - if len(fields) != 14 { - slog.Error("unexpected number of fields in disk info output", slog.String("line", line)) - return nil - } - // clean up the model name - fields[1] = strings.TrimSpace(fields[1]) - // if we don't have a firmware version, try to get it from another source - if fields[7] == "" { - reFwRev := regexp.MustCompile(`FwRev=(\w+)`) - reDev := regexp.MustCompile(fmt.Sprintf(`/dev/%s:`, fields[0])) - devFound := false - for line := range strings.SplitSeq(outputs[script.HdparmScriptName].Stdout, "\n") { - if !devFound { - if reDev.FindString(line) != "" { - devFound = true - continue - } - } else { - match := reFwRev.FindStringSubmatch(line) - if match != nil { - fields[7] = match[1] - break - } - } - } - } - diskInfos = append(diskInfos, diskInfo{fields[0], fields[1], fields[2], fields[3], fields[4], fields[5], fields[6], fields[7], fields[8], fields[9], fields[10], fields[11], fields[12], fields[13]}) - } - return diskInfos -} - -func filesystemFieldValuesFromOutput(outputs map[string]script.ScriptOutput) []Field { - fieldValues := []Field{} - reFindmnt := regexp.MustCompile(`(.*)\s(.*)\s(.*)\s(.*)`) - for i, line := range strings.Split(outputs[script.DfScriptName].Stdout, "\n") { - if line == "" { - continue - } - fields := strings.Fields(line) - // "Mounted On" gets split into two fields, rejoin - if i == 0 && fields[len(fields)-2] == "Mounted" && fields[len(fields)-1] == "on" { - fields[len(fields)-2] = "Mounted on" - fields = fields[:len(fields)-1] - for _, field := range fields { - fieldValues = append(fieldValues, Field{Name: field, Values: []string{}}) - } - // add an additional field - fieldValues = append(fieldValues, Field{Name: "Mount Options", Values: []string{}}) - continue - } - if len(fields) != len(fieldValues)-1 { - slog.Error("unexpected number of fields in df output", slog.String("line", line)) - return nil - } - for i, field := range fields { - fieldValues[i].Values = append(fieldValues[i].Values, field) - } - // get mount options for the current file system - var options string - for i, line := range strings.Split(outputs[script.FindMntScriptName].Stdout, "\n") { - if i == 0 { - continue - } - match := reFindmnt.FindStringSubmatch(line) - if match != nil { - target := match[1] - source := match[2] - if fields[0] == source && fields[5] == target { - options = match[4] - break - } - } - } - fieldValues[len(fieldValues)-1].Values = append(fieldValues[len(fieldValues)-1].Values, options) - } - return fieldValues -} - -type GPU struct { - Manufacturer string - Model string - PCIID string -} - -func gpuInfoFromOutput(outputs map[string]script.ScriptOutput) []GPU { - gpus := []GPU{} - 
gpusLshw := valsArrayFromRegexSubmatch(outputs[script.LshwScriptName].Stdout, `^pci.*?\s+display\s+(\w+).*?\s+\[(\w+):(\w+)]$`) - idxMfgName := 0 - idxMfgID := 1 - idxDevID := 2 - for _, gpu := range gpusLshw { - // Find GPU in GPU defs, note the model - var model string - for _, intelGPU := range gpuDefinitions { - if gpu[idxMfgID] == intelGPU.MfgID { - model = intelGPU.Model - break - } - re := regexp.MustCompile(intelGPU.DevID) - if re.FindString(gpu[idxDevID]) != "" { - model = intelGPU.Model - break - } - } - if model == "" { - if gpu[idxMfgID] == "8086" { - model = "Unknown Intel" - } else { - model = "Unknown" - } - } - gpus = append(gpus, GPU{Manufacturer: gpu[idxMfgName], Model: model, PCIID: gpu[idxMfgID] + ":" + gpu[idxDevID]}) - } - return gpus -} - -type Gaudi struct { - ModuleID string - Microarchitecture string - SerialNumber string - BusID string - DriverVersion string - EROM string - CPLD string - SPI string - NUMA string -} - -// output from the GaudiInfo script: -// module_id, serial, bus_id, driver_version -// 2, AM50016189, 0000:19:00.0, 1.17.0-28a11ca -// 6, AM50016165, 0000:b3:00.0, 1.17.0-28a11ca -// 3, AM50016119, 0000:1a:00.0, 1.17.0-28a11ca -// 0, AM50016134, 0000:43:00.0, 1.17.0-28a11ca -// 7, AM50016150, 0000:b4:00.0, 1.17.0-28a11ca -// 1, AM50016130, 0000:44:00.0, 1.17.0-28a11ca -// 4, AM50016127, 0000:cc:00.0, 1.17.0-28a11ca -// 5, AM50016122, 0000:cd:00.0, 1.17.0-28a11ca -// -// output from the GaudiNuma script: -// modID NUMA Affinity -// ----- ------------- -// 0 0 -// 1 0 -// 2 0 -// 3 0 -// 4 1 -// 5 1 -// 6 1 -// 7 1 -// -// output from the GaudiFirmware script: -// [0] AIP (accel0) 0000:19:00.0 -// erom -// component : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 (Jul 24 2024 - 11:31:46) -// fw os : -// fuse -// component : 01P0-HL2080A0-15-TF8A81-03-07-03 -// fw os : -// cpld -// component : 0x00000010.653FB250 -// fw os : -// uboot -// component : U-Boot 2021.04-hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// fw os : -// arcmp -// component : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// fw os : Linux gaudi2 5.10.18-hl-gaudi2-1.17.0-fw-51.2.0-sec-9 #1 SMP PREEMPT Wed Jul 24 11:44:52 IDT 2024 aarch64 GNU/Linux -// -// preboot -// component : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// fw os : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// eeprom : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// boot_info : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// mgmt -// component : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// fw os : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// i2c : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// eeprom : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// boot_info : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// pid -// component : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// fw os : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// eeprom : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// boot_info : hl-gaudi2-1.17.0-fw-51.2.0-sec-9 -// -// [1] AIP (accel1) 0000:b3:00.0 ....... 
- -func gaudiInfoFromOutput(outputs map[string]script.ScriptOutput) []Gaudi { - gaudis := []Gaudi{} - for i, line := range strings.Split(outputs[script.GaudiInfoScriptName].Stdout, "\n") { - if line == "" || i == 0 { // skip blank lines and header - continue - } - fields := strings.Split(line, ", ") - if len(fields) != 4 { - slog.Error("unexpected number of fields in gaudi info output", slog.String("line", line)) - continue - } - gaudis = append(gaudis, Gaudi{ModuleID: fields[0], SerialNumber: fields[1], BusID: fields[2], DriverVersion: fields[3]}) - } - // sort the gaudis by module ID - sort.Slice(gaudis, func(i, j int) bool { - return gaudis[i].ModuleID < gaudis[j].ModuleID - }) - // set microarchitecture (assumes same arch for all gaudi devices) - for i := range gaudis { - gaudis[i].Microarchitecture = strings.TrimSpace(outputs[script.GaudiArchitectureScriptName].Stdout) - } - // get NUMA affinity - numaAffinities := valsArrayFromRegexSubmatch(outputs[script.GaudiNumaScriptName].Stdout, `^(\d+)\s+(\d+)\s+$`) - if len(numaAffinities) != len(gaudis) { - slog.Error("number of gaudis in gaudi info and numa output do not match", slog.Int("gaudis", len(gaudis)), slog.Int("numaAffinities", len(numaAffinities))) - return nil - } - for i, numaAffinity := range numaAffinities { - gaudis[i].NUMA = numaAffinity[1] - } - // get firmware versions - reDevice := regexp.MustCompile(`^\[(\d+)] AIP \(accel\d+\) (.*)$`) - reErom := regexp.MustCompile(`\s+erom$`) - reCpld := regexp.MustCompile(`\s+cpld$`) - rePreboot := regexp.MustCompile(`\s+preboot$`) - reComponent := regexp.MustCompile(`^\s+component\s+:\s+hl-gaudi\d-(.*)-sec-\d+`) - reCpldComponent := regexp.MustCompile(`^\s+component\s+:\s+(0x[0-9a-fA-F]+\.[0-9a-fA-F]+)$`) - deviceIdx := -1 - state := -1 - for line := range strings.SplitSeq(outputs[script.GaudiFirmwareScriptName].Stdout, "\n") { - if line == "" { - continue - } - match := reDevice.FindStringSubmatch(line) - if match != nil { - var err error - deviceIdx, err = strconv.Atoi(match[1]) - if err != nil { - slog.Error("failed to parse device index", slog.String("deviceIdx", match[1])) - return nil - } - if deviceIdx >= len(gaudis) { - slog.Error("device index out of range", slog.Int("deviceIdx", deviceIdx), slog.Int("gaudis", len(gaudis))) - return nil - } - continue - } - if deviceIdx == -1 { - continue - } - if reErom.FindString(line) != "" { - state = 0 - continue - } - if reCpld.FindString(line) != "" { - state = 1 - continue - } - if rePreboot.FindString(line) != "" { - state = 2 - continue - } - if state != -1 { - switch state { - case 0: - match := reComponent.FindStringSubmatch(line) - if match != nil { - gaudis[deviceIdx].EROM = match[1] - } - case 1: - match := reCpldComponent.FindStringSubmatch(line) - if match != nil { - gaudis[deviceIdx].CPLD = match[1] - } - case 2: - match := reComponent.FindStringSubmatch(line) - if match != nil { - gaudis[deviceIdx].SPI = match[1] - } - } - state = -1 - } - } - return gaudis -} - -// return all PCI Devices of specified class -func getPCIDevices(class string, outputs map[string]script.ScriptOutput) (devices []map[string]string) { - device := make(map[string]string) - re := regexp.MustCompile(`^(\w+):\s+(.*)$`) - for line := range strings.SplitSeq(outputs[script.LspciVmmScriptName].Stdout, "\n") { - if line == "" { // end of device - if devClass, ok := device["Class"]; ok { - if devClass == class { - devices = append(devices, device) - } - } - device = make(map[string]string) - continue - } - match := re.FindStringSubmatch(line) - if 
len(match) > 0 { - key := match[1] - value := match[2] - device[key] = value - } - } - return -} - -func cveInfoFromOutput(outputs map[string]script.ScriptOutput) [][]string { - vulns := make(map[string]string) - // from spectre-meltdown-checker - for _, pair := range valsArrayFromRegexSubmatch(outputs[script.CveScriptName].Stdout, `(CVE-\d+-\d+): (.+)`) { - vulns[pair[0]] = pair[1] - } - // sort the vulnerabilities by CVE ID - var ids []string - for id := range vulns { - ids = append(ids, id) - } - sort.Strings(ids) - cves := make([][]string, 0) - for _, id := range ids { - cves = append(cves, []string{id, vulns[id]}) - } - return cves -} - -func nicIRQMappingsFromOutput(outputs map[string]script.ScriptOutput) [][]string { - nics := parseNicInfo(outputs[script.NicInfoScriptName].Stdout) - if len(nics) == 0 { - return nil - } - nicIRQMappings := [][]string{} - for _, nic := range nics { - if nic.CPUAffinity == "" { - continue // skip NICs without CPU affinity - } - affinities := strings.Split(strings.TrimSuffix(nic.CPUAffinity, ";"), ";") - nicIRQMappings = append(nicIRQMappings, []string{nic.Name, strings.Join(affinities, " | ")}) - } - return nicIRQMappings -} - -func nicSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - nics := parseNicInfo(outputs[script.NicInfoScriptName].Stdout) - if len(nics) == 0 { - return "N/A" - } - modelCount := make(map[string]int) - for _, nic := range nics { - modelCount[nic.Model]++ - } - var summary []string - for model, count := range modelCount { - if model == "" { - model = "Unknown NIC" - } - summary = append(summary, fmt.Sprintf("%dx %s", count, model)) - } - return strings.Join(summary, ", ") -} - -func diskSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - disks := diskInfoFromOutput(outputs) - if len(disks) == 0 { - return "N/A" - } - type ModelSize struct { - model string - size string - } - modelSizeCount := make(map[ModelSize]int) - for _, disk := range disks { - if disk.Model == "" { - continue - } - modelSize := ModelSize{model: disk.Model, size: disk.Size} - modelSizeCount[modelSize]++ - } - var summary []string - for modelSize, count := range modelSizeCount { - summary = append(summary, fmt.Sprintf("%dx %s %s", count, modelSize.size, modelSize.model)) - } - return strings.Join(summary, ", ") -} - -func acceleratorSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - var summary []string - accelerators := acceleratorNames() - counts := acceleratorCountsFromOutput(outputs) - for i, name := range accelerators { - if strings.Contains(name, "chipset") { // skip "QAT (on chipset)" in this table - continue - } else if strings.Contains(name, "CPU") { // rename "QAT (on CPU) to simply "QAT" - name = "QAT" - } - summary = append(summary, fmt.Sprintf("%s %s [0]", name, counts[i])) - } - return strings.Join(summary, ", ") -} - -func cveSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - cves := cveInfoFromOutput(outputs) - if len(cves) == 0 { - return "" - } - var numOK int - var numVuln int - for _, cve := range cves { - if strings.HasPrefix(cve[1], "OK") { - numOK++ - } else { - numVuln++ - } - } - return fmt.Sprintf("%d OK, %d Vulnerable", numOK, numVuln) -} - -func systemSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - // BASELINE: 1-node, 2x Intel® Xeon® , xx cores, 100W TDP, HT On/Off?, Turbo On/Off?, Total Memory xxx GB (xx slots/ xx GB/ xxxx MHz [run @ xxxx MHz] ), , , , . Test by Intel as of . 
- template := "1-node, %s, %sx %s, %s cores, %s TDP, %s %s, %s %s, Total Memory %s, BIOS %s, microcode %s, %s, %s, %s, %s. Test by Intel as of %s." - var systemType, socketCount, cpuModel, coreCount, tdp, htLabel, htOnOff, turboLabel, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion, date string - - // system type - systemType = valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + " " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) - // socket count - socketCount = valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(\d+)$`) - // CPU model - cpuModel = valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model name:\s*(.+?)$`) - // core count - coreCount = valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(\d+)$`) - // TDP - tdp = tdpFromOutput(outputs) - if tdp == "" { - tdp = "?" - } - vendor := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Vendor ID:\s*(.+)$`) - // hyperthreading - htLabel = "HT" - if vendor == cpus.AMDVendor { - htLabel = "SMT" - } - htOnOff = hyperthreadingFromOutput(outputs) - switch htOnOff { - case "Enabled": - htOnOff = "On" - case "Disabled": - htOnOff = "Off" - case "N/A": - htOnOff = "N/A" - default: - htOnOff = "?" - } - // turbo - turboLabel = "Turbo" - if vendor == cpus.AMDVendor { - turboLabel = "Boost" - } - turboOnOff = turboEnabledFromOutput(outputs) - if strings.Contains(strings.ToLower(turboOnOff), "enabled") { - turboOnOff = "On" - } else if strings.Contains(strings.ToLower(turboOnOff), "disabled") { - turboOnOff = "Off" - } else { - turboOnOff = "?" - } - // memory - installedMem = installedMemoryFromOutput(outputs) - // BIOS - biosVersion = valFromRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, `^Version:\s*(.+?)$`) - // microcode - uCodeVersion = valFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`) - // NICs - nics = nicSummaryFromOutput(outputs) - // disks - disks = diskSummaryFromOutput(outputs) - // OS - operatingSystem = operatingSystemFromOutput(outputs) - // kernel - kernelVersion = valFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`) - // date - date = strings.TrimSpace(outputs[script.DateScriptName].Stdout) - // parse date so that we can format it - parsedTime, err := time.Parse("Mon Jan 2 15:04:05 MST 2006", date) // without AM/PM - if err != nil { - parsedTime, err = time.Parse("Mon Jan 2 15:04:05 AM MST 2006", date) // with AM/PM - } - if err == nil { - date = parsedTime.Format("January 2 2006") - } - - // put it all together - return fmt.Sprintf(template, systemType, socketCount, cpuModel, coreCount, tdp, htLabel, htOnOff, turboLabel, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion, date) -} - -// getSectionsFromOutput parses output into sections, where the section name -// is the key in a map and the section content is the value -// sections are delimited by lines of the form ##########
<section name> ##########
-// example:
-// ########## <section name 1> ##########
-// <section content line 1>
-// <section content line 2>
-// ########## <section name 2> ##########
-// <section content line 1>
-// -// returns a map of section name to section content -// if the output is empty or contains no section headers, returns an empty map -// if a section contains no content, the value for that section is an empty string -func getSectionsFromOutput(output string) map[string]string { - sections := make(map[string]string) - re := regexp.MustCompile(`^########## (.+?) ##########$`) - var sectionName string - for line := range strings.SplitSeq(output, "\n") { - // check if the line is a section header - match := re.FindStringSubmatch(line) - if match != nil { - // if the section name isn't in the map yet, add it - if _, ok := sections[match[1]]; !ok { - sections[match[1]] = "" - } - // save the section name - sectionName = match[1] - continue - } - if sectionName != "" { - sections[sectionName] += line + "\n" - } - } - return sections -} - -// sectionValueFromOutput returns the content of a section from the output -// if the section doesn't exist, returns an empty string -// if the section exists but has no content, returns an empty string -func sectionValueFromOutput(output string, sectionName string) string { - sections := getSectionsFromOutput(output) - if len(sections) == 0 { - slog.Warn("no sections in output") - return "" - } - if _, ok := sections[sectionName]; !ok { - slog.Warn("section not found in output", slog.String("section", sectionName)) - return "" - } - if sections[sectionName] == "" { - slog.Warn("No content for section:", slog.String("section", sectionName)) - return "" - } - return sections[sectionName] -} - -func javaFoldedFromOutput(outputs map[string]script.ScriptOutput) string { - sections := getSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) - if len(sections) == 0 { - slog.Warn("no sections in collapsed call stack output") - return "" - } - javaFolded := make(map[string]string) - re := regexp.MustCompile(`^async-profiler (\d+) (.*)$`) - for header, stacks := range sections { - match := re.FindStringSubmatch(header) - if match == nil { - continue - } - pid := match[1] - processName := match[2] - if stacks == "" { - slog.Warn("no stacks for java process", slog.String("header", header)) - continue - } - if strings.HasPrefix(stacks, "Failed to inject profiler") { - slog.Error("profiling data error", slog.String("header", header)) - continue - } - _, ok := javaFolded[processName] - if processName == "" { - processName = "java (" + pid + ")" - } else if ok { - processName = processName + " (" + pid + ")" - } - javaFolded[processName] = stacks - } - folded, err := mergeJavaFolded(javaFolded) - if err != nil { - slog.Error("failed to merge java stacks", slog.String("error", err.Error())) - } - return folded -} - -func nativeFoldedFromOutput(outputs map[string]script.ScriptOutput) string { - sections := getSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) - if len(sections) == 0 { - slog.Warn("no sections in collapsed call stack output") - return "" - } - var dwarfFolded, fpFolded string - for header, content := range sections { - switch header { - case "perf_dwarf": - dwarfFolded = content - case "perf_fp": - fpFolded = content - } - } - if dwarfFolded == "" && fpFolded == "" { - return "" - } - folded, err := mergeSystemFolded(fpFolded, dwarfFolded) - if err != nil { - slog.Error("failed to merge native stacks", slog.String("error", err.Error())) - } - return folded -} - -func maxRenderDepthFromOutput(outputs map[string]script.ScriptOutput) string { - sections := 
getSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) - if len(sections) == 0 { - slog.Warn("no sections in collapsed call stack output") - return "" - } - for header, content := range sections { - if header == "maximum depth" { - return content - } - } - return "" -} diff --git a/internal/report/table_helpers_nic_integration_test.go b/internal/report/table_helpers_nic_integration_test.go deleted file mode 100644 index 591cffe6..00000000 --- a/internal/report/table_helpers_nic_integration_test.go +++ /dev/null @@ -1,204 +0,0 @@ -package report - -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -import ( - "testing" - - "perfspect/internal/script" -) - -func TestParseNicInfoWithCardPort(t *testing.T) { - // Sample output simulating the scenario from the issue - sampleOutput := `Interface: eth2 -Vendor ID: 8086 -Model ID: 1593 -Vendor: Intel Corporation -Model: Ethernet Controller 10G X550T -Speed: 1000Mb/s -Link detected: yes -bus-info: 0000:32:00.0 -driver: ixgbe -version: 5.1.0-k -firmware-version: 0x800009e0 -MAC Address: aa:bb:cc:dd:ee:00 -NUMA Node: 0 -CPU Affinity: -IRQ Balance: Enabled -rx-usecs: 1 -tx-usecs: 1 -Adaptive RX: off TX: off ----------------------------------------- -Interface: eth3 -Vendor ID: 8086 -Model ID: 1593 -Vendor: Intel Corporation -Model: Ethernet Controller 10G X550T -Speed: Unknown! -Link detected: no -bus-info: 0000:32:00.1 -driver: ixgbe -version: 5.1.0-k -firmware-version: 0x800009e0 -MAC Address: aa:bb:cc:dd:ee:01 -NUMA Node: 0 -CPU Affinity: -IRQ Balance: Enabled -rx-usecs: 1 -tx-usecs: 1 -Adaptive RX: off TX: off ----------------------------------------- -Interface: eth0 -Vendor ID: 8086 -Model ID: 37d2 -Vendor: Intel Corporation -Model: Ethernet Controller E810-C for QSFP -Speed: 100000Mb/s -Link detected: yes -bus-info: 0000:c0:00.0 -driver: ice -version: K_5.19.0-41-generic_5.1.9 -firmware-version: 4.40 0x8001c967 1.3534.0 -MAC Address: aa:bb:cc:dd:ee:82 -NUMA Node: 1 -CPU Affinity: -IRQ Balance: Enabled -rx-usecs: 1 -tx-usecs: 1 -Adaptive RX: off TX: off ----------------------------------------- -Interface: eth1 -Vendor ID: 8086 -Model ID: 37d2 -Vendor: Intel Corporation -Model: Ethernet Controller E810-C for QSFP -Speed: 100000Mb/s -Link detected: yes -bus-info: 0000:c0:00.1 -driver: ice -version: K_5.19.0-41-generic_5.1.9 -firmware-version: 4.40 0x8001c967 1.3534.0 -MAC Address: aa:bb:cc:dd:ee:83 -NUMA Node: 1 -CPU Affinity: -IRQ Balance: Enabled -rx-usecs: 1 -tx-usecs: 1 -Adaptive RX: off TX: off -----------------------------------------` - - nics := parseNicInfo(sampleOutput) - - if len(nics) != 4 { - t.Fatalf("Expected 4 NICs, got %d", len(nics)) - } - - // Expected card/port assignments based on the issue example - expectedCardPort := map[string]struct { - card string - port string - }{ - "eth2": {"1", "1"}, // 0000:32:00.0 - "eth3": {"1", "2"}, // 0000:32:00.1 - "eth0": {"2", "1"}, // 0000:c0:00.0 - "eth1": {"2", "2"}, // 0000:c0:00.1 - } - - for _, nic := range nics { - expected, exists := expectedCardPort[nic.Name] - if !exists { - t.Errorf("Unexpected NIC name: %s", nic.Name) - continue - } - if nic.Card != expected.card { - t.Errorf("NIC %s: expected card %s, got %s", nic.Name, expected.card, nic.Card) - } - if nic.Port != expected.port { - t.Errorf("NIC %s: expected port %s, got %s", nic.Name, expected.port, nic.Port) - } - } -} - -func TestNicTableValuesWithCardPort(t *testing.T) { - // Sample output simulating the scenario from the issue - sampleOutput := `Interface: eth2 
-bus-info: 0000:32:00.0 -Vendor: Intel Corporation -Model: Ethernet Controller 10G X550T -Speed: 1000Mb/s -Link detected: yes ----------------------------------------- -Interface: eth3 -bus-info: 0000:32:00.1 -Vendor: Intel Corporation -Model: Ethernet Controller 10G X550T -Speed: Unknown! -Link detected: no ----------------------------------------- -Interface: eth0 -bus-info: 0000:c0:00.0 -Vendor: Intel Corporation -Model: Ethernet Controller E810-C for QSFP -Speed: 100000Mb/s -Link detected: yes ----------------------------------------- -Interface: eth1 -bus-info: 0000:c0:00.1 -Vendor: Intel Corporation -Model: Ethernet Controller E810-C for QSFP -Speed: 100000Mb/s -Link detected: yes -----------------------------------------` - - outputs := map[string]script.ScriptOutput{ - script.NicInfoScriptName: {Stdout: sampleOutput}, - } - - fields := nicTableValues(outputs) - - // Find the "Card / Port" field - var cardPortField Field - found := false - for _, field := range fields { - if field.Name == "Card / Port" { - cardPortField = field - found = true - break - } - } - - if !found { - t.Fatal("Card / Port field not found in NIC table") - } - - // Verify we have 4 entries - if len(cardPortField.Values) != 4 { - t.Fatalf("Expected 4 Card / Port values, got %d", len(cardPortField.Values)) - } - - // Find the Name field to match values - var nameField Field - for _, field := range fields { - if field.Name == "Name" { - nameField = field - break - } - } - - // Verify card/port assignments - expectedCardPort := map[string]string{ - "eth2": "1 / 1", - "eth3": "1 / 2", - "eth0": "2 / 1", - "eth1": "2 / 2", - } - - for i, name := range nameField.Values { - expected := expectedCardPort[name] - actual := cardPortField.Values[i] - if actual != expected { - t.Errorf("NIC %s: expected Card / Port %q, got %q", name, expected, actual) - } - } -} diff --git a/internal/report/table_helpers_nic_test.go b/internal/report/table_helpers_nic_test.go deleted file mode 100644 index a5742b44..00000000 --- a/internal/report/table_helpers_nic_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package report - -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -import ( - "testing" -) - -func TestAssignCardAndPort(t *testing.T) { - tests := []struct { - name string - nics []nicInfo - expected map[string]string // map of NIC name to expected "Card / Port" - }{ - { - name: "Two cards with two ports each", - nics: []nicInfo{ - {Name: "eth2", Bus: "0000:32:00.0"}, - {Name: "eth3", Bus: "0000:32:00.1"}, - {Name: "eth0", Bus: "0000:c0:00.0"}, - {Name: "eth1", Bus: "0000:c0:00.1"}, - }, - expected: map[string]string{ - "eth2": "1 / 1", - "eth3": "1 / 2", - "eth0": "2 / 1", - "eth1": "2 / 2", - }, - }, - { - name: "Single card with four ports", - nics: []nicInfo{ - {Name: "eth0", Bus: "0000:19:00.0"}, - {Name: "eth1", Bus: "0000:19:00.1"}, - {Name: "eth2", Bus: "0000:19:00.2"}, - {Name: "eth3", Bus: "0000:19:00.3"}, - }, - expected: map[string]string{ - "eth0": "1 / 1", - "eth1": "1 / 2", - "eth2": "1 / 3", - "eth3": "1 / 4", - }, - }, - { - name: "Three different cards", - nics: []nicInfo{ - {Name: "eth0", Bus: "0000:19:00.0"}, - {Name: "eth1", Bus: "0000:1a:00.0"}, - {Name: "eth2", Bus: "0000:1b:00.0"}, - }, - expected: map[string]string{ - "eth0": "1 / 1", - "eth1": "2 / 1", - "eth2": "3 / 1", - }, - }, - { - name: "Empty bus address should not assign card/port", - nics: []nicInfo{ - {Name: "eth0", Bus: ""}, - }, - expected: map[string]string{ - "eth0": " / ", - }, - }, - } - - for _, tt := range 
tests { - t.Run(tt.name, func(t *testing.T) { - assignCardAndPort(tt.nics) - for _, nic := range tt.nics { - expected := tt.expected[nic.Name] - actual := nic.Card + " / " + nic.Port - if actual != expected { - t.Errorf("NIC %s: expected %q, got %q", nic.Name, expected, actual) - } - } - }) - } -} - -func TestExtractFunction(t *testing.T) { - tests := []struct { - busAddr string - expected int - }{ - {"0000:32:00.0", 0}, - {"0000:32:00.1", 1}, - {"0000:32:00.3", 3}, - {"invalid", 0}, - {"", 0}, - } - - for _, tt := range tests { - t.Run(tt.busAddr, func(t *testing.T) { - result := extractFunction(tt.busAddr) - if result != tt.expected { - t.Errorf("expected %d, got %d", tt.expected, result) - } - }) - } -} diff --git a/internal/report/table_helpers_test.go b/internal/report/table_helpers_test.go deleted file mode 100644 index 989cd991..00000000 --- a/internal/report/table_helpers_test.go +++ /dev/null @@ -1,1014 +0,0 @@ -package report - -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -import ( - "perfspect/internal/script" - "reflect" - "testing" -) - -func TestHyperthreadingFromOutput(t *testing.T) { - tests := []struct { - name string - lscpuOutput string - wantResult string - }{ - { - name: "Hyperthreading enabled - 2 threads per core", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 1 -Core(s) per socket: 8 -CPU(s): 16 -Thread(s) per core: 2 -On-line CPU(s) list: 0-15 -`, - wantResult: "Enabled", - }, - { - name: "Hyperthreading disabled - 1 thread per core", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 1 -Core(s) per socket: 8 -CPU(s): 8 -Thread(s) per core: 1 -On-line CPU(s) list: 0-7 -`, - wantResult: "Disabled", - }, - { - name: "Hyperthreading enabled - detected by CPU count vs core count", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 2 -Core(s) per socket: 8 -CPU(s): 32 -On-line CPU(s) list: 0-31 -`, - wantResult: "Enabled", - }, - { - name: "Hyperthreading disabled - CPU count equals core count", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 2 -Core(s) per socket: 8 -CPU(s): 16 -On-line CPU(s) list: 0-15 -`, - wantResult: "Disabled", - }, - { - name: "Online CPUs less than total CPUs - use online count", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 1 -Core(s) per socket: 8 -CPU(s): 16 -Thread(s) per core: 2 -On-line CPU(s) list: 0-7 -`, - wantResult: "Enabled", - }, - { - name: "Missing threads per core - fallback to CPU vs core comparison", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 1 -Core(s) per socket: 8 -CPU(s): 16 -On-line CPU(s) list: 0-15 -`, - wantResult: "Enabled", - }, - { - name: "Error parsing CPU count", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 1 -Core(s) per socket: 8 -CPU(s): invalid -Thread(s) per core: 2 -On-line CPU(s) list: 0-15 -`, - wantResult: "", - }, - { - name: "Error parsing socket count", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): invalid -Core(s) per socket: 8 -CPU(s): 16 -Thread(s) per core: 2 -On-line CPU(s) list: 0-15 -`, - wantResult: "", - }, - { - name: "Error parsing cores per socket", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 1 -Core(s) per socket: invalid -CPU(s): 16 -Thread(s) per core: 2 -On-line CPU(s) list: 0-15 -`, - wantResult: "", - }, - { - name: "Invalid online CPU list - should continue with total CPU count", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 
-Socket(s): 1 -Core(s) per socket: 8 -CPU(s): 16 -Thread(s) per core: 2 -On-line CPU(s) list: invalid-range -`, - wantResult: "Enabled", - }, - { - name: "Single core CPU - disabled result", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 1 -Core(s) per socket: 1 -CPU(s): 1 -Thread(s) per core: 1 -On-line CPU(s) list: 0 -`, - wantResult: "Disabled", - }, - { - name: "4 threads per core - enabled", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 1 -Core(s) per socket: 8 -CPU(s): 32 -Thread(s) per core: 4 -On-line CPU(s) list: 0-31 -`, - wantResult: "Enabled", - }, - { - name: "Missing CPU family - getCPUExtended will fail", - lscpuOutput: ` -Model: 143 -Stepping: 8 -Socket(s): 1 -Core(s) per socket: 8 -CPU(s): 16 -Thread(s) per core: 2 -On-line CPU(s) list: 0-15 -`, - wantResult: "", - }, - { - name: "Dual socket system with hyperthreading", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 2 -Core(s) per socket: 16 -CPU(s): 64 -Thread(s) per core: 2 -On-line CPU(s) list: 0-63 -`, - wantResult: "Enabled", - }, - { - name: "Quad socket system without hyperthreading", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 4 -Core(s) per socket: 12 -CPU(s): 48 -Thread(s) per core: 1 -On-line CPU(s) list: 0-47 -`, - wantResult: "Disabled", - }, - { - name: "Offlined cores with hyperthreading disabled and no threads per core", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 1 -Core(s) per socket: 8 -CPU(s): 64 -On-line CPU(s) list: 0-7 -`, - wantResult: "Disabled", - }, - { - name: "Offlined cores with hyperthreading enabled and no threads per core", - lscpuOutput: ` -CPU family: 6 -Model: 143 -Stepping: 8 -Socket(s): 1 -Core(s) per socket: 8 -CPU(s): 64 -On-line CPU(s) list: 0-7,32-39 -`, - wantResult: "Enabled", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - outputs := map[string]script.ScriptOutput{ - script.LscpuScriptName: { - Stdout: tt.lscpuOutput, - Stderr: "", - Exitcode: 0, - }, - } - - result := hyperthreadingFromOutput(outputs) - if result != tt.wantResult { - t.Errorf("hyperthreadingFromOutput() = %q, want %q", result, tt.wantResult) - } - }) - } -} - -func TestGetFrequenciesFromMSR(t *testing.T) { - tests := []struct { - name string - msr string - want []int - expectErr bool - }{ - { - name: "Valid MSR with multiple frequencies", - msr: "0x1A2B3C4D", - want: []int{0x4D, 0x3C, 0x2B, 0x1A}, - expectErr: false, - }, - { - name: "Valid MSR with single frequency", - msr: "0x1A", - want: []int{0x1A}, - expectErr: false, - }, - { - name: "Empty MSR string", - msr: "", - want: nil, - expectErr: true, - }, - { - name: "Invalid MSR string", - msr: "invalid_hex", - want: nil, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := getFrequenciesFromHex(tt.msr) - if (err != nil) != tt.expectErr { - t.Errorf("getFrequenciesFromMSR() error = %v, expectErr %v", err, tt.expectErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("getFrequenciesFromMSR() = %v, want %v", got, tt.want) - } - }) - } -} -func TestGetBucketSizesFromMSR(t *testing.T) { - tests := []struct { - name string - msr string - want []int - expectErr bool - }{ - { - name: "Valid MSR with 8 bucket sizes", - msr: "0x0102030405060708", - want: []int{8, 7, 6, 5, 4, 3, 2, 1}, - expectErr: false, - }, - { - name: "Valid MSR with reversed order", - msr: "0x0807060504030201", - want: []int{1, 2, 3, 4, 5, 6, 7, 8}, - expectErr: false, - }, - { 
- name: "Invalid MSR string", - msr: "invalid_hex", - want: nil, - expectErr: true, - }, - { - name: "MSR with less than 8 bucket sizes", - msr: "0x01020304", - want: nil, - expectErr: true, - }, - { - name: "MSR with more than 8 bucket sizes", - msr: "0x010203040506070809", - want: nil, - expectErr: true, - }, - { - name: "Empty MSR string", - msr: "", - want: nil, - expectErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := getBucketSizesFromHex(tt.msr) - if (err != nil) != tt.expectErr { - t.Errorf("getBucketSizesFromMSR() error = %v, expectErr %v", err, tt.expectErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("getBucketSizesFromMSR() = %v, want %v", got, tt.want) - } - }) - } -} -func TestExpandTurboFrequencies(t *testing.T) { - tests := []struct { - name string - buckets [][]string - isa string - want []string - expectErr bool - }{ - { - name: "Valid input with single bucket", - buckets: [][]string{ - {"Cores", "SSE", "AVX2"}, - {"1-4", "3.5", "3.2"}, - }, - isa: "SSE", - want: []string{"3.5", "3.5", "3.5", "3.5"}, - expectErr: false, - }, - { - name: "Valid input with multiple buckets", - buckets: [][]string{ - {"Cores", "SSE", "AVX2"}, - {"1-2", "3.5", "3.2"}, - {"3-4", "3.6", "3.3"}, - }, - isa: "SSE", - want: []string{"3.5", "3.5", "3.6", "3.6"}, - expectErr: false, - }, - { - name: "ISA column not found", - buckets: [][]string{ - {"Cores", "SSE", "AVX2"}, - {"1-4", "3.5", "3.2"}, - }, - isa: "AVX512", - want: nil, - expectErr: true, - }, - { - name: "Empty buckets", - buckets: [][]string{ - {}, - }, - isa: "SSE", - want: nil, - expectErr: true, - }, - { - name: "Invalid bucket range", - buckets: [][]string{ - {"Cores", "SSE", "AVX2"}, - {"1-", "3.5", "3.2"}, - }, - isa: "SSE", - want: nil, - expectErr: true, - }, - { - name: "Empty frequency value", - buckets: [][]string{ - {"Cores", "SSE", "AVX2"}, - {"1-4", "", "3.2"}, - }, - isa: "SSE", - want: nil, - expectErr: true, - }, - { - name: "Whitespace in bucket range", - buckets: [][]string{ - {"Cores", "SSE", "AVX2"}, - {" 1-4 ", "3.5", "3.2"}, - }, - isa: "SSE", - want: []string{"3.5", "3.5", "3.5", "3.5"}, - expectErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := expandTurboFrequencies(tt.buckets, tt.isa) - if (err != nil) != tt.expectErr { - t.Errorf("expandTurboFrequencies() error = %v, expectErr %v", err, tt.expectErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("expandTurboFrequencies() = %v, want %v", got, tt.want) - } - }) - } -} -func TestGetSectionsFromOutput(t *testing.T) { - tests := []struct { - name string - output string - want map[string]string - }{ - { - name: "Valid sections with content", - output: `########## Section A ########## -Content A1 -Content A2 -########## Section B ########## -Content B1 -Content B2 -########## Section C ########## -Content C1`, - want: map[string]string{ - "Section A": "Content A1\nContent A2\n", - "Section B": "Content B1\nContent B2\n", - "Section C": "Content C1\n", - }, - }, - { - name: "Valid sections with empty content", - output: `########## Section A ########## -########## Section B ########## -########## Section C ##########`, - want: map[string]string{ - "Section A": "", - "Section B": "", - "Section C": "", - }, - }, - { - name: "No sections", - output: "No section headers here", - want: map[string]string{}, - }, - { - name: "Empty output", - output: ``, - want: map[string]string{}, - }, - { - name: "Empty lines in output", - 
output: "\n\n\n", - want: map[string]string{}, - }, - { - name: "Section with trailing newlines", - output: `########## Section A ########## - -Content A1 - -########## Section B ########## -Content B1`, - want: map[string]string{ - "Section A": "\nContent A1\n\n", - "Section B": "Content B1\n", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := getSectionsFromOutput(tt.output) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("getSectionsFromOutput() = %v, want %v", got, tt.want) - } - }) - } -} -func TestSectionValueFromOutput(t *testing.T) { - tests := []struct { - name string - output string - sectionName string - want string - }{ - { - name: "Section A exists with content", - output: `########## Section A ########## -Content A1 -Content A2 -########## Section B ########## -Content B1 -Content B2`, - sectionName: "Section A", - want: "Content A1\nContent A2\n", - }, - { - name: "Section B exists with content", - output: `########## Section A ########## -Content A1 -Content A2 -########## Section B ########## -Content B1 -Content B2`, - sectionName: "Section B", - want: "Content B1\nContent B2\n", - }, - { - name: "Section exists with no content", - output: `########## Section A ########## -########## Section B ########## -Content B1`, - sectionName: "Section A", - want: "", - }, - { - name: "Section does not exist", - output: `########## Section A ########## -Content A1 -########## Section B ########## -Content B1`, - sectionName: "Section C", - want: "", - }, - { - name: "Empty output", - output: "", - sectionName: "Section A", - want: "", - }, - { - name: "Section with trailing newlines", - output: `########## Section A ########## - -Content A1 - -########## Section B ########## -Content B1`, - sectionName: "Section A", - want: "\nContent A1\n\n", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := sectionValueFromOutput(tt.output, tt.sectionName) - if got != tt.want { - t.Errorf("sectionValueFromOutput() = %v, want %v", got, tt.want) - } - }) - } -} -func TestParseNicInfo(t *testing.T) { - nics := parseNicInfo(nicinfo) - if len(nics) != 3 { - t.Errorf("expected 3 NICs, got %d", len(nics)) - } - - // Test first NIC - first := nics[0] - if first.Name != "ens7f0np0" { - t.Errorf("expected Name 'ens7f0np0', got '%s'", first.Name) - } - if first.Vendor != "Broadcom Inc. and subsidiaries" { - t.Errorf("expected Vendor 'Broadcom Inc. 
and subsidiaries', got '%s'", first.Vendor) - } - if first.Model == "" { - t.Errorf("expected non-empty Model") - } - if first.Speed != "1000Mb/s" { - t.Errorf("expected Speed '1000Mb/s', got '%s'", first.Speed) - } - if first.Link != "yes" { - t.Errorf("expected Link 'yes', got '%s'", first.Link) - } - if first.Bus != "0000:4c:00.0" { - t.Errorf("expected Bus '0000:4c:00.0', got '%s'", first.Bus) - } - if first.Driver != "bnxt_en" { - t.Errorf("expected Driver 'bnxt_en', got '%s'", first.Driver) - } - if first.DriverVersion == "" { - t.Errorf("expected non-empty DriverVersion") - } - if first.FirmwareVersion == "" { - t.Errorf("expected non-empty FirmwareVersion") - } - if first.MACAddress != "04:32:01:f3:e1:a4" { - t.Errorf("expected MACAddress '04:32:01:f3:e1:a4', got '%s'", first.MACAddress) - } - if first.NUMANode != "0" { - t.Errorf("expected NUMANode '0', got '%s'", first.NUMANode) - } - if first.CPUAffinity == "" { - t.Errorf("expected non-empty CPUAffinity") - } - if first.AdaptiveRX != "off" { - t.Errorf("expected AdaptiveRX 'off', got '%s'", first.AdaptiveRX) - } - if first.AdaptiveTX != "off" { - t.Errorf("expected AdaptiveTX 'off', got '%s'", first.AdaptiveTX) - } - if first.RxUsecs != "200" { - t.Errorf("expected RxUsecs '200', got '%s'", first.RxUsecs) - } - if first.TxUsecs != "150" { - t.Errorf("expected TxUsecs '150', got '%s'", first.TxUsecs) - } - if first.IsVirtual { - t.Errorf("expected IsVirtual to be false for first NIC") - } - - // Spot check second NIC - second := nics[1] - if second.Name != "ens7f1np1" { - t.Errorf("expected Name 'ens7f1np1', got '%s'", second.Name) - } - if second.Model != "BCM57416 NetXtreme-E Dual-Media 10G RDMA Ethernet Controller" { - t.Errorf("expected Model 'BCM57416 NetXtreme-E Dual-Media 10G RDMA Ethernet Controller', got '%s'", second.Model) - } - if second.Link != "no" { - t.Errorf("expected Link 'no', got '%s'", second.Link) - } - if second.AdaptiveRX != "on" { - t.Errorf("expected AdaptiveRX 'on', got '%s'", second.AdaptiveRX) - } - if second.AdaptiveTX != "on" { - t.Errorf("expected AdaptiveTX 'on', got '%s'", second.AdaptiveTX) - } - if second.RxUsecs != "100" { - t.Errorf("expected RxUsecs '100', got '%s'", second.RxUsecs) - } - if second.TxUsecs != "100" { - t.Errorf("expected TxUsecs '100', got '%s'", second.TxUsecs) - } - - // Spot check third NIC - third := nics[2] - if third.Name != "enx2aecf92702ac" { - t.Errorf("expected Name 'enx2aecf92702ac', got '%s'", third.Name) - } - if third.Vendor != "Netchip Technology, Inc." 
{ - t.Errorf("expected Vendor 'Netchip Technology, Inc.', got '%s'", third.Vendor) - } -} - -func TestParseNicInfoWithVirtualFunction(t *testing.T) { - nicinfoWithVF := ` -Interface: eth0 -Vendor: Intel Corporation -Vendor ID: 8086 -Model: Ethernet Adaptive Virtual Function -Model ID: 1889 -Speed: 10000Mb/s -Link detected: yes -driver: iavf -version: 6.13.7-061307-generic -firmware-version: N/A -bus-info: 0000:c0:11.0 -MAC Address: 00:11:22:33:44:55 -NUMA Node: 1 -Virtual Function: yes -CPU Affinity: 100:0-63; -IRQ Balance: Enabled -Adaptive RX: on TX: on -rx-usecs: 100 -tx-usecs: 100 ----------------------------------------- -Interface: eth1 -Vendor: Intel Corporation -Vendor ID: 8086 -Model: Ethernet Controller E810-C -Model ID: 1592 -Speed: 25000Mb/s -Link detected: yes -driver: ice -version: 6.13.7-061307-generic -firmware-version: 4.20 -bus-info: 0000:c0:00.0 -MAC Address: aa:bb:cc:dd:ee:ff -NUMA Node: 1 -Virtual Function: no -CPU Affinity: 200:0-63; -IRQ Balance: Enabled -Adaptive RX: off TX: off -rx-usecs: 50 -tx-usecs: 50 ----------------------------------------- -` - nics := parseNicInfo(nicinfoWithVF) - if len(nics) != 2 { - t.Fatalf("expected 2 NICs, got %d", len(nics)) - } - - // Test virtual function - vf := nics[0] - if vf.Name != "eth0" { - t.Errorf("expected Name 'eth0', got '%s'", vf.Name) - } - if !vf.IsVirtual { - t.Errorf("expected IsVirtual to be true for eth0") - } - if vf.Model != "Ethernet Adaptive Virtual Function" { - t.Errorf("expected Model 'Ethernet Adaptive Virtual Function', got '%s'", vf.Model) - } - - // Test physical function - pf := nics[1] - if pf.Name != "eth1" { - t.Errorf("expected Name 'eth1', got '%s'", pf.Name) - } - if pf.IsVirtual { - t.Errorf("expected IsVirtual to be false for eth1") - } - if pf.Model != "Ethernet Controller E810-C" { - t.Errorf("expected Model 'Ethernet Controller E810-C', got '%s'", pf.Model) - } -} - -func TestNicTableValuesWithVirtualFunction(t *testing.T) { - nicinfoWithVF := ` -Interface: eth0 -Vendor: Intel Corporation -Vendor ID: 8086 -Model: Ethernet Adaptive Virtual Function -Model ID: 1889 -Speed: 10000Mb/s -Link detected: yes -driver: iavf -version: 6.13.7-061307-generic -firmware-version: N/A -bus-info: 0000:c0:11.0 -MAC Address: 00:11:22:33:44:55 -NUMA Node: 1 -Virtual Function: yes -CPU Affinity: 100:0-63; -IRQ Balance: Enabled -Adaptive RX: on TX: on -rx-usecs: 100 -tx-usecs: 100 ----------------------------------------- -Interface: eth1 -Vendor: Intel Corporation -Vendor ID: 8086 -Model: Ethernet Controller E810-C -Model ID: 1592 -Speed: 25000Mb/s -Link detected: yes -driver: ice -version: 6.13.7-061307-generic -firmware-version: 4.20 -bus-info: 0000:c0:00.0 -MAC Address: aa:bb:cc:dd:ee:ff -NUMA Node: 1 -Virtual Function: no -CPU Affinity: 200:0-63; -IRQ Balance: Enabled -Adaptive RX: off TX: off -rx-usecs: 50 -tx-usecs: 50 ----------------------------------------- -` - - outputs := map[string]script.ScriptOutput{ - script.NicInfoScriptName: {Stdout: nicinfoWithVF}, - } - - fields := nicTableValues(outputs) - - if len(fields) == 0 { - t.Fatal("Expected fields, got empty slice") - } - - // Find the Name field - nameField := fields[0] - if nameField.Name != "Name" { - t.Fatalf("Expected first field to be 'Name', got '%s'", nameField.Name) - } - - if len(nameField.Values) != 2 { - t.Fatalf("Expected 2 NIC names, got %d", len(nameField.Values)) - } - - // Check that the virtual function has "(virtual)" annotation - if nameField.Values[0] != "eth0 (virtual)" { - t.Errorf("Expected 'eth0 (virtual)', got '%s'", 
nameField.Values[0]) - } - - // Check that the physical function does not have "(virtual)" annotation - if nameField.Values[1] != "eth1" { - t.Errorf("Expected 'eth1', got '%s'", nameField.Values[1]) - } -} - -var nicinfo = ` -Interface: ens7f0np0 -Vendor: Broadcom Inc. and subsidiaries -Model: BCM57416 NetXtreme-E Dual-Media 10G RDMA Ethernet Controller (NetXtreme-E Dual-port 10GBASE-T Ethernet OCP 3.0 Adapter (BCM957416N4160C)) -Settings for ens7f0np0: - Supported ports: [ TP ] - Supported link modes: 1000baseT/Full - 10000baseT/Full - Supported pause frame use: Symmetric Receive-only - Supports auto-negotiation: Yes - Supported FEC modes: Not reported - Advertised link modes: 1000baseT/Full - 10000baseT/Full - Advertised pause frame use: No - Advertised auto-negotiation: Yes - Advertised FEC modes: Not reported - Speed: 1000Mb/s - Lanes: 1 - Duplex: Full - Auto-negotiation: on - Port: Twisted Pair - PHYAD: 12 - Transceiver: internal - MDI-X: Unknown - Supports Wake-on: g - Wake-on: g - Current message level: 0x00002081 (8321) - drv tx_err hw - Link detected: yes -driver: bnxt_en -version: 6.13.7-061307-generic -firmware-version: 227.0.134.0/pkg 227.1.111.0 -expansion-rom-version: -bus-info: 0000:4c:00.0 -supports-statistics: yes -supports-test: yes -supports-eeprom-access: yes -supports-register-dump: yes -supports-priv-flags: no -Coalesce parameters for ens7f0np0: -Adaptive RX: off TX: off -stats-block-usecs: 0 -sample-interval: 0 -pkt-rate-low: 0 -pkt-rate-high: 0 - -rx-usecs: 200 -rx-frames: 0 -rx-usecs-irq: 0 -rx-frames-irq: 0 - -tx-usecs: 150 -tx-frames: 0 -tx-usecs-irq: 0 -tx-frames-irq: 0 -MAC Address: 04:32:01:f3:e1:a4 -NUMA Node: 0 -Virtual Function: no -CPU Affinity: 124:0-143;125:0-143;126:0-143;127:0-143;128:0-143;129:0-143;130:0-143;131:0-143;132:0-143;133:0-143;134:0-143;135:0-143;136:0-143;137:0-143;138:0-143;139:0-143;140:0-143;141:0-143;142:0-143;143:0-143;144:0-143;145:0-143;146:0-143;147:0-143;148:0-143;149:0-143;150:0-143;151:0-143;152:0-143;153:0-143;154:0-143;155:0-143;156:0-143;157:0-143;158:0-143;159:0-143;160:0-143;161:0-143;162:0-143;163:0-143;164:0-143;165:0-143;166:0-143;167:0-143;168:0-143;169:0-143;170:0-143;171:0-143;172:0-143;173:0-143;174:0-143;175:0-143;176:0-143;177:0-143;178:0-143;179:0-143;180:0-143;181:0-143;182:0-143;184:0-143;185:0-143;186:0-143;187:0-143;188:0-143;189:0-143;190:0-143;191:0-143;192:0-143;193:0-143;194:0-143;195:0-143;196:0-143;197:0-143;198:0-143; -IRQ Balance: Disabled ----------------------------------------- -Interface: ens7f1np1 -Vendor: Broadcom Inc. and subsidiaries -Model: BCM57416 NetXtreme-E Dual-Media 10G RDMA Ethernet Controller (NetXtreme-E Dual-port 10GBASE-T Ethernet OCP 3.0 Adapter (BCM957416N4160C)) -Settings for ens7f1np1: - Supported ports: [ TP ] - Supported link modes: 1000baseT/Full - 10000baseT/Full - Supported pause frame use: Symmetric Receive-only - Supports auto-negotiation: Yes - Supported FEC modes: Not reported - Advertised link modes: 1000baseT/Full - 10000baseT/Full - Advertised pause frame use: Symmetric - Advertised auto-negotiation: Yes - Advertised FEC modes: Not reported - Speed: Unknown! - Duplex: Unknown! 
(255) - Auto-negotiation: on - Port: Twisted Pair - PHYAD: 13 - Transceiver: internal - MDI-X: Unknown - Supports Wake-on: g - Wake-on: g - Current message level: 0x00002081 (8321) - drv tx_err hw - Link detected: no -driver: bnxt_en -version: 6.13.7-061307-generic -firmware-version: 227.0.134.0/pkg 227.1.111.0 -expansion-rom-version: -bus-info: 0000:4c:00.1 -supports-statistics: yes -supports-test: yes -supports-eeprom-access: yes -supports-register-dump: yes -supports-priv-flags: no -Coalesce parameters for ens7f1np1: -Adaptive RX: on TX: on -stats-block-usecs: 0 -sample-interval: 0 -pkt-rate-low: 0 -pkt-rate-high: 0 - -rx-usecs: 100 -rx-frames: 0 -rx-usecs-irq: 0 -rx-frames-irq: 0 - -tx-usecs: 100 -tx-frames: 0 -tx-usecs-irq: 0 -tx-frames-irq: 0 -MAC Address: 04:32:01:f3:e1:a5 -NUMA Node: 0 -Virtual Function: no -CPU Affinity: 454:0-143;455:0-143;456:0-143;457:0-143;458:0-143;459:0-143;460:0-143;461:0-143;462:0-143;463:0-143;464:0-143;465:0-143;466:0-143;467:0-143;468:0-143;469:0-143;470:0-143;471:0-143;472:0-143;473:0-143;474:0-143;475:0-143;476:0-143;477:0-143;478:0-143;479:0-143;480:0-143;481:0-143;482:0-143;483:0-143;484:0-143;485:0-143;486:0-143;487:0-143;488:0-143;489:0-143;490:0-143;491:0-143;492:0-143;493:0-143;494:0-143;495:0-143;496:0-143;497:0-143;498:0-143;499:0-143;500:0-143;501:0-143;502:0-143;503:0-143;504:0-143;505:0-143;506:0-143;507:0-143;508:0-143;509:0-143;510:0-143;511:0-143;512:0-143;513:0-143;514:0-143;515:0-143;516:0-143;517:0-143;518:0-143;519:0-143;520:0-143;521:0-143;522:0-143;523:0-143;524:0-143;525:0-143;526:0-143;527:0-143; -IRQ Balance: Disabled ----------------------------------------- -Interface: enx2aecf92702ac -Vendor: Netchip Technology, Inc. -Model: Linux-USB Ethernet/RNDIS Gadget -Settings for enx2aecf92702ac: - Supported ports: [ ] - Supported link modes: Not reported - Supported pause frame use: No - Supports auto-negotiation: No - Supported FEC modes: Not reported - Advertised link modes: Not reported - Advertised pause frame use: No - Advertised auto-negotiation: No - Advertised FEC modes: Not reported - Speed: 425Mb/s - Duplex: Half - Auto-negotiation: off - Port: Twisted Pair - PHYAD: 0 - Transceiver: internal - MDI-X: Unknown - Current message level: 0x00000007 (7) - drv probe link - Link detected: yes -driver: cdc_ether -version: 6.13.7-061307-generic -firmware-version: CDC Ethernet Device -expansion-rom-version: -bus-info: usb-0000:2c:00.0-3.1 -supports-statistics: no -supports-test: no -supports-eeprom-access: no -supports-register-dump: no -supports-priv-flags: no -MAC Address: 2a:ec:f9:27:02:ac -NUMA Node: -Virtual Function: no -CPU Affinity: -IRQ Balance: Disabled ----------------------------------------- -` diff --git a/internal/report/table_helpers_turbostat.go b/internal/report/turbostat.go similarity index 100% rename from internal/report/table_helpers_turbostat.go rename to internal/report/turbostat.go diff --git a/internal/report/table_helpers_turbostat_test.go b/internal/report/turbostat_test.go similarity index 100% rename from internal/report/table_helpers_turbostat_test.go rename to internal/report/turbostat_test.go From 115568d061f5dc7ebeec931b1a1eb3ff8aa5b7e9 Mon Sep 17 00:00:00 2001 From: "Harper, Jason M" Date: Thu, 4 Dec 2025 14:43:01 -0800 Subject: [PATCH 2/6] move some of report module into new table module Signed-off-by: Harper, Jason M --- cmd/config/config.go | 11 +- cmd/config/flag_groups.go | 4 +- cmd/config/set.go | 12 +- cmd/flame/flame.go | 5 +- cmd/lock/lock.go | 7 +- cmd/metrics/metadata.go | 8 +- 
 cmd/config/config.go | 11 +-
 cmd/config/flag_groups.go | 4 +-
 cmd/config/set.go | 12 +-
 cmd/flame/flame.go | 5 +-
 cmd/lock/lock.go | 7 +-
 cmd/metrics/metadata.go | 8 +-
 cmd/report/report.go | 154 ++++++++--------
 cmd/telemetry/telemetry.go | 79 +++++----
 go.mod | 1 +
 internal/common/common.go | 31 ++--
 internal/report/render_excel.go | 36 ++--
 internal/report/render_html.go | 176 ++++++++++++-------
 internal/report/render_html_flamegraph.go | 7 +-
 internal/report/render_json.go | 7 +-
 internal/report/render_text.go | 26 ++-
 internal/report/report.go | 5 +-
 internal/{report => table}/accelerator.go | 2 +-
 internal/{report => table}/benchmarking.go | 2 +-
 internal/{report => table}/cache.go | 2 +-
 internal/{report => table}/cache_test.go | 2 +-
 internal/{report => table}/cpu.go | 2 +-
 internal/{report => table}/cpu_test.go | 2 +-
 internal/{report => table}/dimm.go | 2 +-
 internal/{report => table}/frequency.go | 2 +-
 internal/{report => table}/frequency_test.go | 2 +-
 internal/{report => table}/gpu.go | 2 +-
 internal/{report => table}/isa.go | 2 +-
 internal/{report => table}/nic.go | 2 +-
 internal/{report => table}/nic_test.go | 2 +-
 internal/{report => table}/power.go | 2 +-
 internal/{report => table}/prefetcher.go | 2 +-
 internal/{report => table}/security.go | 2 +-
 internal/{report => table}/stacks.go | 2 +-
 internal/{report => table}/stacks_test.go | 2 +-
 internal/{report => table}/storage.go | 2 +-
 internal/{report => table}/system.go | 2 +-
 internal/{report => table}/table.go | 5 +-
 internal/{report => table}/table_defs.go | 131 ++++++--------
 internal/{report => table}/table_helpers.go | 2 +-
 internal/{report => table}/turbostat.go | 2 +-
 internal/{report => table}/turbostat_test.go | 2 +-
 41 files changed, 410 insertions(+), 341 deletions(-)
 rename internal/{report => table}/accelerator.go (99%)
 rename internal/{report => table}/benchmarking.go (99%)
 rename internal/{report => table}/cache.go (99%)
 rename internal/{report => table}/cache_test.go (99%)
 rename internal/{report => table}/cpu.go (99%)
 rename internal/{report => table}/cpu_test.go (99%)
 rename internal/{report => table}/dimm.go (99%)
 rename internal/{report => table}/frequency.go (99%)
 rename internal/{report => table}/frequency_test.go (99%)
 rename internal/{report => table}/gpu.go (99%)
 rename internal/{report => table}/isa.go (99%)
 rename internal/{report => table}/nic.go (99%)
 rename internal/{report => table}/nic_test.go (99%)
 rename internal/{report => table}/power.go (99%)
 rename internal/{report => table}/prefetcher.go (99%)
 rename internal/{report => table}/security.go (98%)
 rename internal/{report => table}/stacks.go (99%)
 rename internal/{report => table}/stacks_test.go (99%)
 rename internal/{report => table}/storage.go (99%)
 rename internal/{report => table}/system.go (99%)
 rename internal/{report => table}/table.go (96%)
 rename internal/{report => table}/table_defs.go (96%)
 rename internal/{report => table}/table_helpers.go (99%)
 rename internal/{report => table}/turbostat.go (99%)
 rename internal/{report => table}/turbostat_test.go (99%)

diff --git a/cmd/config/config.go b/cmd/config/config.go
index bd1a2b03..46982432 100644
--- a/cmd/config/config.go
+++ b/cmd/config/config.go
@@ -13,6 +13,7 @@ import (
 	"perfspect/internal/progress"
 	"perfspect/internal/report"
 	"perfspect/internal/script"
+	"perfspect/internal/table"
 	"perfspect/internal/target"
 	"perfspect/internal/util"
 	"slices"
@@ -283,7 +284,7 @@ func setOnTarget(cmd *cobra.Command, myTarget target.Target, flagGroups []flagGr
 
 // getConfig collects the configuration data from the target(s)
 func getConfig(myTargets []target.Target, localTempDir string) ([]common.TargetScriptOutputs, error) {
-	scriptNames := report.GetScriptNamesForTable(report.ConfigurationTableName)
+	scriptNames := table.GetScriptNamesForTable(table.ConfigurationTableName)
 	var scriptsToRun []script.ScriptDefinition
 	for _, scriptName := range scriptNames {
 		scriptsToRun = append(scriptsToRun, script.GetScriptByName(scriptName))
@@ -317,7 +318,7 @@ func getConfig(myTargets []target.Target, localTempDir string) ([]common.TargetS
 	for _, target := range myTargets {
 		for _, targetScriptOutputs := range allTargetScriptOutputs {
 			if targetScriptOutputs.TargetName == target.GetName() {
-				targetScriptOutputs.TableNames = []string{report.ConfigurationTableName}
+				targetScriptOutputs.TableNames = []string{table.ConfigurationTableName}
 				orderedTargetScriptOutputs = append(orderedTargetScriptOutputs, targetScriptOutputs)
 				break
 			}
@@ -333,9 +334,9 @@ func processConfig(targetScriptOutputs []common.TargetScriptOutputs) (map[string
 	var err error
 	for _, targetScriptOutput := range targetScriptOutputs {
 		// process the tables, i.e., get field values from raw script output
-		tableNames := []string{report.ConfigurationTableName}
-		var tableValues []report.TableValues
-		if tableValues, err = report.ProcessTables(tableNames, targetScriptOutput.ScriptOutputs); err != nil {
+		tableNames := []string{table.ConfigurationTableName}
+		var tableValues []table.TableValues
+		if tableValues, err = table.ProcessTables(tableNames, targetScriptOutput.ScriptOutputs); err != nil {
 			err = fmt.Errorf("failed to process collected data: %v", err)
 			return nil, err
 		}
diff --git a/cmd/config/flag_groups.go b/cmd/config/flag_groups.go
index 10fd0f11..57d3e677 100644
--- a/cmd/config/flag_groups.go
+++ b/cmd/config/flag_groups.go
@@ -6,7 +6,7 @@ package config
 import (
 	"fmt"
 	"perfspect/internal/common"
-	"perfspect/internal/report"
+	"perfspect/internal/table"
 	"perfspect/internal/target"
 	"regexp"
 	"slices"
@@ -193,7 +193,7 @@ func initializeFlags(cmd *cobra.Command) {
 	flagGroups = append(flagGroups, group)
 	// prefetcher options
 	group = flagGroup{name: flagGroupPrefetcherName, flags: []flagDefinition{}}
-	for _, pref := range report.GetPrefetcherDefinitions() {
+	for _, pref := range table.GetPrefetcherDefinitions() {
 		group.flags = append(group.flags,
 			newStringFlag(cmd,
 				// flag name
diff --git a/cmd/config/set.go b/cmd/config/set.go
index 745193c0..ebe77e9a 100644
--- a/cmd/config/set.go
+++ b/cmd/config/set.go
@@ -5,8 +5,8 @@ import (
 	"log/slog"
 	"math"
 	"perfspect/internal/cpus"
-	"perfspect/internal/report"
 	"perfspect/internal/script"
+	"perfspect/internal/table"
 	"perfspect/internal/target"
 	"perfspect/internal/util"
 	"regexp"
@@ -131,7 +131,7 @@ func setLlcSize(desiredLlcSize float64, myTarget target.Target, localTempDir str
 		return fmt.Errorf("failed to run scripts on target: %w", err)
 	}
 
-	uarch := report.UarchFromOutput(outputs)
+	uarch := table.UarchFromOutput(outputs)
 	cpu, err := cpus.GetCPUByMicroArchitecture(uarch)
 	if err != nil {
 		return fmt.Errorf("failed to get CPU by microarchitecture: %w", err)
@@ -139,11 +139,11 @@ func setLlcSize(desiredLlcSize float64, myTarget target.Target, localTempDir str
 	if cpu.CacheWayCount == 0 {
 		return fmt.Errorf("cache way count is zero")
 	}
-	maximumLlcSize, _, err := report.GetL3LscpuMB(outputs)
+	maximumLlcSize, _, err := table.GetL3LscpuMB(outputs)
 	if err != nil {
 		return fmt.Errorf("failed to get maximum LLC size: %w", err)
 	}
-	currentLlcSize, _, err := report.GetL3MSRMB(outputs)
+	currentLlcSize, _, err := table.GetL3MSRMB(outputs)
 	if err != nil {
 		return fmt.Errorf("failed to get current LLC size: %w", err)
 	}
@@ -823,7 +823,7 @@ func getUarch(myTarget target.Target, localTempDir string) (string, error) {
 	if err != nil {
 		return "", fmt.Errorf("failed to run scripts on target: %w", err)
 	}
-	uarch := report.UarchFromOutput(outputs)
+	uarch := table.UarchFromOutput(outputs)
 	if uarch == "" {
 		return "", fmt.Errorf("failed to get microarchitecture")
 	}
@@ -831,7 +831,7 @@ func getUarch(myTarget target.Target, localTempDir string) (string, error) {
 }
 
 func setPrefetcher(enableDisable string, myTarget target.Target, localTempDir string, prefetcherType string) error {
-	pf, err := report.GetPrefetcherDefByName(prefetcherType)
+	pf, err := table.GetPrefetcherDefByName(prefetcherType)
 	if err != nil {
 		return fmt.Errorf("failed to get prefetcher definition: %w", err)
 	}
diff --git a/cmd/flame/flame.go b/cmd/flame/flame.go
index b1f47dde..76186a9d 100644
--- a/cmd/flame/flame.go
+++ b/cmd/flame/flame.go
@@ -9,6 +9,7 @@ import (
 	"os"
 	"perfspect/internal/common"
 	"perfspect/internal/report"
+	"perfspect/internal/table"
 	"perfspect/internal/util"
 	"slices"
 	"strconv"
@@ -177,9 +178,9 @@ func validateFlags(cmd *cobra.Command, args []string) error {
 func runCmd(cmd *cobra.Command, args []string) error {
 	var tableNames []string
 	if !flagNoSystemSummary {
-		tableNames = append(tableNames, report.BriefSysSummaryTableName)
+		tableNames = append(tableNames, table.BriefSysSummaryTableName)
 	}
-	tableNames = append(tableNames, report.CallStackFrequencyTableName)
+	tableNames = append(tableNames, table.CallStackFrequencyTableName)
 	reportingCommand := common.ReportingCommand{
 		Cmd:            cmd,
 		ReportNamePost: "flame",
diff --git a/cmd/lock/lock.go b/cmd/lock/lock.go
index 03c3e5db..184f1f1f 100755
--- a/cmd/lock/lock.go
+++ b/cmd/lock/lock.go
@@ -11,6 +11,7 @@ import (
 	"perfspect/internal/progress"
 	"perfspect/internal/report"
 	"perfspect/internal/script"
+	"perfspect/internal/table"
 	"perfspect/internal/target"
 	"slices"
 	"strconv"
@@ -165,7 +166,7 @@ func formalizeOutputFormat(outputFormat []string) []string {
 
 func pullDataFiles(appContext common.AppContext, scriptOutputs map[string]script.ScriptOutput, myTarget target.Target, statusUpdate progress.MultiSpinnerUpdateFunc) error {
 	localOutputDir := appContext.OutputDir
-	tableValues := report.GetValuesForTable(report.KernelLockAnalysisTableName, scriptOutputs)
+	tableValues := table.GetValuesForTable(table.KernelLockAnalysisTableName, scriptOutputs)
 	found := false
 	for _, field := range tableValues.Fields {
 		if field.Name == "Perf Package Path" {
@@ -194,9 +195,9 @@ func pullDataFiles(appContext common.AppContext, scriptOutputs map[string]script
 func runCmd(cmd *cobra.Command, args []string) error {
 	var tableNames []string
 	if !flagNoSystemSummary {
-		tableNames = append(tableNames, report.BriefSysSummaryTableName)
+		tableNames = append(tableNames, table.BriefSysSummaryTableName)
 	}
-	tableNames = append(tableNames, report.KernelLockAnalysisTableName)
+	tableNames = append(tableNames, table.KernelLockAnalysisTableName)
 	reportingCommand := common.ReportingCommand{
 		Cmd:            cmd,
 		ReportNamePost: "lock",
diff --git a/cmd/metrics/metadata.go b/cmd/metrics/metadata.go
index 882dff59..66fd37ee 100644
--- a/cmd/metrics/metadata.go
+++ b/cmd/metrics/metadata.go
@@ -20,8 +20,8 @@ import (
 
 	"perfspect/internal/cpus"
 	"perfspect/internal/progress"
-	"perfspect/internal/report"
 	"perfspect/internal/script"
+	"perfspect/internal/table"
 	"perfspect/internal/target"
 )
 
@@ -526,7 +526,7 @@ func getMetadataScripts(noRoot bool, noSystemSummary bool, numGPCounters int) (m
 	}
 	// add the system summary table scripts to the list
 	if !noSystemSummary {
-		table := report.GetTableByName(report.BriefSysSummaryTableName)
+		table := table.GetTableByName(table.BriefSysSummaryTableName)
 		for _, scriptName := range table.ScriptNames {
 			scriptDef := script.GetScriptByName(scriptName)
 			metadataScripts = append(metadataScripts, scriptDef)
@@ -609,8 +609,8 @@ func ReadJSONFromFile(path string) (md Metadata, err error) {
 
 // getSystemSummary - retrieves the system summary from the target
 func getSystemSummary(scriptOutputs map[string]script.ScriptOutput) (summaryFields [][]string, err error) {
-	var allTableValues []report.TableValues
-	allTableValues, err = report.ProcessTables([]string{report.BriefSysSummaryTableName}, scriptOutputs)
+	var allTableValues []table.TableValues
+	allTableValues, err = table.ProcessTables([]string{table.BriefSysSummaryTableName}, scriptOutputs)
 	if err != nil {
 		err = fmt.Errorf("failed to process script outputs: %w", err)
 		return
diff --git a/cmd/report/report.go b/cmd/report/report.go
index 49e9094b..7e7545b9 100644
--- a/cmd/report/report.go
+++ b/cmd/report/report.go
@@ -19,6 +19,7 @@ import (
 	"perfspect/internal/common"
 	"perfspect/internal/report"
 	"perfspect/internal/script"
+	"perfspect/internal/table"
 	"perfspect/internal/util"
 )
 
@@ -139,51 +140,51 @@ var benchmarkOptions = []string{
 var benchmarkAll = "all"
 
 var benchmarkTableNames = map[string][]string{
-	"speed":       {report.SpeedBenchmarkTableName},
-	"power":       {report.PowerBenchmarkTableName},
-	"temperature": {report.TemperatureBenchmarkTableName},
-	"frequency":   {report.FrequencyBenchmarkTableName},
-	"memory":      {report.MemoryBenchmarkTableName},
-	"numa":        {report.NUMABenchmarkTableName},
-	"storage":     {report.StorageBenchmarkTableName},
+	"speed":       {table.SpeedBenchmarkTableName},
+	"power":       {table.PowerBenchmarkTableName},
+	"temperature": {table.TemperatureBenchmarkTableName},
+	"frequency":   {table.FrequencyBenchmarkTableName},
+	"memory":      {table.MemoryBenchmarkTableName},
+	"numa":        {table.NUMABenchmarkTableName},
+	"storage":     {table.StorageBenchmarkTableName},
 }
 
 var benchmarkSummaryTableName = "Benchmark Summary"
 
 // categories maps flag names to tables that will be included in report
 var categories = []common.Category{
-	{FlagName: flagSystemSummaryName, FlagVar: &flagSystemSummary, Help: "System Summary", TableNames: []string{report.SystemSummaryTableName}},
-	{FlagName: flagHostName, FlagVar: &flagHost, Help: "Host", TableNames: []string{report.HostTableName}},
-	{FlagName: flagBiosName, FlagVar: &flagBios, Help: "BIOS", TableNames: []string{report.BIOSTableName}},
-	{FlagName: flagOsName, FlagVar: &flagOs, Help: "Operating System", TableNames: []string{report.OperatingSystemTableName}},
-	{FlagName: flagSoftwareName, FlagVar: &flagSoftware, Help: "Software Versions", TableNames: []string{report.SoftwareVersionTableName}},
-	{FlagName: flagCpuName, FlagVar: &flagCpu, Help: "Processor Details", TableNames: []string{report.CPUTableName}},
-	{FlagName: flagPrefetcherName, FlagVar: &flagPrefetcher, Help: "Prefetchers", TableNames: []string{report.PrefetcherTableName}},
-	{FlagName: flagIsaName, FlagVar: &flagIsa, Help: "Instruction Sets", TableNames: []string{report.ISATableName}},
-	{FlagName: flagAcceleratorName, FlagVar: &flagAccelerator, Help: "On-board Accelerators", TableNames: []string{report.AcceleratorTableName}},
-	{FlagName: flagPowerName, FlagVar: &flagPower, Help: "Power Settings", TableNames: []string{report.PowerTableName}},
-	{FlagName: flagCstatesName, FlagVar: &flagCstates, Help: "C-states", TableNames: []string{report.CstateTableName}},
-	{FlagName: flagFrequencyName, FlagVar: &flagFrequency, Help: "Maximum Frequencies", TableNames: []string{report.MaximumFrequencyTableName}},
-	{FlagName: flagSSTName, FlagVar: &flagSST, Help: "Speed Select Technology Settings", TableNames: []string{report.SSTTFHPTableName, report.SSTTFLPTableName}},
-	{FlagName: flagUncoreName, FlagVar: &flagUncore, Help: "Uncore Configuration", TableNames: []string{report.UncoreTableName}},
-	{FlagName: flagElcName, FlagVar: &flagElc, Help: "Efficiency Latency Control Settings", TableNames: []string{report.ElcTableName}},
-	{FlagName: flagMemoryName, FlagVar: &flagMemory, Help: "Memory Configuration", TableNames: []string{report.MemoryTableName}},
-	{FlagName: flagDimmName, FlagVar: &flagDimm, Help: "DIMM Population", TableNames: []string{report.DIMMTableName}},
-	{FlagName: flagNetConfigName, FlagVar: &flagNetConfig, Help: "Network Configuration", TableNames: []string{report.NetworkConfigTableName}},
-	{FlagName: flagNicName, FlagVar: &flagNic, Help: "Network Cards", TableNames: []string{report.NICTableName, report.NICCpuAffinityTableName, report.NICPacketSteeringTableName}},
-	{FlagName: flagDiskName, FlagVar: &flagDisk, Help: "Storage Devices", TableNames: []string{report.DiskTableName}},
-	{FlagName: flagFilesystemName, FlagVar: &flagFilesystem, Help: "File Systems", TableNames: []string{report.FilesystemTableName}},
-	{FlagName: flagGpuName, FlagVar: &flagGpu, Help: "GPUs", TableNames: []string{report.GPUTableName}},
-	{FlagName: flagGaudiName, FlagVar: &flagGaudi, Help: "Gaudi Devices", TableNames: []string{report.GaudiTableName}},
-	{FlagName: flagCxlName, FlagVar: &flagCxl, Help: "CXL Devices", TableNames: []string{report.CXLTableName}},
-	{FlagName: flagPcieName, FlagVar: &flagPcie, Help: "PCIE Slots", TableNames: []string{report.PCIeTableName}},
-	{FlagName: flagCveName, FlagVar: &flagCve, Help: "Vulnerabilities", TableNames: []string{report.CVETableName}},
-	{FlagName: flagProcessName, FlagVar: &flagProcess, Help: "Process List", TableNames: []string{report.ProcessTableName}},
-	{FlagName: flagSensorName, FlagVar: &flagSensor, Help: "Sensor Status", TableNames: []string{report.SensorTableName}},
-	{FlagName: flagChassisStatusName, FlagVar: &flagChassisStatus, Help: "Chassis Status", TableNames: []string{report.ChassisStatusTableName}},
-	{FlagName: flagPmuName, FlagVar: &flagPmu, Help: "Performance Monitoring Unit Status", TableNames: []string{report.PMUTableName}},
-	{FlagName: flagSystemEventLogName, FlagVar: &flagSystemEventLog, Help: "System Event Log", TableNames: []string{report.SystemEventLogTableName}},
-	{FlagName: flagKernelLogName, FlagVar: &flagKernelLog, Help: "Kernel Log", TableNames: []string{report.KernelLogTableName}},
+	{FlagName: flagSystemSummaryName, FlagVar: &flagSystemSummary, Help: "System Summary", TableNames: []string{table.SystemSummaryTableName}},
+	{FlagName: flagHostName, FlagVar: &flagHost, Help: "Host", TableNames: []string{table.HostTableName}},
+	{FlagName: flagBiosName, FlagVar: &flagBios, Help: "BIOS", TableNames: []string{table.BIOSTableName}},
+	{FlagName: flagOsName, FlagVar: &flagOs, Help: "Operating System", TableNames: []string{table.OperatingSystemTableName}},
+	{FlagName: flagSoftwareName, FlagVar: &flagSoftware, Help: "Software Versions", TableNames: []string{table.SoftwareVersionTableName}},
+	{FlagName: flagCpuName, FlagVar: &flagCpu, Help: "Processor Details", TableNames: []string{table.CPUTableName}},
+	{FlagName: flagPrefetcherName, FlagVar: &flagPrefetcher, Help: "Prefetchers", TableNames: []string{table.PrefetcherTableName}},
+	{FlagName: flagIsaName, FlagVar: &flagIsa, Help: "Instruction Sets", TableNames: []string{table.ISATableName}},
+	{FlagName: flagAcceleratorName, FlagVar: &flagAccelerator, Help: "On-board Accelerators", TableNames: []string{table.AcceleratorTableName}},
+	{FlagName: flagPowerName, FlagVar: &flagPower, Help: "Power Settings", TableNames: []string{table.PowerTableName}},
+	{FlagName: flagCstatesName, FlagVar: &flagCstates, Help: "C-states", TableNames: []string{table.CstateTableName}},
+	{FlagName: flagFrequencyName, FlagVar: &flagFrequency, Help: "Maximum Frequencies", TableNames: []string{table.MaximumFrequencyTableName}},
+	{FlagName: flagSSTName, FlagVar: &flagSST, Help: "Speed Select Technology Settings", TableNames: []string{table.SSTTFHPTableName, table.SSTTFLPTableName}},
+	{FlagName: flagUncoreName, FlagVar: &flagUncore, Help: "Uncore Configuration", TableNames: []string{table.UncoreTableName}},
+	{FlagName: flagElcName, FlagVar: &flagElc, Help: "Efficiency Latency Control Settings", TableNames: []string{table.ElcTableName}},
+	{FlagName: flagMemoryName, FlagVar: &flagMemory, Help: "Memory Configuration", TableNames: []string{table.MemoryTableName}},
+	{FlagName: flagDimmName, FlagVar: &flagDimm, Help: "DIMM Population", TableNames: []string{table.DIMMTableName}},
+	{FlagName: flagNetConfigName, FlagVar: &flagNetConfig, Help: "Network Configuration", TableNames: []string{table.NetworkConfigTableName}},
+	{FlagName: flagNicName, FlagVar: &flagNic, Help: "Network Cards", TableNames: []string{table.NICTableName, table.NICCpuAffinityTableName, table.NICPacketSteeringTableName}},
+	{FlagName: flagDiskName, FlagVar: &flagDisk, Help: "Storage Devices", TableNames: []string{table.DiskTableName}},
+	{FlagName: flagFilesystemName, FlagVar: &flagFilesystem, Help: "File Systems", TableNames: []string{table.FilesystemTableName}},
+	{FlagName: flagGpuName, FlagVar: &flagGpu, Help: "GPUs", TableNames: []string{table.GPUTableName}},
+	{FlagName: flagGaudiName, FlagVar: &flagGaudi, Help: "Gaudi Devices", TableNames: []string{table.GaudiTableName}},
+	{FlagName: flagCxlName, FlagVar: &flagCxl, Help: "CXL Devices", TableNames: []string{table.CXLTableName}},
+	{FlagName: flagPcieName, FlagVar: &flagPcie, Help: "PCIE Slots", TableNames: []string{table.PCIeTableName}},
+	{FlagName: flagCveName, FlagVar: &flagCve, Help: "Vulnerabilities", TableNames: []string{table.CVETableName}},
+	{FlagName: flagProcessName, FlagVar: &flagProcess, Help: "Process List", TableNames: []string{table.ProcessTableName}},
+	{FlagName: flagSensorName, FlagVar: &flagSensor, Help: "Sensor Status", TableNames: []string{table.SensorTableName}},
+	{FlagName: flagChassisStatusName, FlagVar: &flagChassisStatus, Help: "Chassis Status", TableNames: []string{table.ChassisStatusTableName}},
+	{FlagName: flagPmuName, FlagVar: &flagPmu, Help: "Performance Monitoring Unit Status", TableNames: []string{table.PMUTableName}},
+	{FlagName: flagSystemEventLogName, FlagVar: &flagSystemEventLog, Help: "System Event Log", TableNames: []string{table.SystemEventLogTableName}},
+	{FlagName: flagKernelLogName, FlagVar: &flagKernelLog, Help: "Kernel Log", TableNames: []string{table.KernelLogTableName}},
 }
 
 func init() {
@@ -354,26 +355,26 @@ func runCmd(cmd *cobra.Command, args []string) error {
 		TableNames:             tableNames,
 		SummaryFunc:            summaryFunc,
 		SummaryTableName:       benchmarkSummaryTableName,
-		SummaryBeforeTableName: report.SpeedBenchmarkTableName,
+		SummaryBeforeTableName: table.SpeedBenchmarkTableName,
 		InsightsFunc:           insightsFunc,
 	}
 	return reportingCommand.Run()
 }
 
-func benchmarkSummaryFromTableValues(allTableValues []report.TableValues, outputs map[string]script.ScriptOutput) report.TableValues {
-	maxFreq := getValueFromTableValues(getTableValues(allTableValues, report.FrequencyBenchmarkTableName), "SSE", 0)
+func benchmarkSummaryFromTableValues(allTableValues []table.TableValues, outputs map[string]script.ScriptOutput) table.TableValues {
+	maxFreq := getValueFromTableValues(getTableValues(allTableValues, table.FrequencyBenchmarkTableName), "SSE", 0)
 	if maxFreq != "" {
 		maxFreq = maxFreq + " GHz"
 	}
-	allCoreMaxFreq := getValueFromTableValues(getTableValues(allTableValues, report.FrequencyBenchmarkTableName), "SSE", -1)
+	allCoreMaxFreq := getValueFromTableValues(getTableValues(allTableValues, table.FrequencyBenchmarkTableName), "SSE", -1)
 	if allCoreMaxFreq != "" {
 		allCoreMaxFreq = allCoreMaxFreq + " GHz"
 	}
 	// get the maximum memory bandwidth from the memory latency table
-	memLatTableValues := getTableValues(allTableValues, report.MemoryBenchmarkTableName)
+	memLatTableValues := getTableValues(allTableValues, table.MemoryBenchmarkTableName)
 	var bandwidthValues []string
 	if len(memLatTableValues.Fields) > 1 {
-		bandwidthValues = getTableValues(allTableValues, report.MemoryBenchmarkTableName).Fields[1].Values
+		bandwidthValues = getTableValues(allTableValues, table.MemoryBenchmarkTableName).Fields[1].Values
 	}
 	maxBandwidth := 0.0
 	for _, bandwidthValue := range bandwidthValues {
@@ -391,50 +392,51 @@ func benchmarkSummaryFromTableValues(allTableValues []report.TableValues, output
 		maxMemBW = fmt.Sprintf("%.1f GB/s", maxBandwidth)
 	}
 	// get the minimum memory latency
-	minLatency := getValueFromTableValues(getTableValues(allTableValues, report.MemoryBenchmarkTableName), "Latency (ns)", 0)
+	minLatency := getValueFromTableValues(getTableValues(allTableValues, table.MemoryBenchmarkTableName), "Latency (ns)", 0)
 	if minLatency != "" {
 		minLatency = minLatency + " ns"
 	}
-	return report.TableValues{
-		TableDefinition: report.TableDefinition{
-			Name:                  benchmarkSummaryTableName,
-			HasRows:               false,
-			MenuLabel:             benchmarkSummaryTableName,
-			HTMLTableRendererFunc: summaryHTMLTableRenderer,
-			XlsxTableRendererFunc: summaryXlsxTableRenderer,
-			TextTableRendererFunc: summaryTextTableRenderer,
+	report.RegisterHTMLRenderer(benchmarkSummaryTableName, summaryHTMLTableRenderer)
+	report.RegisterTextRenderer(benchmarkSummaryTableName, summaryTextTableRenderer)
+	report.RegisterXlsxRenderer(benchmarkSummaryTableName, summaryXlsxTableRenderer)
+
+	return table.TableValues{
+		TableDefinition: table.TableDefinition{
+			Name:      benchmarkSummaryTableName,
+			HasRows:   false,
+			MenuLabel: benchmarkSummaryTableName,
 		},
-		Fields: []report.Field{
-			{Name: "CPU Speed", Values: []string{getValueFromTableValues(getTableValues(allTableValues, report.SpeedBenchmarkTableName), "Ops/s", 0) + " Ops/s"}},
+		Fields: []table.Field{
+			{Name: "CPU Speed", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.SpeedBenchmarkTableName), "Ops/s", 0) + " Ops/s"}},
 			{Name: "Single-core Maximum frequency", Values: []string{maxFreq}},
 			{Name: "All-core Maximum frequency", Values: []string{allCoreMaxFreq}},
-			{Name: "Maximum Power", Values: []string{getValueFromTableValues(getTableValues(allTableValues, report.PowerBenchmarkTableName), "Maximum Power", 0)}},
-			{Name: "Maximum Temperature", Values: []string{getValueFromTableValues(getTableValues(allTableValues, report.TemperatureBenchmarkTableName), "Maximum Temperature", 0)}},
-			{Name: "Minimum Power", Values: []string{getValueFromTableValues(getTableValues(allTableValues, report.PowerBenchmarkTableName), "Minimum Power", 0)}},
+			{Name: "Maximum Power", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.PowerBenchmarkTableName), "Maximum Power", 0)}},
+			{Name: "Maximum Temperature", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.TemperatureBenchmarkTableName), "Maximum Temperature", 0)}},
+			{Name: "Minimum Power", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.PowerBenchmarkTableName), "Minimum Power", 0)}},
 			{Name: "Memory Peak Bandwidth", Values: []string{maxMemBW}},
 			{Name: "Memory Minimum Latency", Values: []string{minLatency}},
-			{Name: "Disk Read Bandwidth", Values: []string{getValueFromTableValues(getTableValues(allTableValues, report.StorageBenchmarkTableName), "Single-Thread Read Bandwidth", 0)}},
-			{Name: "Disk Write Bandwidth", Values: []string{getValueFromTableValues(getTableValues(allTableValues, report.StorageBenchmarkTableName), "Single-Thread Write Bandwidth", 0)}},
-			{Name: "Microarchitecture", Values: []string{getValueFromTableValues(getTableValues(allTableValues, report.SystemSummaryTableName), "Microarchitecture", 0)}},
-			{Name: "Sockets", Values: []string{getValueFromTableValues(getTableValues(allTableValues, report.SystemSummaryTableName), "Sockets", 0)}},
+			{Name: "Disk Read Bandwidth", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.StorageBenchmarkTableName), "Single-Thread Read Bandwidth", 0)}},
+			{Name: "Disk Write Bandwidth", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.StorageBenchmarkTableName), "Single-Thread Write Bandwidth", 0)}},
+			{Name: "Microarchitecture", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.SystemSummaryTableName), "Microarchitecture", 0)}},
+			{Name: "Sockets", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.SystemSummaryTableName), "Sockets", 0)}},
 		},
 	}
 }
 
 // getTableValues returns the table values for a table with a given name
-func getTableValues(allTableValues []report.TableValues, tableName string) report.TableValues {
+func getTableValues(allTableValues []table.TableValues, tableName string) table.TableValues {
 	for _, tv := range allTableValues {
 		if tv.Name == tableName {
 			return tv
 		}
 	}
-	return report.TableValues{}
+	return table.TableValues{}
 }
 
 // getValueFromTableValues returns the value of a field in a table
 // if row is -1, it returns the last value
-func getValueFromTableValues(tv report.TableValues, fieldName string, row int) string {
+func getValueFromTableValues(tv table.TableValues, fieldName string, row int) string {
 	for _, fv := range tv.Fields {
 		if fv.Name == fieldName {
 			if row == -1 { // return the last value
@@ -485,7 +487,7 @@ var referenceData = map[ReferenceDataKey]ReferenceData{
 }
 
 // getFieldIndex returns the index of a field in a list of fields
-func getFieldIndex(fields []report.Field, fieldName string) (int, error) {
+func getFieldIndex(fields []table.Field, fieldName string) (int, error) {
 	for i, field := range fields {
 		if field.Name == fieldName {
 			return i, nil
@@ -496,7 +498,7 @@ func getFieldIndex(fields []report.Field, fieldName string) (int, error) {
 
 // summaryHTMLTableRenderer is a custom HTML table renderer for the summary table
 // it removes the Microarchitecture and Sockets fields and adds a reference table
-func summaryHTMLTableRenderer(tv report.TableValues, targetName string) string {
+func summaryHTMLTableRenderer(tv table.TableValues, targetName string) string {
 	uarchFieldIdx, err := getFieldIndex(tv.Fields, "Microarchitecture")
 	if err != nil {
 		panic(err)
 	}
@@ -509,8 +511,8 @@ func summaryHTMLTableRenderer(tv report.TableValues, targetName string) string {
 	if refData, ok := referenceData[ReferenceDataKey{tv.Fields[uarchFieldIdx].Values[0], tv.Fields[socketsFieldIdx].Values[0]}]; ok {
 		// remove microarchitecture and sockets fields
 		fields := tv.Fields[:len(tv.Fields)-2]
-		refTableValues := report.TableValues{
-			Fields: []report.Field{
+		refTableValues := table.TableValues{
+			Fields: []table.Field{
 				{Name: "CPU Speed", Values: []string{fmt.Sprintf("%.0f Ops/s", refData.CPUSpeed)}},
 				{Name: "Single-core Maximum frequency", Values: []string{fmt.Sprintf("%.0f MHz", refData.SingleCoreFreq)}},
 				{Name: "All-core Maximum frequency", Values: []string{fmt.Sprintf("%.0f MHz", refData.AllCoreFreq)}},
@@ -521,22 +523,22 @@ func summaryHTMLTableRenderer(tv report.TableValues, targetName string) string {
 				{Name: "Memory Minimum Latency", Values: []string{fmt.Sprintf("%.0f ns", refData.MemMinLatency)}},
 			},
 		}
-		return report.RenderMultiTargetTableValuesAsHTML([]report.TableValues{{TableDefinition: tv.TableDefinition, Fields: fields}, refTableValues}, []string{targetName, refData.Description})
+		return report.RenderMultiTargetTableValuesAsHTML([]table.TableValues{{TableDefinition: tv.TableDefinition, Fields: fields}, refTableValues}, []string{targetName, refData.Description})
 	} else {
 		// remove microarchitecture and sockets fields
 		fields := tv.Fields[:len(tv.Fields)-2]
-		return report.DefaultHTMLTableRendererFunc(report.TableValues{TableDefinition: tv.TableDefinition, Fields: fields})
+		return report.DefaultHTMLTableRendererFunc(table.TableValues{TableDefinition: tv.TableDefinition, Fields: fields})
 	}
 }
 
-func summaryXlsxTableRenderer(tv report.TableValues, f *excelize.File, targetName string, row *int) {
+func summaryXlsxTableRenderer(tv table.TableValues, f *excelize.File, targetName string, row *int) {
 	// remove microarchitecture and sockets fields
 	fields := tv.Fields[:len(tv.Fields)-2]
-	report.DefaultXlsxTableRendererFunc(report.TableValues{TableDefinition: tv.TableDefinition, Fields: fields}, f, report.XlsxPrimarySheetName, row)
+	report.DefaultXlsxTableRendererFunc(table.TableValues{TableDefinition: tv.TableDefinition, Fields: fields}, f, report.XlsxPrimarySheetName, row)
 }
 
-func summaryTextTableRenderer(tv report.TableValues) string {
+func summaryTextTableRenderer(tv table.TableValues) string {
 	// remove microarchitecture and sockets fields
 	fields := tv.Fields[:len(tv.Fields)-2]
-	return report.DefaultTextTableRendererFunc(report.TableValues{TableDefinition: tv.TableDefinition, Fields: fields})
+	return report.DefaultTextTableRendererFunc(table.TableValues{TableDefinition: tv.TableDefinition, Fields: fields})
 }
diff --git a/cmd/telemetry/telemetry.go b/cmd/telemetry/telemetry.go
index 39955a04..d42b1ec9 100644
--- a/cmd/telemetry/telemetry.go
+++ b/cmd/telemetry/telemetry.go
@@ -15,6 +15,7 @@ import (
 	"perfspect/internal/common"
 	"perfspect/internal/report"
 	"perfspect/internal/script"
+	"perfspect/internal/table"
 
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
@@ -95,17 +96,17 @@ const (
 var telemetrySummaryTableName = "Telemetry Summary"
 
 var categories = []common.Category{
-	{FlagName: flagCPUName, FlagVar: &flagCPU, DefaultValue: false, Help: "monitor cpu utilization", TableNames: []string{report.CPUUtilizationTelemetryTableName, report.UtilizationCategoriesTelemetryTableName}},
-	{FlagName: flagIPCName, FlagVar: &flagIPC, DefaultValue: false, Help: "monitor IPC", TableNames: []string{report.IPCTelemetryTableName}},
-	{FlagName: flagC6Name, FlagVar: &flagC6, DefaultValue: false, Help: "monitor C6 residency", TableNames: []string{report.C6TelemetryTableName}},
-	{FlagName: flagFrequencyName, FlagVar: &flagFrequency, DefaultValue: false, Help: "monitor cpu frequency", TableNames: []string{report.FrequencyTelemetryTableName}},
-	{FlagName: flagPowerName, FlagVar: &flagPower, DefaultValue: false, Help: "monitor power", TableNames: []string{report.PowerTelemetryTableName}},
-	{FlagName: flagTemperatureName, FlagVar: &flagTemperature, DefaultValue: false, Help: "monitor temperature", TableNames: []string{report.TemperatureTelemetryTableName}},
-	{FlagName: flagMemoryName, FlagVar: &flagMemory, DefaultValue: false, Help: "monitor memory", TableNames: []string{report.MemoryTelemetryTableName}},
-	{FlagName: flagNetworkName, FlagVar: &flagNetwork, DefaultValue: false, Help: "monitor network", TableNames: []string{report.NetworkTelemetryTableName}},
-	{FlagName: flagStorageName, FlagVar: &flagStorage, DefaultValue: false, Help: "monitor storage", TableNames: []string{report.DriveTelemetryTableName}},
-	{FlagName: flagIRQRateName, FlagVar: &flagIRQRate, DefaultValue: false, Help: "monitor IRQ rate", TableNames: []string{report.IRQRateTelemetryTableName}},
-	{FlagName: flagInstrMixName, FlagVar: &flagInstrMix, DefaultValue: false, Help: "monitor instruction mix", TableNames: []string{report.InstructionTelemetryTableName}},
+	{FlagName: flagCPUName, FlagVar: &flagCPU, DefaultValue: false, Help: "monitor cpu utilization", TableNames: []string{table.CPUUtilizationTelemetryTableName, table.UtilizationCategoriesTelemetryTableName}},
+	{FlagName: flagIPCName, FlagVar: &flagIPC, DefaultValue: false, Help: "monitor IPC", TableNames: []string{table.IPCTelemetryTableName}},
+	{FlagName: flagC6Name, FlagVar: &flagC6, DefaultValue: false, Help: "monitor C6 residency", TableNames: []string{table.C6TelemetryTableName}},
+	{FlagName: flagFrequencyName, FlagVar: &flagFrequency, DefaultValue: false, Help: "monitor cpu frequency", TableNames: []string{table.FrequencyTelemetryTableName}},
+	{FlagName: flagPowerName, FlagVar: &flagPower, DefaultValue: false, Help: "monitor power", TableNames: []string{table.PowerTelemetryTableName}},
+	{FlagName: flagTemperatureName, FlagVar: &flagTemperature, DefaultValue: false, Help: "monitor temperature", TableNames: []string{table.TemperatureTelemetryTableName}},
+	{FlagName: flagMemoryName, FlagVar: &flagMemory, DefaultValue: false, Help: "monitor memory", TableNames: []string{table.MemoryTelemetryTableName}},
+	{FlagName: flagNetworkName, FlagVar: &flagNetwork, DefaultValue: false, Help: "monitor network", TableNames: []string{table.NetworkTelemetryTableName}},
+	{FlagName: flagStorageName, FlagVar: &flagStorage, DefaultValue: false, Help: "monitor storage", TableNames: []string{table.DriveTelemetryTableName}},
+	{FlagName: flagIRQRateName, FlagVar: &flagIRQRate, DefaultValue: false, Help: "monitor IRQ rate", TableNames: []string{table.IRQRateTelemetryTableName}},
+	{FlagName: flagInstrMixName, FlagVar: &flagInstrMix, DefaultValue: false, Help: "monitor instruction mix", TableNames: []string{table.InstructionTelemetryTableName}},
 }
 
 const (
@@ -272,7 +273,7 @@ func runCmd(cmd *cobra.Command, args []string) error {
 	var tableNames []string
 	// add system summary table if not disabled
 	if !flagNoSystemSummary {
-		tableNames = append(tableNames, report.BriefSysSummaryTableName)
+		tableNames = append(tableNames, table.BriefSysSummaryTableName)
 	}
 	// add category tables
 	for _, cat := range categories {
@@ -291,7 +292,7 @@ func runCmd(cmd *cobra.Command, args []string) error {
 	gaudiHlsmiPath := os.Getenv("PERFSPECT_GAUDI_HLSMI_PATH") // must be full path to hlsmi binary
 	if gaudiHlsmiPath != "" {
 		slog.Info("Gaudi telemetry enabled", slog.String("hlsmi_path", gaudiHlsmiPath))
-		tableNames = append(tableNames, report.GaudiTelemetryTableName)
+		tableNames = append(tableNames, table.GaudiTelemetryTableName)
 	}
 	// hidden feature - PDU telemetry, only enabled when four environment variables are set
 	pduHost := os.Getenv("PERFSPECT_PDU_HOST")
@@ -300,7 +301,7 @@ func runCmd(cmd *cobra.Command, args []string) error {
 	pduOutlet := os.Getenv("PERFSPECT_PDU_OUTLET")
 	if pduHost != "" && pduUser != "" && pduPassword != "" && pduOutlet != "" {
 		slog.Info("PDU telemetry enabled", slog.String("host", pduHost), slog.String("outlet", pduOutlet))
-		tableNames = append(tableNames, report.PDUTelemetryTableName)
+		tableNames = append(tableNames, table.PDUTelemetryTableName)
 	}
 	// include telemetry summary table if all telemetry options are selected
 	var summaryFunc common.SummaryFunc
@@ -329,40 +330,40 @@ func runCmd(cmd *cobra.Command, args []string) error {
 		TableNames:             tableNames,
 		SummaryFunc:            summaryFunc,
 		SummaryTableName:       telemetrySummaryTableName,
-		SummaryBeforeTableName: report.CPUUtilizationTelemetryTableName,
+		SummaryBeforeTableName: table.CPUUtilizationTelemetryTableName,
 		InsightsFunc:           insightsFunc,
 	}
 	return reportingCommand.Run()
 }
 
-func getTableValues(allTableValues []report.TableValues, tableName string) report.TableValues {
+func getTableValues(allTableValues []table.TableValues, tableName string) table.TableValues {
 	for _, tv := range allTableValues {
 		if tv.Name == tableName {
 			return tv
 		}
 	}
-	return report.TableValues{}
+	return table.TableValues{}
 }
 
-func summaryFromTableValues(allTableValues []report.TableValues, _ map[string]script.ScriptOutput) report.TableValues {
-	cpuUtil := getCPUAveragePercentage(getTableValues(allTableValues, report.UtilizationCategoriesTelemetryTableName), "%idle", true)
-	ipc := getCPUAveragePercentage(getTableValues(allTableValues, report.IPCTelemetryTableName), "Core (Avg.)", false)
-	c6 := getCPUAveragePercentage(getTableValues(allTableValues, report.C6TelemetryTableName), "Core (Avg.)", false)
-	avgCoreFreq := getMetricAverage(getTableValues(allTableValues, report.FrequencyTelemetryTableName), []string{"Core (Avg.)"}, "Time")
+func summaryFromTableValues(allTableValues []table.TableValues, _ map[string]script.ScriptOutput) table.TableValues {
+	cpuUtil := getCPUAveragePercentage(getTableValues(allTableValues, table.UtilizationCategoriesTelemetryTableName), "%idle", true)
+	ipc := getCPUAveragePercentage(getTableValues(allTableValues, table.IPCTelemetryTableName), "Core (Avg.)", false)
+	c6 := getCPUAveragePercentage(getTableValues(allTableValues, table.C6TelemetryTableName), "Core (Avg.)", false)
+	avgCoreFreq := getMetricAverage(getTableValues(allTableValues, table.FrequencyTelemetryTableName), []string{"Core (Avg.)"}, "Time")
 	pkgPower := getPkgAveragePower(allTableValues)
 	pkgTemperature := getPkgAverageTemperature(allTableValues)
-	driveReads := getMetricAverage(getTableValues(allTableValues, report.DriveTelemetryTableName), []string{"kB_read/s"}, "Device")
-	driveWrites := getMetricAverage(getTableValues(allTableValues, report.DriveTelemetryTableName), []string{"kB_wrtn/s"}, "Device")
-	networkReads := getMetricAverage(getTableValues(allTableValues, report.NetworkTelemetryTableName), []string{"rxkB/s"}, "Time")
-	networkWrites := getMetricAverage(getTableValues(allTableValues, report.NetworkTelemetryTableName), []string{"txkB/s"}, "Time")
-	memAvail := getMetricAverage(getTableValues(allTableValues, report.MemoryTelemetryTableName), []string{"avail"}, "Time")
-	return report.TableValues{
-		TableDefinition: report.TableDefinition{
+	driveReads := getMetricAverage(getTableValues(allTableValues, table.DriveTelemetryTableName), []string{"kB_read/s"}, "Device")
+	driveWrites := getMetricAverage(getTableValues(allTableValues, table.DriveTelemetryTableName), []string{"kB_wrtn/s"}, "Device")
+	networkReads := getMetricAverage(getTableValues(allTableValues, table.NetworkTelemetryTableName), []string{"rxkB/s"}, "Time")
+	networkWrites := getMetricAverage(getTableValues(allTableValues, table.NetworkTelemetryTableName), []string{"txkB/s"}, "Time")
+	memAvail := getMetricAverage(getTableValues(allTableValues, table.MemoryTelemetryTableName), []string{"avail"}, "Time")
+	return table.TableValues{
+		TableDefinition: table.TableDefinition{
 			Name:      telemetrySummaryTableName,
 			HasRows:   false,
 			MenuLabel: telemetrySummaryTableName,
 		},
-		Fields: []report.Field{
+		Fields: []table.Field{
 			{Name: "CPU Utilization (%)", Values: []string{cpuUtil}},
 			{Name: "IPC", Values: []string{ipc}},
 			{Name: "C6 Core Residency (%)", Values: []string{c6}},
@@ -378,7 +379,7 @@ func summaryFromTableValues(allTableValues []report.TableValues, _ map[string]sc
 	}
 }
 
-func getMetricAverage(tableValues report.TableValues, fieldNames []string, separatorFieldName string) (average string) {
+func getMetricAverage(tableValues table.TableValues, fieldNames []string, separatorFieldName string) (average string) {
 	sum, seps, err := getSumOfFields(tableValues.Fields, fieldNames, separatorFieldName)
 	if err != nil {
 		slog.Error("failed to get sum of fields for IO metrics", slog.String("error", err.Error()))
@@ -392,7 +393,7 @@ func getMetricAverage(tableValues report.TableValues, fieldNames []string, separ
 	return
 }
 
-func getFieldIndex(fields []report.Field, fieldName string) (int, error) {
+func getFieldIndex(fields []table.Field, fieldName string) (int, error) {
 	for i, field := range fields {
 		if field.Name == fieldName {
 			return i, nil
@@ -401,7 +402,7 @@ func getFieldIndex(fields []report.Field, fieldName string) (int, error) {
 	return -1, fmt.Errorf("field not found: %s", fieldName)
 }
 
-func getSumOfFields(fields []report.Field, fieldNames []string, separatorFieldName string) (sum float64, numSeparators int, err error) {
+func getSumOfFields(fields []table.Field, fieldNames []string, separatorFieldName string) (sum float64, numSeparators int, err error) {
 	prevSeparator := ""
 	var separatorIdx int
 	if separatorFieldName != "" {
@@ -438,12 +439,12 @@ func getSumOfFields(fields []report.Field, fieldNames []string, separatorFieldNa
 	return
 }
 
-func getCPUAveragePercentage(tableValues report.TableValues, fieldName string, inverse bool) string {
+func getCPUAveragePercentage(tableValues table.TableValues, fieldName string, inverse bool) string {
 	if len(tableValues.Fields) == 0 {
 		return ""
 	}
 	var fieldIndex int
-	var fv report.Field
+	var fv table.Field
 	for fieldIndex, fv = range tableValues.Fields {
 		if fv.Name == fieldName {
 			break
@@ -468,8 +469,8 @@ func getCPUAveragePercentage(tableValues report.TableValues, fieldName string, i
 	return ""
 }
 
-func getPkgAverageTemperature(allTableValues []report.TableValues) string {
-	tableValues := getTableValues(allTableValues, report.TemperatureTelemetryTableName)
+func getPkgAverageTemperature(allTableValues []table.TableValues) string {
+	tableValues := getTableValues(allTableValues, table.TemperatureTelemetryTableName)
 	// number of packages can vary, so we need to find the average temperature across all packages
 	if len(tableValues.Fields) == 0 {
 		return ""
@@ -501,8 +502,8 @@ func getPkgAverageTemperature(allTableValues []report.TableValues) string {
 	return ""
 }
 
-func getPkgAveragePower(allTableValues []report.TableValues) string {
-	tableValues := getTableValues(allTableValues, report.PowerTelemetryTableName)
+func getPkgAveragePower(allTableValues []table.TableValues) string {
+	tableValues := getTableValues(allTableValues, table.PowerTelemetryTableName)
 	// number of packages can vary, so we need to find the average power across all packages
 	if len(tableValues.Fields) == 0 {
 		return ""
diff --git a/go.mod b/go.mod
index 58dca4e0..b3bb06ba 100644
--- a/go.mod
+++ b/go.mod
@@ -7,6 +7,7 @@ replace (
 	perfspect/internal/cpudb => ./internal/cpudb
 	perfspect/internal/progress => ./internal/progress
 	perfspect/internal/report => ./internal/report
+	perfspect/internal/table => ./internal/table
 	perfspect/internal/script => ./internal/script
 	perfspect/internal/target => ./internal/target
 	perfspect/internal/util => ./internal/util
diff --git a/internal/common/common.go b/internal/common/common.go
index b2b47c52..f446596b 100644
--- a/internal/common/common.go
+++ b/internal/common/common.go
@@ -15,6 +15,7 @@ import (
 	"perfspect/internal/progress"
 	"perfspect/internal/report"
 	"perfspect/internal/script"
+	"perfspect/internal/table"
 	"perfspect/internal/target"
 	"perfspect/internal/util"
 	"strings"
@@ -83,7 +84,7 @@ const (
 	FlagFormatName = "format"
 )
 
-type SummaryFunc func([]report.TableValues, map[string]script.ScriptOutput) report.TableValues
+type SummaryFunc func([]table.TableValues, map[string]script.ScriptOutput) table.TableValues
 type InsightsFunc SummaryFunc
 type AdhocFunc func(AppContext, map[string]script.ScriptOutput, target.Target, progress.MultiSpinnerUpdateFunc) error
 
@@ -291,14 +292,14 @@ func (rc *ReportingCommand) Run() error {
 }
 
 // DefaultInsightsFunc returns the insights table values from the table values
-func DefaultInsightsFunc(allTableValues []report.TableValues, scriptOutputs map[string]script.ScriptOutput) report.TableValues {
-	insightsTableValues := report.TableValues{
-		TableDefinition: report.TableDefinition{
+func DefaultInsightsFunc(allTableValues []table.TableValues, scriptOutputs map[string]script.ScriptOutput) table.TableValues {
+	insightsTableValues := table.TableValues{
+		TableDefinition: table.TableDefinition{
 			Name:      TableNameInsights,
 			HasRows:   true,
 			MenuLabel: TableNameInsights,
 		},
-		Fields: []report.Field{
+		Fields: []table.Field{
 			{Name: "Recommendation", Values: []string{}},
 			{Name: "Justification", Values: []string{}},
 		},
@@ -361,10 +362,10 @@ func writeReport(reportBytes []byte, reportPath string) error {
 
 // createReports processes the collected data and creates the requested report(s)
 func (rc *ReportingCommand) createReports(appContext AppContext, orderedTargetScriptOutputs []TargetScriptOutputs, formats []string) ([]string, error) {
 	reportFilePaths := []string{}
-	allTargetsTableValues := make([][]report.TableValues, 0)
+	allTargetsTableValues := make([][]table.TableValues, 0)
 	for _, targetScriptOutputs := range orderedTargetScriptOutputs {
 		// process the tables, i.e., get field values from script output
-		allTableValues, err := report.ProcessTables(targetScriptOutputs.TableNames, targetScriptOutputs.ScriptOutputs)
+		allTableValues, err := table.ProcessTables(targetScriptOutputs.TableNames, targetScriptOutputs.ScriptOutputs)
 		if err != nil {
 			err = fmt.Errorf("failed to process collected data: %w", err)
 			return nil, err
@@ -379,7 +380,7 @@ func (rc *ReportingCommand) createReports(appContext AppContext, orderedTargetSc
 			if tableValues.TableDefinition.Name == rc.SummaryBeforeTableName {
 				summaryBeforeTableFound = true
 				// insert the summary table before this table
-				allTableValues = append(allTableValues[:i], append([]report.TableValues{summaryTableValues}, allTableValues[i:]...)...)
+				allTableValues = append(allTableValues[:i], append([]table.TableValues{summaryTableValues}, allTableValues[i:]...)...)
 				break
 			}
 		}
@@ -395,11 +396,11 @@ func (rc *ReportingCommand) createReports(appContext AppContext, orderedTargetSc
 		allTableValues = append(allTableValues, insightsTableValues)
 	}
 	// special case - add tableValues for the application version
-	allTableValues = append(allTableValues, report.TableValues{
-		TableDefinition: report.TableDefinition{
+	allTableValues = append(allTableValues, table.TableValues{
+		TableDefinition: table.TableDefinition{
 			Name: TableNamePerfspect,
 		},
-		Fields: []report.Field{
+		Fields: []table.Field{
 			{Name: "Version", Values: []string{appContext.Version}},
 			{Name: "Args", Values: []string{strings.Join(os.Args, " ")}},
 			{Name: "OutputDir", Values: []string{appContext.OutputDir}},
@@ -464,7 +465,7 @@ func (rc *ReportingCommand) createReports(appContext AppContext, orderedTargetSc
 
 // extractTableNamesFromValues extracts the table names from the processed table values for each target.
 // It returns a slice of slices, where each inner slice contains the table names for a target.
-func extractTableNamesFromValues(allTargetsTableValues [][]report.TableValues) [][]string {
+func extractTableNamesFromValues(allTargetsTableValues [][]table.TableValues) [][]string {
 	targetTableNames := make([][]string, 0, len(allTargetsTableValues))
 	for _, tableValues := range allTargetsTableValues {
 		names := make([]string, 0, len(tableValues))
@@ -511,11 +512,11 @@ func outputsFromTargets(cmd *cobra.Command, myTargets []target.Target, tableName
 		targetTableNames = append(targetTableNames, []string{})
 		targetScriptNames = append(targetScriptNames, []string{})
 		for _, tableName := range tableNames {
-			if report.IsTableForTarget(tableName, target) {
+			if table.IsTableForTarget(tableName, target) {
 				// add table to list of tables to collect
 				targetTableNames[targetIdx] = util.UniqueAppend(targetTableNames[targetIdx], tableName)
 				// add scripts to list of scripts to run
-				for _, scriptName := range report.GetScriptNamesForTable(tableName) {
+				for _, scriptName := range table.GetScriptNamesForTable(tableName) {
 					targetScriptNames[targetIdx] = util.UniqueAppend(targetScriptNames[targetIdx], scriptName)
 				}
 			} else {
@@ -561,7 +562,7 @@ func outputsFromTargets(cmd *cobra.Command, myTargets []target.Target, tableName
 func elevatedPrivilegesRequired(tableNames []string) bool {
 	for _, tableName := range tableNames {
 		// add scripts to list of scripts to run
-		for _, scriptName := range report.GetScriptNamesForTable(tableName) {
+		for _, scriptName := range table.GetScriptNamesForTable(tableName) {
 			script := script.GetScriptByName(scriptName)
 			if script.Superuser {
 				return true
diff --git a/internal/report/render_excel.go b/internal/report/render_excel.go
index d70ca152..c3edf09e 100644
--- a/internal/report/render_excel.go
+++ b/internal/report/render_excel.go
@@ -7,11 +7,27 @@ import (
 	"bufio"
 	"bytes"
 	"fmt"
+	"perfspect/internal/table"
 	"strconv"
 
 	"github.com/xuri/excelize/v2"
 )
 
+// Package-level map for custom XLSX renderers
+var customXlsxRenderers = map[string]table.XlsxTableRenderer{
+	// No custom XLSX renderers currently defined
+}
+
+// getCustomXlsxRenderer returns the custom XLSX renderer for a table, or nil if no custom renderer exists
+func getCustomXlsxRenderer(tableName string) table.XlsxTableRenderer {
+	return customXlsxRenderers[tableName]
+}
+
+// RegisterXlsxRenderer allows external packages to register custom XLSX renderers for specific tables
+func RegisterXlsxRenderer(tableName string, renderer table.XlsxTableRenderer) {
+	customXlsxRenderers[tableName] = renderer
+}
+
 func cellName(col int, row int) (name string) {
 	columnName, err := excelize.ColumnNumberToName(col)
 	if err != nil {
@@ -24,7 +40,7 @@ func cellName(col int, row int) (name string) {
 	return
 }
 
-func renderXlsxTable(tableValues TableValues, f *excelize.File, sheetName string, row *int) {
+func renderXlsxTable(tableValues table.TableValues, f *excelize.File, sheetName string, row *int) {
 	col := 1
 	// print the table name
 	tableNameStyle, _ := f.NewStyle(&excelize.Style{
@@ -44,15 +60,15 @@ func renderXlsxTable(tableValues TableValues, f *excelize.File, sheetName string
 		*row += 2
 		return
 	}
-	if tableValues.XlsxTableRendererFunc != nil {
-		tableValues.XlsxTableRendererFunc(tableValues, f, sheetName, row)
+	if renderer := getCustomXlsxRenderer(tableValues.Name); renderer != nil {
+		renderer(tableValues, f, sheetName, row)
 	} else {
 		DefaultXlsxTableRendererFunc(tableValues, f, sheetName, row)
 	}
 	*row++
 }
 
-func renderXlsxTableMultiTarget(targetTableValues []TableValues, targetNames []string, f *excelize.File, sheetName string, row *int) {
+func renderXlsxTableMultiTarget(targetTableValues []table.TableValues, targetNames []string, f *excelize.File, sheetName string, row *int) {
 	col := 1
 	// print the table name
 	tableNameStyle, _ := f.NewStyle(&excelize.Style{
@@ -146,7 +162,7 @@ func renderXlsxTableMultiTarget(targetTableValues []TableValues, targetNames []s
 	*row++
 }
 
-func DefaultXlsxTableRendererFunc(tableValues TableValues, f *excelize.File, sheetName string, row *int) {
+func DefaultXlsxTableRendererFunc(tableValues table.TableValues, f *excelize.File, sheetName string, row *int) {
 	headerStyle, _ := f.NewStyle(&excelize.Style{
 		Font: &excelize.Font{
 			Bold: true,
@@ -207,7 +223,7 @@ const (
 	XlsxBriefSheetName   = "Brief"
 )
 
-func createXlsxReport(allTableValues []TableValues) (out []byte, err error) {
+func createXlsxReport(allTableValues []table.TableValues) (out []byte, err error) {
 	f := excelize.NewFile()
 	sheetName := XlsxPrimarySheetName
 	_ = f.SetSheetName("Sheet1", sheetName)
@@ -215,7 +231,7 @@ func createXlsxReport(allTableValues []TableValues) (out []byte, err error) {
 	_ = f.SetColWidth(sheetName, "B", "L", 25)
 	row := 1
 	for _, tableValues := range allTableValues {
-		if tableValues.Name == SystemSummaryTableName {
+		if tableValues.Name == table.SystemSummaryTableName {
 			row := 1
 			sheetName := XlsxBriefSheetName
 			_, _ = f.NewSheet(sheetName)
@@ -236,7 +252,7 @@ func createXlsxReport(allTableValues []TableValues) (out []byte, err error) {
 	return
 }
 
-func createXlsxReportMultiTarget(allTargetsTableValues [][]TableValues, targetNames []string, allTableNames []string) (out []byte, err error) {
+func createXlsxReportMultiTarget(allTargetsTableValues [][]table.TableValues, targetNames []string, allTableNames []string) (out []byte, err error) {
 	f := excelize.NewFile()
 	sheetName := XlsxPrimarySheetName
 	_ = f.SetSheetName("Sheet1", sheetName)
@@ -248,7 +264,7 @@ func createXlsxReportMultiTarget(allTargetsTableValues [][]TableValues, targetNa
 	for _, tableName := range allTableNames {
 		// build list of target names and TableValues for targets that have values for this table
 		tableTargets := []string{}
-		tableValues := []TableValues{}
+		tableValues := []table.TableValues{}
 		for targetIndex, targetTableValues := range allTargetsTableValues {
 			tableIndex := findTableIndex(targetTableValues, tableName)
 			if tableIndex == -1 {
@@ -258,7 +274,7 @@ func createXlsxReportMultiTarget(allTargetsTableValues [][]TableValues, targetNa
 			tableValues = append(tableValues, targetTableValues[tableIndex])
 		}
 		// render the table, if system summary table put it in a separate sheet
-		if tableName == SystemSummaryTableName {
+		if tableName == table.SystemSummaryTableName {
 			summaryRow := 1
 			sheetName := XlsxBriefSheetName
 			_, _ = f.NewSheet(sheetName)
diff --git a/internal/report/render_html.go b/internal/report/render_html.go
index 8ab356e7..32da19fc 100644
--- a/internal/report/render_html.go
+++ b/internal/report/render_html.go
@@ -10,6 +10,7 @@ import (
 	htmltemplate "html/template"
 	"log/slog"
 	"math"
+	"perfspect/internal/table"
 	"perfspect/internal/util"
 	"slices"
 	"sort"
@@ -18,6 +19,53 @@ import (
 	texttemplate "text/template" // nosemgrep
 )
 
+// Package-level maps for custom HTML renderers
+var customHTMLRenderers = map[string]table.HTMLTableRenderer{
+	table.DIMMTableName:                            dimmTableHTMLRenderer,
+	table.FrequencyBenchmarkTableName:              frequencyBenchmarkTableHtmlRenderer,
+	table.MemoryBenchmarkTableName:                 memoryBenchmarkTableHtmlRenderer,
+	table.CPUUtilizationTelemetryTableName:         cpuUtilizationTelemetryTableHTMLRenderer,
+	table.UtilizationCategoriesTelemetryTableName:  utilizationCategoriesTelemetryTableHTMLRenderer,
+	table.IPCTelemetryTableName:                    ipcTelemetryTableHTMLRenderer,
+	table.C6TelemetryTableName:                     c6TelemetryTableHTMLRenderer,
+	table.FrequencyTelemetryTableName:              averageFrequencyTelemetryTableHTMLRenderer,
+	table.IRQRateTelemetryTableName:                irqRateTelemetryTableHTMLRenderer,
+	table.DriveTelemetryTableName:                  driveTelemetryTableHTMLRenderer,
+	table.NetworkTelemetryTableName:                networkTelemetryTableHTMLRenderer,
+	table.MemoryTelemetryTableName:                 memoryTelemetryTableHTMLRenderer,
+	table.PowerTelemetryTableName:                  powerTelemetryTableHTMLRenderer,
+	table.TemperatureTelemetryTableName:            temperatureTelemetryTableHTMLRenderer,
+	table.InstructionTelemetryTableName:            instructionTelemetryTableHTMLRenderer,
+	table.GaudiTelemetryTableName:                  gaudiTelemetryTableHTMLRenderer,
+	table.PDUTelemetryTableName:                    pduTelemetryTableHTMLRenderer,
+	table.CallStackFrequencyTableName:              callStackFrequencyTableHTMLRenderer,
+	table.KernelLockAnalysisTableName:              kernelLockAnalysisHTMLRenderer,
+}
+
+var customHTMLMultiTargetRenderers = map[string]table.HTMLMultiTargetTableRenderer{
+	table.MemoryBenchmarkTableName: memoryBenchmarkTableMultiTargetHtmlRenderer,
+}
+
+// getCustomHTMLRenderer returns the custom renderer for a table, or nil if no custom renderer exists
+func getCustomHTMLRenderer(tableName string) table.HTMLTableRenderer {
+	return customHTMLRenderers[tableName]
+}
+
+// getCustomHTMLMultiTargetRenderer returns the custom multi-target renderer for a table, or nil if no custom renderer exists
+func getCustomHTMLMultiTargetRenderer(tableName string) table.HTMLMultiTargetTableRenderer {
+	return customHTMLMultiTargetRenderers[tableName]
+}
+
+// RegisterHTMLRenderer allows external packages to register custom HTML renderers for specific tables
+func RegisterHTMLRenderer(tableName string, renderer table.HTMLTableRenderer) {
+	customHTMLRenderers[tableName] = renderer
+}
+
+// RegisterHTMLMultiTargetRenderer allows external packages to register custom multi-target HTML renderers for specific tables
+func RegisterHTMLMultiTargetRenderer(tableName string, renderer table.HTMLMultiTargetTableRenderer) {
+	customHTMLMultiTargetRenderers[tableName] = renderer
+}
+
 func getHtmlReportBegin() string {
 	var sb strings.Builder
 	sb.WriteString(`") {
-		t.Errorf("expected delimiter for parsing in master script")
-	}
-}
-
-func TestFormMasterScriptNeedsElevatedFlag(t *testing.T) {
-	scripts := []ScriptDefinition{{Name: "user", Superuser: false}, {Name: "also user", Superuser: false}}
-	_, elevated, err := formMasterScript("/tmp/dir", scripts)
-	if err != nil {
-		t.Fatalf("error forming master script: %v", err)
-	}
-	if elevated {
-		t.Fatalf("expected elevated=false when no scripts require superuser")
-	}
-}
-
-func TestFormMasterScriptEmptyScripts(t *testing.T) {
-	master, elevated, err := formMasterScript("/tmp/dir", nil)
-	if err != nil {
-		t.Fatalf("error forming master script: %v", err)
-	}
-	if elevated {
-		t.Fatalf("expected elevated=false with empty slice")
-	}
-	// Should still contain core function definitions even if no scripts.
-	if !strings.Contains(master, "start_scripts()") || !strings.Contains(master, "print_summary()") {
-		t.Errorf("template missing expected functions for empty slice")
-	}
-	t.Logf("MASTER SCRIPT EMPTY:\n%s", master)
-	// No orig_names assignments lines for empty slice.
-	if strings.Count(master, "orig_names[") > 0 {
-		for line := range strings.SplitSeq(master, "\n") {
-			if strings.Contains(line, "orig_names[") && strings.Contains(line, "]=") {
-				// assignment line detected
-				t.Errorf("no orig_names mappings should appear for empty slice")
-			}
-		}
-	}
-}
-
-func TestFormMasterScriptExecutionIntegration(t *testing.T) {
-	// Integration test: create temp directory, stub two child scripts, run master script, parse output.
-	tmp := t.TempDir()
-	scripts := []ScriptDefinition{{Name: "alpha script"}, {Name: "beta-script"}}
-	master, elevated, err := formMasterScript(tmp, scripts)
-	if err != nil {
-		t.Fatalf("error forming master script: %v", err)
-	}
-	if elevated { // none marked superuser
-		t.Fatalf("did not expect elevated=true for non-superuser scripts")
-	}
-	// Create child scripts.
-	for _, s := range scripts {
-		sanitized := sanitizeScriptName(s.Name)
-		childPath := filepath.Join(tmp, sanitized+".sh")
-		content := "#!/usr/bin/env bash\n" + "echo STDOUT-" + sanitized + "\n" + "echo STDERR-" + sanitized + " 1>&2\n"
-		if err := os.WriteFile(childPath, []byte(content), 0o700); err != nil {
-			t.Fatalf("failed writing child script %s: %v", childPath, err)
-		}
-	}
-	// Write master script.
-	masterPath := filepath.Join(tmp, "parallel_master.sh")
-	if err := os.WriteFile(masterPath, []byte(master), 0o700); err != nil {
-		t.Fatalf("failed writing master script: %v", err)
-	}
-	// Run master script.
-	out, err := runLocalBash(masterPath)
-	if err != nil {
-		// Read master script content for debugging
-		content, _ := os.ReadFile(masterPath)
-		t.Fatalf("error executing master script: %v\nstdout+stderr: %s\nMASTER SCRIPT:\n%s", err, out, string(content))
-	}
-	parsed := parseMasterScriptOutput(out)
-	if len(parsed) != 2 {
-		t.Fatalf("expected 2 parsed script outputs, got %d", len(parsed))
-	}
-	// Validate each output.
-	for _, p := range parsed {
-		if p.Exitcode != 0 { // child scripts exit 0
-			t.Errorf("expected exit code 0 for %s, got %d", p.Name, p.Exitcode)
-		}
-		if !strings.Contains(p.Stdout, "STDOUT-"+sanitizeScriptName(p.Name)) {
-			t.Errorf("stdout mismatch for %s: %q", p.Name, p.Stdout)
-		}
-		if !strings.Contains(p.Stderr, "STDERR-"+sanitizeScriptName(p.Name)) {
-			t.Errorf("stderr mismatch for %s: %q", p.Name, p.Stderr)
-		}
-	}
-}
-
-// runLocalBash executes a bash script locally and returns combined stdout.
-func runLocalBash(scriptPath string) (string, error) {
-	outBytes, err := exec.Command("bash", scriptPath).CombinedOutput() // #nosec G204
-	return string(outBytes), err
-}
diff --git a/internal/script/script_test.go b/internal/script/script_test.go
index 4647151a..e280359d 100644
--- a/internal/script/script_test.go
+++ b/internal/script/script_test.go
@@ -5,7 +5,9 @@ package script
 
 import (
 	"os"
+	"os/exec"
 	"path"
+	"path/filepath"
 	"regexp"
 	"strings"
 	"testing"
@@ -211,3 +213,132 @@ mpstat -u -T -I SCPU -P ALL 1 $count`,
 		}
 	}
 }
+
+func TestFormMasterScriptTemplateStructure(t *testing.T) {
+	scripts := []ScriptDefinition{
+		{Name: "alpha script", Superuser: false},
+		{Name: "beta-script", Superuser: true},
+	}
+	master, elevated, err := formMasterScript("/tmp/targetdir", scripts)
+	if err != nil {
+		t.Fatalf("error forming master script: %v", err)
+	}
+	if !elevated {
+		t.Fatalf("expected elevated=true when at least one script is superuser")
+	}
+	// Shebang
+	if !strings.HasPrefix(master, "#!/usr/bin/env bash") {
+		t.Errorf("master script missing shebang")
+	}
+	// Functions present
+	for _, fn := range []string{"start_scripts()", "kill_script()", "wait_for_scripts()", "print_summary()", "handle_sigint()"} {
+		if !strings.Contains(master, fn) {
+			t.Errorf("expected function %s in master script", fn)
+		}
+	}
+	// Sanitized names appear (spaces and hyphens replaced with underscores)
+	if !strings.Contains(master, "alpha_script") || !strings.Contains(master, "beta_script") {
+		t.Errorf("sanitized script names not found in template output")
+	}
+	// Mapping of original names present (orig_names associative array entries)
+	for _, mapping := range []string{"orig_names[alpha_script]=\"alpha script\"", "orig_names[beta_script]=\"beta-script\""} {
+		if !strings.Contains(master, mapping) {
+			t.Errorf("expected original name mapping %q in master script", mapping)
+		}
+	}
+	// Delimiter used for parsing
+	if !strings.Contains(master, "<---------------------->") {
+		t.Errorf("expected delimiter for parsing in master script")
+	}
+}
+
+func TestFormMasterScriptNeedsElevatedFlag(t *testing.T) {
+	scripts := []ScriptDefinition{{Name: "user", Superuser: false}, {Name: "also user", Superuser: false}}
+	_, elevated, err := formMasterScript("/tmp/dir", scripts)
+	if err != nil {
+		t.Fatalf("error forming master script: %v", err)
+	}
+	if elevated {
+		t.Fatalf("expected elevated=false when no scripts require superuser")
+	}
+}
+
+func TestFormMasterScriptEmptyScripts(t *testing.T) {
+	master, elevated, err := formMasterScript("/tmp/dir", nil)
+	if err != nil {
+		t.Fatalf("error forming master script: %v", err)
+	}
+	if elevated {
+		t.Fatalf("expected elevated=false with empty slice")
+	}
+	// Should still contain core function definitions even if no scripts.
+	if !strings.Contains(master, "start_scripts()") || !strings.Contains(master, "print_summary()") {
+		t.Errorf("template missing expected functions for empty slice")
+	}
+	t.Logf("MASTER SCRIPT EMPTY:\n%s", master)
+	// No orig_names assignments lines for empty slice.
+	if strings.Count(master, "orig_names[") > 0 {
+		for line := range strings.SplitSeq(master, "\n") {
+			if strings.Contains(line, "orig_names[") && strings.Contains(line, "]=") {
+				// assignment line detected
+				t.Errorf("no orig_names mappings should appear for empty slice")
+			}
+		}
+	}
+}
+
+func TestFormMasterScriptExecutionIntegration(t *testing.T) {
+	// Integration test: create temp directory, stub two child scripts, run master script, parse output.
+	tmp := t.TempDir()
+	scripts := []ScriptDefinition{{Name: "alpha script"}, {Name: "beta-script"}}
+	master, elevated, err := formMasterScript(tmp, scripts)
+	if err != nil {
+		t.Fatalf("error forming master script: %v", err)
+	}
+	if elevated { // none marked superuser
+		t.Fatalf("did not expect elevated=true for non-superuser scripts")
+	}
+	// Create child scripts.
+	for _, s := range scripts {
+		sanitized := sanitizeScriptName(s.Name)
+		childPath := filepath.Join(tmp, sanitized+".sh")
+		content := "#!/usr/bin/env bash\n" + "echo STDOUT-" + sanitized + "\n" + "echo STDERR-" + sanitized + " 1>&2\n"
+		if err := os.WriteFile(childPath, []byte(content), 0o700); err != nil {
+			t.Fatalf("failed writing child script %s: %v", childPath, err)
+		}
+	}
+	// Write master script.
+	masterPath := filepath.Join(tmp, "parallel_master.sh")
+	if err := os.WriteFile(masterPath, []byte(master), 0o700); err != nil {
+		t.Fatalf("failed writing master script: %v", err)
+	}
+	// Run master script.
+	out, err := runLocalBash(masterPath)
+	if err != nil {
+		// Read master script content for debugging
+		content, _ := os.ReadFile(masterPath)
+		t.Fatalf("error executing master script: %v\nstdout+stderr: %s\nMASTER SCRIPT:\n%s", err, out, string(content))
+	}
+	parsed := parseMasterScriptOutput(out)
+	if len(parsed) != 2 {
+		t.Fatalf("expected 2 parsed script outputs, got %d", len(parsed))
+	}
+	// Validate each output.
+	for _, p := range parsed {
+		if p.Exitcode != 0 { // child scripts exit 0
+			t.Errorf("expected exit code 0 for %s, got %d", p.Name, p.Exitcode)
+		}
+		if !strings.Contains(p.Stdout, "STDOUT-"+sanitizeScriptName(p.Name)) {
+			t.Errorf("stdout mismatch for %s: %q", p.Name, p.Stdout)
+		}
+		if !strings.Contains(p.Stderr, "STDERR-"+sanitizeScriptName(p.Name)) {
+			t.Errorf("stderr mismatch for %s: %q", p.Name, p.Stderr)
+		}
+	}
+}
+
+// runLocalBash executes a bash script locally and returns combined stdout.
+func runLocalBash(scriptPath string) (string, error) { + outBytes, err := exec.Command("bash", scriptPath).CombinedOutput() // #nosec G204 + return string(outBytes), err +} From f4afc52078cc8b65f517e3f904247b91dea1b441 Mon Sep 17 00:00:00 2001 From: "Harper, Jason M" Date: Thu, 4 Dec 2025 16:55:03 -0800 Subject: [PATCH 4/6] table structs and funcs Signed-off-by: Harper, Jason M --- internal/table/table.go | 125 +++++++++++++++++++++++++++++++++++ internal/table/table_defs.go | 125 ----------------------------------- 2 files changed, 125 insertions(+), 125 deletions(-) diff --git a/internal/table/table.go b/internal/table/table.go index f78f518f..64b09763 100644 --- a/internal/table/table.go +++ b/internal/table/table.go @@ -11,8 +11,53 @@ import ( "perfspect/internal/script" "perfspect/internal/target" "slices" + + "github.com/xuri/excelize/v2" ) +// Field represents the values for a field in a table +type Field struct { + Name string + Description string // optional description of the field + Values []string +} + +// TableValues combines the table definition with the resulting fields and their values +type TableValues struct { + TableDefinition + Fields []Field + Insights []Insight +} + +// Insight represents an insight about the data in a table +type Insight struct { + Recommendation string + Justification string +} + +type FieldsRetriever func(map[string]script.ScriptOutput) []Field +type InsightsRetriever func(map[string]script.ScriptOutput, TableValues) []Insight +type HTMLTableRenderer func(TableValues, string) string +type HTMLMultiTargetTableRenderer func([]TableValues, []string) string +type TextTableRenderer func(TableValues) string +type XlsxTableRenderer func(TableValues, *excelize.File, string, *int) + +// TableDefinition defines the structure of a table in the report +type TableDefinition struct { + Name string + ScriptNames []string + Architectures []string // architectures, i.e., x86_64, aarch64. If empty, it will be present for all architectures. + Vendors []string // vendors, e.g., GenuineIntel, AuthenticAMD. If empty, it will be present for all vendors. + MicroArchitectures []string // microarchitectures, e.g., EMR, ICX. If empty, it will be present for all microarchitectures. + // Fields function is called to retrieve field values from the script outputs + FieldsFunc FieldsRetriever + MenuLabel string // add to tables that will be displayed in the menu + HasRows bool // table is meant to be displayed in row form, i.e., a field may have multiple values + NoDataFound string // message to display when no data is found + // insights function is used to retrieve insights about the data in the table + InsightsFunc InsightsRetriever +} + // GetTableByName retrieves a table definition by its name. func GetTableByName(name string) TableDefinition { if table, ok := tableDefinitions[name]; ok { @@ -81,3 +126,83 @@ func ProcessTables(tableNames []string, scriptOutputs map[string]script.ScriptOu } return } + +// GetScriptNamesForTable returns the script names required to generate the table with the given name +func GetScriptNamesForTable(name string) []string { + if _, ok := tableDefinitions[name]; !ok { + panic(fmt.Sprintf("table not found: %s", name)) + } + return tableDefinitions[name].ScriptNames +} + +// GetFieldIndex returns the index of a field with the given name in the TableValues structure. 
+// Returns: +// - int: The index of the field if found and valid, -1 otherwise +// - error: nil if successful, an error describing the issue otherwise +func GetFieldIndex(fieldName string, tableValues TableValues) (int, error) { + for i, field := range tableValues.Fields { + if field.Name == fieldName { + if len(field.Values) == 0 { + return -1, fmt.Errorf("field [%s] does not have associated value(s)", field.Name) + } + return i, nil + } + } + return -1, fmt.Errorf("field [%s] not found in table [%s]", fieldName, tableValues.Name) +} + +// GetValuesForTable returns the fields and their values for the table with the given name +func GetValuesForTable(name string, outputs map[string]script.ScriptOutput) TableValues { + // if table with given name doesn't exist, panic + if _, ok := tableDefinitions[name]; !ok { + panic(fmt.Sprintf("table not found: %s", name)) + } + table := tableDefinitions[name] + // ValuesFunc can't be nil + if table.FieldsFunc == nil { + panic(fmt.Sprintf("table %s, ValuesFunc cannot be nil", name)) + } + // call the table's FieldsFunc to get the table's fields and values + fields := table.FieldsFunc(outputs) + tableValues := TableValues{ + TableDefinition: tableDefinitions[name], + Fields: fields, + } + // sanity check + if err := validateTableValues(tableValues); err != nil { + slog.Error("table validation failed", "table", name, "error", err) + return TableValues{ + TableDefinition: tableDefinitions[name], + Fields: []Field{}, + } + } + // call the table's InsightsFunc to get insights about the data in the table + if table.InsightsFunc != nil { + tableValues.Insights = table.InsightsFunc(outputs, tableValues) + } + return tableValues +} + +func validateTableValues(tableValues TableValues) error { + if tableValues.Name == "" { + return fmt.Errorf("table name cannot be empty") + } + // no field values is a valid state + if len(tableValues.Fields) == 0 { + return nil + } + // field names cannot be empty + for i, field := range tableValues.Fields { + if field.Name == "" { + return fmt.Errorf("table %s, field %d, name cannot be empty", tableValues.Name, i) + } + } + // the number of entries in each field must be the same + numEntries := len(tableValues.Fields[0].Values) + for i, field := range tableValues.Fields { + if len(field.Values) != numEntries { + return fmt.Errorf("table %s, field %d, %s, number of entries must be the same for all fields, expected %d, got %d", tableValues.Name, i, field.Name, numEntries, len(field.Values)) + } + } + return nil +} diff --git a/internal/table/table_defs.go b/internal/table/table_defs.go index c9b2d49c..4361e80a 100644 --- a/internal/table/table_defs.go +++ b/internal/table/table_defs.go @@ -18,53 +18,8 @@ import ( "perfspect/internal/cpus" "perfspect/internal/script" - - "github.com/xuri/excelize/v2" ) -// Field represents the values for a field in a table -type Field struct { - Name string - Description string // optional description of the field - Values []string -} - -// TableValues combines the table definition with the resulting fields and their values -type TableValues struct { - TableDefinition - Fields []Field - Insights []Insight -} - -// Insight represents an insight about the data in a table -type Insight struct { - Recommendation string - Justification string -} - -type FieldsRetriever func(map[string]script.ScriptOutput) []Field -type InsightsRetriever func(map[string]script.ScriptOutput, TableValues) []Insight -type HTMLTableRenderer func(TableValues, string) string -type HTMLMultiTargetTableRenderer 
func([]TableValues, []string) string -type TextTableRenderer func(TableValues) string -type XlsxTableRenderer func(TableValues, *excelize.File, string, *int) - -// TableDefinition defines the structure of a table in the report -type TableDefinition struct { - Name string - ScriptNames []string - Architectures []string // architectures, i.e., x86_64, aarch64. If empty, it will be present for all architectures. - Vendors []string // vendors, e.g., GenuineIntel, AuthenticAMD. If empty, it will be present for all vendors. - MicroArchitectures []string // microarchitectures, e.g., EMR, ICX. If empty, it will be present for all microarchitectures. - // Fields function is called to retrieve field values from the script outputs - FieldsFunc FieldsRetriever - MenuLabel string // add to tables that will be displayed in the menu - HasRows bool // table is meant to be displayed in row form, i.e., a field may have multiple values - NoDataFound string // message to display when no data is found - // insights function is used to retrieve insights about the data in the table - InsightsFunc InsightsRetriever -} - const ( // report table names HostTableName = "Host" @@ -810,86 +765,6 @@ var tableDefinitions = map[string]TableDefinition{ FieldsFunc: kernelLockAnalysisTableValues}, } -// GetScriptNamesForTable returns the script names required to generate the table with the given name -func GetScriptNamesForTable(name string) []string { - if _, ok := tableDefinitions[name]; !ok { - panic(fmt.Sprintf("table not found: %s", name)) - } - return tableDefinitions[name].ScriptNames -} - -// GetFieldIndex returns the index of a field with the given name in the TableValues structure. -// Returns: -// - int: The index of the field if found and valid, -1 otherwise -// - error: nil if successful, an error describing the issue otherwise -func GetFieldIndex(fieldName string, tableValues TableValues) (int, error) { - for i, field := range tableValues.Fields { - if field.Name == fieldName { - if len(field.Values) == 0 { - return -1, fmt.Errorf("field [%s] does not have associated value(s)", field.Name) - } - return i, nil - } - } - return -1, fmt.Errorf("field [%s] not found in table [%s]", fieldName, tableValues.Name) -} - -// GetValuesForTable returns the fields and their values for the table with the given name -func GetValuesForTable(name string, outputs map[string]script.ScriptOutput) TableValues { - // if table with given name doesn't exist, panic - if _, ok := tableDefinitions[name]; !ok { - panic(fmt.Sprintf("table not found: %s", name)) - } - table := tableDefinitions[name] - // ValuesFunc can't be nil - if table.FieldsFunc == nil { - panic(fmt.Sprintf("table %s, ValuesFunc cannot be nil", name)) - } - // call the table's FieldsFunc to get the table's fields and values - fields := table.FieldsFunc(outputs) - tableValues := TableValues{ - TableDefinition: tableDefinitions[name], - Fields: fields, - } - // sanity check - if err := validateTableValues(tableValues); err != nil { - slog.Error("table validation failed", "table", name, "error", err) - return TableValues{ - TableDefinition: tableDefinitions[name], - Fields: []Field{}, - } - } - // call the table's InsightsFunc to get insights about the data in the table - if table.InsightsFunc != nil { - tableValues.Insights = table.InsightsFunc(outputs, tableValues) - } - return tableValues -} - -func validateTableValues(tableValues TableValues) error { - if tableValues.Name == "" { - return fmt.Errorf("table name cannot be empty") - } - // no field values is a valid 
state - if len(tableValues.Fields) == 0 { - return nil - } - // field names cannot be empty - for i, field := range tableValues.Fields { - if field.Name == "" { - return fmt.Errorf("table %s, field %d, name cannot be empty", tableValues.Name, i) - } - } - // the number of entries in each field must be the same - numEntries := len(tableValues.Fields[0].Values) - for i, field := range tableValues.Fields { - if len(field.Values) != numEntries { - return fmt.Errorf("table %s, field %d, %s, number of entries must be the same for all fields, expected %d, got %d", tableValues.Name, i, field.Name, numEntries, len(field.Values)) - } - } - return nil -} - // // define the fieldsFunc for each table // From bcad01740f1bcdfc171f064d8be8defdebfa5769 Mon Sep 17 00:00:00 2001 From: "Harper, Jason M" Date: Fri, 5 Dec 2025 14:56:40 -0800 Subject: [PATCH 5/6] distribute table functions to respective commands or common Signed-off-by: Harper, Jason M --- cmd/config/config.go | 13 +- cmd/config/config_tables.go | 233 +++ cmd/config/flag_groups.go | 3 +- cmd/config/set.go | 12 +- cmd/flame/flame.go | 11 +- .../stacks.go => cmd/flame/flame_tables.go | 110 +- .../flame}/render_html_flamegraph.go | 5 +- cmd/lock/lock.go | 13 +- cmd/lock/lock_tables.go | 57 + cmd/metrics/metadata.go | 7 +- {internal/table => cmd/report}/accelerator.go | 2 +- .../table => cmd/report}/benchmarking.go | 2 +- cmd/report/cpu.go | 163 ++ {internal/table => cmd/report}/dimm.go | 9 +- {internal/table => cmd/report}/gpu.go | 7 +- {internal/table => cmd/report}/isa.go | 5 +- cmd/report/report.go | 126 +- .../report/report_tables.go | 1709 +++++------------ {internal/table => cmd/report}/security.go | 5 +- cmd/report/system.go | 141 ++ cmd/telemetry/telemetry.go | 74 +- cmd/telemetry/telemetry_tables.go | 1368 +++++++++++++ internal/{table => common}/cache.go | 50 +- internal/{table => common}/cache_test.go | 4 +- internal/common/common.go | 87 +- internal/{table => common}/frequency.go | 88 +- internal/{table => common}/frequency_test.go | 4 +- internal/{table => common}/nic.go | 12 +- internal/{table => common}/nic_test.go | 92 +- internal/{table => common}/power.go | 271 +-- internal/{table => common}/prefetcher.go | 26 +- internal/{table => common}/storage.go | 55 +- internal/common/table_defs.go | 68 + internal/common/table_helpers.go | 284 +++ .../table_helpers_test.go} | 153 +- internal/{table => common}/turbostat.go | 23 +- internal/{table => common}/turbostat_test.go | 12 +- internal/report/render_excel.go | 12 +- internal/report/render_html.go | 975 +--------- internal/report/render_raw.go | 7 +- internal/report/render_text.go | 47 +- internal/report/report.go | 10 +- internal/table/cpu.go | 249 --- internal/table/stacks_test.go | 157 -- internal/table/system.go | 99 - internal/table/table.go | 40 +- internal/table/table_helpers.go | 127 -- 47 files changed, 3508 insertions(+), 3519 deletions(-) create mode 100644 cmd/config/config_tables.go rename internal/table/stacks.go => cmd/flame/flame_tables.go (73%) rename {internal/report => cmd/flame}/render_html_flamegraph.go (98%) create mode 100644 cmd/lock/lock_tables.go rename {internal/table => cmd/report}/accelerator.go (99%) rename {internal/table => cmd/report}/benchmarking.go (99%) create mode 100644 cmd/report/cpu.go rename {internal/table => cmd/report}/dimm.go (98%) rename {internal/table => cmd/report}/gpu.go (95%) rename {internal/table => cmd/report}/isa.go (93%) rename internal/table/table_defs.go => cmd/report/report_tables.go (50%) rename {internal/table => 
cmd/report}/security.go (85%) create mode 100644 cmd/report/system.go create mode 100644 cmd/telemetry/telemetry_tables.go rename internal/{table => common}/cache.go (85%) rename internal/{table => common}/cache_test.go (97%) rename internal/{table => common}/frequency.go (82%) rename internal/{table => common}/frequency_test.go (98%) rename internal/{table => common}/nic.go (95%) rename internal/{table => common}/nic_test.go (88%) rename internal/{table => common}/power.go (85%) rename internal/{table => common}/prefetcher.go (90%) rename internal/{table => common}/storage.go (57%) create mode 100644 internal/common/table_defs.go create mode 100644 internal/common/table_helpers.go rename internal/{table/cpu_test.go => common/table_helpers_test.go} (64%) rename internal/{table => common}/turbostat.go (93%) rename internal/{table => common}/turbostat_test.go (98%) delete mode 100644 internal/table/cpu.go delete mode 100644 internal/table/stacks_test.go delete mode 100644 internal/table/system.go delete mode 100644 internal/table/table_helpers.go diff --git a/cmd/config/config.go b/cmd/config/config.go index 46982432..22f98141 100644 --- a/cmd/config/config.go +++ b/cmd/config/config.go @@ -284,9 +284,9 @@ func setOnTarget(cmd *cobra.Command, myTarget target.Target, flagGroups []flagGr // getConfig collects the configuration data from the target(s) func getConfig(myTargets []target.Target, localTempDir string) ([]common.TargetScriptOutputs, error) { - scriptNames := table.GetScriptNamesForTable(table.ConfigurationTableName) + var scriptsToRun []script.ScriptDefinition - for _, scriptName := range scriptNames { + for _, scriptName := range tableDefinitions[ConfigurationTableName].ScriptNames { scriptsToRun = append(scriptsToRun, script.GetScriptByName(scriptName)) } multiSpinner := progress.NewMultiSpinner() @@ -318,7 +318,6 @@ func getConfig(myTargets []target.Target, localTempDir string) ([]common.TargetS for _, target := range myTargets { for _, targetScriptOutputs := range allTargetScriptOutputs { if targetScriptOutputs.TargetName == target.GetName() { - targetScriptOutputs.TableNames = []string{table.ConfigurationTableName} orderedTargetScriptOutputs = append(orderedTargetScriptOutputs, targetScriptOutputs) break } @@ -334,15 +333,17 @@ func processConfig(targetScriptOutputs []common.TargetScriptOutputs) (map[string var err error for _, targetScriptOutput := range targetScriptOutputs { // process the tables, i.e., get field values from raw script output - tableNames := []string{table.ConfigurationTableName} + tables := []table.TableDefinition{tableDefinitions[ConfigurationTableName]} var tableValues []table.TableValues - if tableValues, err = table.ProcessTables(tableNames, targetScriptOutput.ScriptOutputs); err != nil { + if tableValues, err = table.ProcessTables(tables, targetScriptOutput.ScriptOutputs); err != nil { err = fmt.Errorf("failed to process collected data: %v", err) return nil, err } // create the report for this single table var reportBytes []byte - if reportBytes, err = report.Create("txt", tableValues, targetScriptOutput.TargetName); err != nil { + report.RegisterTextRenderer(ConfigurationTableName, configurationTableTextRenderer) + + if reportBytes, err = report.Create("txt", tableValues, targetScriptOutput.TargetName, ""); err != nil { err = fmt.Errorf("failed to create report: %v", err) return nil, err } diff --git a/cmd/config/config_tables.go b/cmd/config/config_tables.go new file mode 100644 index 00000000..aa16e0d1 --- /dev/null +++ b/cmd/config/config_tables.go 
@@ -0,0 +1,233 @@ +package config + +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +import ( + "fmt" + "log/slog" + "perfspect/internal/common" + "perfspect/internal/cpus" + "perfspect/internal/script" + "perfspect/internal/table" + "slices" + "strings" +) + +const ( + ConfigurationTableName = "Configuration" +) + +var tableDefinitions = map[string]table.TableDefinition{ + ConfigurationTableName: { + Name: ConfigurationTableName, + Vendors: []string{cpus.IntelVendor}, + HasRows: false, + ScriptNames: []string{ + script.LscpuScriptName, + script.LscpuCacheScriptName, + script.LspciBitsScriptName, + script.LspciDevicesScriptName, + script.L3CacheWayEnabledName, + script.PackagePowerLimitName, + script.EpbScriptName, + script.EppScriptName, + script.EppValidScriptName, + script.EppPackageControlScriptName, + script.EppPackageScriptName, + script.ScalingGovernorScriptName, + script.UncoreMaxFromMSRScriptName, + script.UncoreMinFromMSRScriptName, + script.UncoreMaxFromTPMIScriptName, + script.UncoreMinFromTPMIScriptName, + script.UncoreDieTypesFromTPMIScriptName, + script.SpecCoreFrequenciesScriptName, + script.ElcScriptName, + script.PrefetchControlName, + script.PrefetchersName, + script.PrefetchersAtomName, + script.CstatesScriptName, + script.C1DemotionScriptName, + }, + FieldsFunc: configurationTableValues}, +} + +func configurationTableValues(outputs map[string]script.ScriptOutput) []table.Field { + uarch := common.UarchFromOutput(outputs) + if uarch == "" { + slog.Error("failed to get uarch from script outputs") + return []table.Field{} + } + // This table is only shown in text mode on stdout for the config command. The config + // command implements its own print logic and uses the Description field to show the command line + // argument for each config item. + fields := []table.Field{ + {Name: "Cores per Socket", Description: "--cores ", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, + {Name: "L3 Cache", Description: "--llc ", Values: []string{l3InstanceFromOutput(outputs)}}, + {Name: "Package Power / TDP", Description: "--tdp ", Values: []string{common.TDPFromOutput(outputs)}}, + {Name: "Core SSE Frequency", Description: "--core-max ", Values: []string{sseFrequenciesFromOutput(outputs)}}, + } + if strings.Contains(uarch, "SRF") || strings.Contains(uarch, "GNR") || strings.Contains(uarch, "CWF") { + fields = append(fields, []table.Field{ + {Name: "Uncore Max Frequency (Compute)", Description: "--uncore-max-compute ", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(true, true, outputs)}}, + {Name: "Uncore Min Frequency (Compute)", Description: "--uncore-min-compute ", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(false, true, outputs)}}, + {Name: "Uncore Max Frequency (I/O)", Description: "--uncore-max-io ", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(true, false, outputs)}}, + {Name: "Uncore Min Frequency (I/O)", Description: "--uncore-min-io ", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(false, false, outputs)}}, + }...) + } else { + fields = append(fields, []table.Field{ + {Name: "Uncore Max Frequency", Description: "--uncore-max ", Values: []string{common.UncoreMaxFrequencyFromOutput(outputs)}}, + {Name: "Uncore Min Frequency", Description: "--uncore-min ", Values: []string{common.UncoreMinFrequencyFromOutput(outputs)}}, + }...) 
+ } + fields = append(fields, []table.Field{ + {Name: "Energy Performance Bias", Description: "--epb <0-15>", Values: []string{common.EPBFromOutput(outputs)}}, + {Name: "Energy Performance Preference", Description: "--epp <0-255>", Values: []string{common.EPPFromOutput(outputs)}}, + {Name: "Scaling Governor", Description: "--gov ", Values: []string{strings.TrimSpace(outputs[script.ScalingGovernorScriptName].Stdout)}}, + }...) + // add ELC (for SRF, CWF and GNR only) + if strings.Contains(uarch, "SRF") || strings.Contains(uarch, "GNR") || strings.Contains(uarch, "CWF") { + fields = append(fields, table.Field{Name: "Efficiency Latency Control", Description: "--elc ", Values: []string{common.ELCSummaryFromOutput(outputs)}}) + } + // add prefetchers + for _, pf := range common.PrefetcherDefinitions { + if slices.Contains(pf.Uarchs, "all") || slices.Contains(pf.Uarchs, uarch[:3]) { + var scriptName string + switch pf.Msr { + case common.MsrPrefetchControl: + scriptName = script.PrefetchControlName + case common.MsrPrefetchers: + scriptName = script.PrefetchersName + case common.MsrAtomPrefTuning1: + scriptName = script.PrefetchersAtomName + default: + slog.Error("unknown msr for prefetcher", slog.String("msr", fmt.Sprintf("0x%x", pf.Msr))) + continue + } + msrVal := common.ValFromRegexSubmatch(outputs[scriptName].Stdout, `^([0-9a-fA-F]+)`) + var enabledDisabled string + enabled, err := common.IsPrefetcherEnabled(msrVal, pf.Bit) + if err != nil { + slog.Warn("error checking prefetcher enabled status", slog.String("error", err.Error())) + continue + } + if enabled { + enabledDisabled = "Enabled" + } else { + enabledDisabled = "Disabled" + } + fields = append(fields, + table.Field{ + Name: pf.ShortName + " prefetcher", + Description: "--" + "pref-" + strings.ReplaceAll(strings.ToLower(pf.ShortName), " ", "") + " ", + Values: []string{enabledDisabled}}, + ) + } + } + // add C6 + c6 := common.C6FromOutput(outputs) + if c6 != "" { + fields = append(fields, table.Field{Name: "C6", Description: "--c6 ", Values: []string{c6}}) + } + // add C1 Demotion + c1Demotion := strings.TrimSpace(outputs[script.C1DemotionScriptName].Stdout) + if c1Demotion != "" { + fields = append(fields, table.Field{Name: "C1 Demotion", Description: "--c1-demotion ", Values: []string{c1Demotion}}) + } + return fields +} + +// l3InstanceFromOutput retrieves the L3 cache size per instance (per socket on Intel) in megabytes +func l3InstanceFromOutput(outputs map[string]script.ScriptOutput) string { + l3InstanceMB, _, err := common.GetL3MSRMB(outputs) + if err != nil { + slog.Info("Could not get L3 size from MSR, falling back to lscpu", slog.String("error", err.Error())) + l3InstanceMB, _, err = common.GetL3LscpuMB(outputs) + if err != nil { + slog.Error("Could not get L3 size from lscpu", slog.String("error", err.Error())) + return "" + } + } + return common.FormatCacheSizeMB(l3InstanceMB) +} + +// sseFrequenciesFromOutput gets the bucketed SSE frequencies from the output +// and returns a compact string representation with consolidated ranges, e.g.: +// "1-40/3.5, 41-60/3.4, 61-86/3.2" +func sseFrequenciesFromOutput(outputs map[string]script.ScriptOutput) string { + specCoreFrequencies, err := common.GetSpecFrequencyBuckets(outputs) + if err != nil { + return "" + } + sseFreqs := common.GetSSEFreqsFromBuckets(specCoreFrequencies) + if len(sseFreqs) < 1 { + return "" + } + + var result []string + i := 1 + for i < len(specCoreFrequencies) { + startIdx := i + currentFreq := sseFreqs[i-1] + + // Find consecutive buckets with the same 
frequency + for i < len(specCoreFrequencies) && sseFreqs[i-1] == currentFreq { + i++ + } + endIdx := i - 1 + + // Extract start and end core numbers from the ranges + startRange := strings.Split(specCoreFrequencies[startIdx][0], "-")[0] + endRange := strings.Split(specCoreFrequencies[endIdx][0], "-")[1] + + // Format the consolidated range + if startRange == endRange { + result = append(result, fmt.Sprintf("%s/%s", startRange, currentFreq)) + } else { + result = append(result, fmt.Sprintf("%s-%s/%s", startRange, endRange, currentFreq)) + } + } + + return strings.Join(result, ", ") +} + +// configurationTableTextRenderer renders the configuration table for text reports. +// It's similar to the default text table renderer, but uses the Description field +// to show the command line argument for each config item. +// Example output: +// Configuration +// ============= +// Cores per Socket: 86 --cores +// L3 Cache: 336M --llc +// Package Power / TDP: 350W --tdp +// All-Core Max Frequency: 3.2GHz --core-max +func configurationTableTextRenderer(tableValues table.TableValues) string { + var sb strings.Builder + + // Find the longest field name and value for formatting + maxFieldNameLen := 0 + maxValueLen := 0 + for _, field := range tableValues.Fields { + if len(field.Name) > maxFieldNameLen { + maxFieldNameLen = len(field.Name) + } + if len(field.Values) > 0 && len(field.Values[0]) > maxValueLen { + maxValueLen = len(field.Values[0]) + } + } + + // Print each field with name, value, and description (command-line arg) + for _, field := range tableValues.Fields { + var value string + if len(field.Values) > 0 { + value = field.Values[0] + } + // Format: "Field Name: Value Description" + sb.WriteString(fmt.Sprintf("%-*s %-*s %s\n", + maxFieldNameLen+1, field.Name+":", + maxValueLen, value, + field.Description)) + } + + return sb.String() +} diff --git a/cmd/config/flag_groups.go b/cmd/config/flag_groups.go index 57d3e677..6aacf99a 100644 --- a/cmd/config/flag_groups.go +++ b/cmd/config/flag_groups.go @@ -6,7 +6,6 @@ package config import ( "fmt" "perfspect/internal/common" - "perfspect/internal/table" "perfspect/internal/target" "regexp" "slices" @@ -193,7 +192,7 @@ func initializeFlags(cmd *cobra.Command) { flagGroups = append(flagGroups, group) // prefetcher options group = flagGroup{name: flagGroupPrefetcherName, flags: []flagDefinition{}} - for _, pref := range table.GetPrefetcherDefinitions() { + for _, pref := range common.GetPrefetcherDefinitions() { group.flags = append(group.flags, newStringFlag(cmd, // flag name diff --git a/cmd/config/set.go b/cmd/config/set.go index ebe77e9a..0549a3a3 100644 --- a/cmd/config/set.go +++ b/cmd/config/set.go @@ -4,9 +4,9 @@ import ( "fmt" "log/slog" "math" + "perfspect/internal/common" "perfspect/internal/cpus" "perfspect/internal/script" - "perfspect/internal/table" "perfspect/internal/target" "perfspect/internal/util" "regexp" @@ -131,7 +131,7 @@ func setLlcSize(desiredLlcSize float64, myTarget target.Target, localTempDir str return fmt.Errorf("failed to run scripts on target: %w", err) } - uarch := table.UarchFromOutput(outputs) + uarch := common.UarchFromOutput(outputs) cpu, err := cpus.GetCPUByMicroArchitecture(uarch) if err != nil { return fmt.Errorf("failed to get CPU by microarchitecture: %w", err) @@ -139,11 +139,11 @@ func setLlcSize(desiredLlcSize float64, myTarget target.Target, localTempDir str if cpu.CacheWayCount == 0 { return fmt.Errorf("cache way count is zero") } - maximumLlcSize, _, err := table.GetL3LscpuMB(outputs) + maximumLlcSize, _, 
err := common.GetL3LscpuMB(outputs) if err != nil { return fmt.Errorf("failed to get maximum LLC size: %w", err) } - currentLlcSize, _, err := table.GetL3MSRMB(outputs) + currentLlcSize, _, err := common.GetL3MSRMB(outputs) if err != nil { return fmt.Errorf("failed to get current LLC size: %w", err) } @@ -823,7 +823,7 @@ func getUarch(myTarget target.Target, localTempDir string) (string, error) { if err != nil { return "", fmt.Errorf("failed to run scripts on target: %w", err) } - uarch := table.UarchFromOutput(outputs) + uarch := common.UarchFromOutput(outputs) if uarch == "" { return "", fmt.Errorf("failed to get microarchitecture") } @@ -831,7 +831,7 @@ func getUarch(myTarget target.Target, localTempDir string) (string, error) { } func setPrefetcher(enableDisable string, myTarget target.Target, localTempDir string, prefetcherType string) error { - pf, err := table.GetPrefetcherDefByName(prefetcherType) + pf, err := common.GetPrefetcherDefByName(prefetcherType) if err != nil { return fmt.Errorf("failed to get prefetcher definition: %w", err) } diff --git a/cmd/flame/flame.go b/cmd/flame/flame.go index 76186a9d..2637888e 100644 --- a/cmd/flame/flame.go +++ b/cmd/flame/flame.go @@ -176,11 +176,11 @@ func validateFlags(cmd *cobra.Command, args []string) error { } func runCmd(cmd *cobra.Command, args []string) error { - var tableNames []string + var tables []table.TableDefinition if !flagNoSystemSummary { - tableNames = append(tableNames, table.BriefSysSummaryTableName) + tables = append(tables, common.TableDefinitions[common.BriefSysSummaryTableName]) } - tableNames = append(tableNames, table.CallStackFrequencyTableName) + tables = append(tables, tableDefinitions[CallStackFrequencyTableName]) reportingCommand := common.ReportingCommand{ Cmd: cmd, ReportNamePost: "flame", @@ -190,7 +190,10 @@ func runCmd(cmd *cobra.Command, args []string) error { "PIDs": strings.Join(util.IntSliceToStringSlice(flagPids), ","), "MaxDepth": strconv.Itoa(flagMaxDepth), }, - TableNames: tableNames, + Tables: tables, } + + report.RegisterHTMLRenderer(CallStackFrequencyTableName, callStackFrequencyTableHTMLRenderer) + return reportingCommand.Run() } diff --git a/internal/table/stacks.go b/cmd/flame/flame_tables.go similarity index 73% rename from internal/table/stacks.go rename to cmd/flame/flame_tables.go index b80a771f..c8dd95cc 100644 --- a/internal/table/stacks.go +++ b/cmd/flame/flame_tables.go @@ -1,4 +1,4 @@ -package table +package flame // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause @@ -7,70 +7,41 @@ import ( "fmt" "log/slog" "math" + "perfspect/internal/common" "perfspect/internal/script" + "perfspect/internal/table" "regexp" "strconv" "strings" ) -// getSectionsFromOutput parses output into sections, where the section name -// is the key in a map and the section content is the value -// sections are delimited by lines of the form ##########
<section name> ##########
-// example:
-// ########## <section 1 name> ##########
-// <section 1 content>
-// <section 1 content>
-// ########## <section 2 name> ##########
-// <section 2 content>
-// -// returns a map of section name to section content -// if the output is empty or contains no section headers, returns an empty map -// if a section contains no content, the value for that section is an empty string -func getSectionsFromOutput(output string) map[string]string { - sections := make(map[string]string) - re := regexp.MustCompile(`^########## (.+?) ##########$`) - var sectionName string - for line := range strings.SplitSeq(output, "\n") { - // check if the line is a section header - match := re.FindStringSubmatch(line) - if match != nil { - // if the section name isn't in the map yet, add it - if _, ok := sections[match[1]]; !ok { - sections[match[1]] = "" - } - // save the section name - sectionName = match[1] - continue - } - if sectionName != "" { - sections[sectionName] += line + "\n" - } - } - return sections +// flamegraph table names +const ( + CallStackFrequencyTableName = "Call Stack Frequency" +) + +// flamegraph tables +var tableDefinitions = map[string]table.TableDefinition{ + CallStackFrequencyTableName: { + Name: CallStackFrequencyTableName, + MenuLabel: CallStackFrequencyTableName, + ScriptNames: []string{ + script.CollapsedCallStacksScriptName, + }, + FieldsFunc: callStackFrequencyTableValues}, } -// sectionValueFromOutput returns the content of a section from the output -// if the section doesn't exist, returns an empty string -// if the section exists but has no content, returns an empty string -func sectionValueFromOutput(output string, sectionName string) string { - sections := getSectionsFromOutput(output) - if len(sections) == 0 { - slog.Warn("no sections in output") - return "" - } - if _, ok := sections[sectionName]; !ok { - slog.Warn("section not found in output", slog.String("section", sectionName)) - return "" +func callStackFrequencyTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ + {Name: "Native Stacks", Values: []string{nativeFoldedFromOutput(outputs)}}, + {Name: "Java Stacks", Values: []string{javaFoldedFromOutput(outputs)}}, + {Name: "Maximum Render Depth", Values: []string{maxRenderDepthFromOutput(outputs)}}, } - if sections[sectionName] == "" { - slog.Warn("No content for section:", slog.String("section", sectionName)) - return "" - } - return sections[sectionName] + return fields } func javaFoldedFromOutput(outputs map[string]script.ScriptOutput) string { - sections := getSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) + sections := common.GetSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) if len(sections) == 0 { slog.Warn("no sections in collapsed call stack output") return "" @@ -108,7 +79,7 @@ func javaFoldedFromOutput(outputs map[string]script.ScriptOutput) string { } func nativeFoldedFromOutput(outputs map[string]script.ScriptOutput) string { - sections := getSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) + sections := common.GetSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) if len(sections) == 0 { slog.Warn("no sections in collapsed call stack output") return "" @@ -133,7 +104,7 @@ func nativeFoldedFromOutput(outputs map[string]script.ScriptOutput) string { } func maxRenderDepthFromOutput(outputs map[string]script.ScriptOutput) string { - sections := getSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) + sections := common.GetSectionsFromOutput(outputs[script.CollapsedCallStacksScriptName].Stdout) if len(sections) == 0 { slog.Warn("no sections in collapsed call 
stack output") return "" @@ -298,3 +269,32 @@ func mergeSystemFolded(perfFp string, perfDwarf string) (merged string, err erro merged = mergedStacks.dumpFolded() return } + +func callStackFrequencyTableHTMLRenderer(tableValues table.TableValues, targetName string) string { + out := ` +` + out += renderFlameGraph("Native", tableValues, "Native Stacks") + out += renderFlameGraph("Java", tableValues, "Java Stacks") + return out +} diff --git a/internal/report/render_html_flamegraph.go b/cmd/flame/render_html_flamegraph.go similarity index 98% rename from internal/report/render_html_flamegraph.go rename to cmd/flame/render_html_flamegraph.go index 00fbd4f9..ed8f7eae 100644 --- a/internal/report/render_html_flamegraph.go +++ b/cmd/flame/render_html_flamegraph.go @@ -1,4 +1,4 @@ -package report +package flame // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause @@ -9,6 +9,7 @@ import ( "encoding/json" "fmt" "log/slog" + "perfspect/internal/report" "perfspect/internal/table" "perfspect/internal/util" "slices" @@ -168,7 +169,7 @@ func renderFlameGraph(header string, tableValues table.TableValues, field string folded := tableValues.Fields[fieldIdx].Values[0] if folded == "" { out += `

` + header + `

` - msg := noDataFound + msg := report.NoDataFound if tableValues.NoDataFound != "" { msg = tableValues.NoDataFound } diff --git a/cmd/lock/lock.go b/cmd/lock/lock.go index 184f1f1f..25fc8813 100755 --- a/cmd/lock/lock.go +++ b/cmd/lock/lock.go @@ -166,7 +166,7 @@ func formalizeOutputFormat(outputFormat []string) []string { func pullDataFiles(appContext common.AppContext, scriptOutputs map[string]script.ScriptOutput, myTarget target.Target, statusUpdate progress.MultiSpinnerUpdateFunc) error { localOutputDir := appContext.OutputDir - tableValues := table.GetValuesForTable(table.KernelLockAnalysisTableName, scriptOutputs) + tableValues := table.GetValuesForTable(tableDefinitions[KernelLockAnalysisTableName], scriptOutputs) found := false for _, field := range tableValues.Fields { if field.Name == "Perf Package Path" { @@ -193,11 +193,11 @@ func pullDataFiles(appContext common.AppContext, scriptOutputs map[string]script } func runCmd(cmd *cobra.Command, args []string) error { - var tableNames []string + var tables []table.TableDefinition if !flagNoSystemSummary { - tableNames = append(tableNames, table.BriefSysSummaryTableName) + tables = append(tables, common.TableDefinitions[common.BriefSysSummaryTableName]) } - tableNames = append(tableNames, table.KernelLockAnalysisTableName) + tables = append(tables, tableDefinitions[KernelLockAnalysisTableName]) reportingCommand := common.ReportingCommand{ Cmd: cmd, ReportNamePost: "lock", @@ -206,7 +206,7 @@ func runCmd(cmd *cobra.Command, args []string) error { "Duration": strconv.Itoa(flagDuration), "Package": strconv.FormatBool(flagPackage), }, - TableNames: tableNames, + Tables: tables, } // only try to download package when option specified @@ -219,5 +219,8 @@ func runCmd(cmd *cobra.Command, args []string) error { // workaround is to make an assignment to ensure the current command's output format // flag takes effect as expected. 
common.FlagFormat = formalizeOutputFormat(flagFormat) + + report.RegisterHTMLRenderer(KernelLockAnalysisTableName, kernelLockAnalysisHTMLRenderer) + return reportingCommand.Run() } diff --git a/cmd/lock/lock_tables.go b/cmd/lock/lock_tables.go new file mode 100644 index 00000000..72ad4860 --- /dev/null +++ b/cmd/lock/lock_tables.go @@ -0,0 +1,57 @@ +package lock + +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +import ( + htmltemplate "html/template" + "perfspect/internal/common" + "perfspect/internal/report" + "perfspect/internal/script" + "perfspect/internal/table" + "strings" +) + +// lock table names +const ( + KernelLockAnalysisTableName = "Kernel Lock Analysis" +) + +// kernel lock analysis tables +var tableDefinitions = map[string]table.TableDefinition{ + KernelLockAnalysisTableName: { + Name: KernelLockAnalysisTableName, + MenuLabel: KernelLockAnalysisTableName, + ScriptNames: []string{ + script.ProfileKernelLockScriptName, + }, + FieldsFunc: kernelLockAnalysisTableValues}, +} + +func kernelLockAnalysisTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ + {Name: "Hotspot without Callstack", Values: []string{common.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_hotspot_no_children")}}, + {Name: "Hotspot with Callstack", Values: []string{common.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_hotspot_callgraph")}}, + {Name: "Cache2Cache without Callstack", Values: []string{common.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_c2c_no_children")}}, + {Name: "Cache2Cache with CallStack", Values: []string{common.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_c2c_callgraph")}}, + {Name: "Lock Contention", Values: []string{common.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_lock_contention")}}, + {Name: "Perf Package Path", Values: []string{strings.TrimSpace(common.SectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_package_path"))}}, + } + return fields +} + +func kernelLockAnalysisHTMLRenderer(tableValues table.TableValues, targetName string) string { + values := [][]string{} + var tableValueStyles [][]string + for _, field := range tableValues.Fields { + rowValues := []string{} + rowValues = append(rowValues, field.Name) + rowValues = append(rowValues, htmltemplate.HTMLEscapeString(field.Values[0])) + values = append(values, rowValues) + rowStyles := []string{} + rowStyles = append(rowStyles, "font-weight:bold") + rowStyles = append(rowStyles, "white-space: pre-wrap") + tableValueStyles = append(tableValueStyles, rowStyles) + } + return report.RenderHTMLTable([]string{}, values, "pure-table pure-table-striped", tableValueStyles) +} diff --git a/cmd/metrics/metadata.go b/cmd/metrics/metadata.go index 66fd37ee..8aa53d91 100644 --- a/cmd/metrics/metadata.go +++ b/cmd/metrics/metadata.go @@ -18,6 +18,7 @@ import ( "strings" "time" + "perfspect/internal/common" "perfspect/internal/cpus" "perfspect/internal/progress" "perfspect/internal/script" @@ -526,8 +527,7 @@ func getMetadataScripts(noRoot bool, noSystemSummary bool, numGPCounters int) (m } // add the system summary table scripts to the list if !noSystemSummary { - table := table.GetTableByName(table.BriefSysSummaryTableName) - for _, scriptName := range table.ScriptNames { + for _, scriptName := range 
common.TableDefinitions[common.BriefSysSummaryTableName].ScriptNames { scriptDef := script.GetScriptByName(scriptName) metadataScripts = append(metadataScripts, scriptDef) } @@ -609,8 +609,7 @@ func ReadJSONFromFile(path string) (md Metadata, err error) { // getSystemSummary - retrieves the system summary from the target func getSystemSummary(scriptOutputs map[string]script.ScriptOutput) (summaryFields [][]string, err error) { - var allTableValues []table.TableValues - allTableValues, err = table.ProcessTables([]string{table.BriefSysSummaryTableName}, scriptOutputs) + allTableValues, err := table.ProcessTables([]table.TableDefinition{common.TableDefinitions[common.BriefSysSummaryTableName]}, scriptOutputs) if err != nil { err = fmt.Errorf("failed to process script outputs: %w", err) return diff --git a/internal/table/accelerator.go b/cmd/report/accelerator.go similarity index 99% rename from internal/table/accelerator.go rename to cmd/report/accelerator.go index 36328177..f9ca55b0 100644 --- a/internal/table/accelerator.go +++ b/cmd/report/accelerator.go @@ -1,7 +1,7 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package table +package report import ( "fmt" diff --git a/internal/table/benchmarking.go b/cmd/report/benchmarking.go similarity index 99% rename from internal/table/benchmarking.go rename to cmd/report/benchmarking.go index 917deba6..41fa303a 100644 --- a/internal/table/benchmarking.go +++ b/cmd/report/benchmarking.go @@ -1,4 +1,4 @@ -package table +package report // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause diff --git a/cmd/report/cpu.go b/cmd/report/cpu.go new file mode 100644 index 00000000..5efc548b --- /dev/null +++ b/cmd/report/cpu.go @@ -0,0 +1,163 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package report + +import ( + "fmt" + "log/slog" + "strconv" + "strings" + + "perfspect/internal/common" + "perfspect/internal/cpus" + "perfspect/internal/script" +) + +func numaCPUListFromOutput(outputs map[string]script.ScriptOutput) string { + nodeCPUs := common.ValsFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node[0-9] CPU\(.*:\s*(.+?)$`) + return strings.Join(nodeCPUs, " :: ") +} + +func ppinsFromOutput(outputs map[string]script.ScriptOutput) string { + uniquePpins := []string{} + for line := range strings.SplitSeq(outputs[script.PPINName].Stdout, "\n") { + parts := strings.Split(line, ":") + if len(parts) < 2 { + continue + } + ppin := strings.TrimSpace(parts[1]) + found := false + for _, p := range uniquePpins { + if string(p) == ppin { + found = true + break + } + } + if !found && ppin != "" { + uniquePpins = append(uniquePpins, ppin) + } + } + return strings.Join(uniquePpins, ", ") +} + +func channelsFromOutput(outputs map[string]script.ScriptOutput) string { + family := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) + model := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) + stepping := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) + capid4 := common.ValFromRegexSubmatch(outputs[script.LspciBitsScriptName].Stdout, `^([0-9a-fA-F]+)`) + devices := common.ValFromRegexSubmatch(outputs[script.LspciDevicesScriptName].Stdout, `^([0-9]+)`) + cpu, err := cpus.GetCPUExtended(family, model, stepping, capid4, devices) + if err != nil { + slog.Error("error getting CPU from CPUdb", slog.String("error", 
err.Error())) + return "" + } + return fmt.Sprintf("%d", cpu.MemoryChannelCount) +} + +func turboEnabledFromOutput(outputs map[string]script.ScriptOutput) string { + vendor := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Vendor ID:\s*(.+)$`) + switch vendor { + case cpus.IntelVendor: + val := common.ValFromRegexSubmatch(outputs[script.CpuidScriptName].Stdout, `^Intel Turbo Boost Technology\s*= (.+?)$`) + if val == "true" { + return "Enabled" + } + if val == "false" { + return "Disabled" + } + return "" // unknown value + case cpus.AMDVendor: + val := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Frequency boost.*:\s*(.+?)$`) + if val != "" { + return val + " (AMD Frequency Boost)" + } + } + return "" +} + +func chaCountFromOutput(outputs map[string]script.ScriptOutput) string { + // output is the result of three rdmsr calls + // - client cha count + // - cha count + // - spr cha count + // stop when we find a non-zero value + // note: rdmsr writes to stderr on error so we will likely have fewer than 3 lines in stdout + for hexCount := range strings.SplitSeq(outputs[script.ChaCountScriptName].Stdout, "\n") { + if hexCount != "" && hexCount != "0" { + count, err := strconv.ParseInt(hexCount, 16, 64) + if err == nil { + return fmt.Sprintf("%d", count) + } + } + } + return "" +} + +func numaBalancingFromOutput(outputs map[string]script.ScriptOutput) string { + if strings.Contains(outputs[script.NumaBalancingScriptName].Stdout, "1") { + return "Enabled" + } else if strings.Contains(outputs[script.NumaBalancingScriptName].Stdout, "0") { + return "Disabled" + } + return "" +} + +func clusteringModeFromOutput(outputs map[string]script.ScriptOutput) string { + uarch := common.UarchFromOutput(outputs) + sockets := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) + nodes := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`) + if uarch == "" || sockets == "" || nodes == "" { + return "" + } + socketCount, err := strconv.Atoi(sockets) + if err != nil { + slog.Error("failed to parse socket count", slog.String("error", err.Error())) + return "" + } + nodeCount, err := strconv.Atoi(nodes) + if err != nil { + slog.Error("failed to parse node count", slog.String("error", err.Error())) + return "" + } + if nodeCount == 0 || socketCount == 0 { + slog.Error("node count or socket count is zero") + return "" + } + nodesPerSocket := nodeCount / socketCount + switch uarch { + case "GNR_X1": + return "All2All" + case "GNR_X2": + switch nodesPerSocket { + case 1: + return "UMA 4 (Quad)" + case 2: + return "SNC 2" + } + case "GNR_X3": + switch nodesPerSocket { + case 1: + return "UMA 6 (Hex)" + case 3: + return "SNC 3" + } + case "SRF_SP": + return "UMA 2 (Hemi)" + case "SRF_AP": + switch nodesPerSocket { + case 1: + return "UMA 4 (Quad)" + case 2: + return "SNC 2" + } + case "CWF": + switch nodesPerSocket { + case 1: + return "UMA 6 (Hex)" + case 3: + return "SNC 3" + } + } + return "" +} diff --git a/internal/table/dimm.go b/cmd/report/dimm.go similarity index 98% rename from internal/table/dimm.go rename to cmd/report/dimm.go index dba1622f..0d814750 100644 --- a/internal/table/dimm.go +++ b/cmd/report/dimm.go @@ -1,4 +1,4 @@ -package table +package report // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause @@ -6,6 +6,7 @@ package table import ( "fmt" "log/slog" + "perfspect/internal/common" "perfspect/internal/script" "regexp" "strconv" @@ 
-30,7 +31,7 @@ const ( ) func dimmInfoFromDmiDecode(dmiDecodeOutput string) [][]string { - return valsArrayFromDmiDecodeRegexSubmatch( + return common.ValsArrayFromDmiDecodeRegexSubmatch( dmiDecodeOutput, "17", `^Bank Locator:\s*(.+?)$`, @@ -116,8 +117,8 @@ func derivedDimmsFieldFromOutput(outputs map[string]script.ScriptOutput) []deriv if err != nil || numChannels == 0 { return nil } - platformVendor := valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "0", `Vendor:\s*(.*)`) - numSockets, err := strconv.Atoi(valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(.*:\s*(.+?)$`)) + platformVendor := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "0", `Vendor:\s*(.*)`) + numSockets, err := strconv.Atoi(common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(.*:\s*(.+?)$`)) if err != nil || numSockets == 0 { return nil } diff --git a/internal/table/gpu.go b/cmd/report/gpu.go similarity index 95% rename from internal/table/gpu.go rename to cmd/report/gpu.go index bf9611d1..e507fde3 100644 --- a/internal/table/gpu.go +++ b/cmd/report/gpu.go @@ -1,7 +1,7 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package table +package report import ( "log/slog" @@ -10,6 +10,7 @@ import ( "strconv" "strings" + "perfspect/internal/common" "perfspect/internal/script" ) @@ -142,7 +143,7 @@ type GPU struct { func gpuInfoFromOutput(outputs map[string]script.ScriptOutput) []GPU { gpus := []GPU{} - gpusLshw := valsArrayFromRegexSubmatch(outputs[script.LshwScriptName].Stdout, `^pci.*?\s+display\s+(\w+).*?\s+\[(\w+):(\w+)]$`) + gpusLshw := common.ValsArrayFromRegexSubmatch(outputs[script.LshwScriptName].Stdout, `^pci.*?\s+display\s+(\w+).*?\s+\[(\w+):(\w+)]$`) idxMfgName := 0 idxMfgID := 1 idxDevID := 2 @@ -206,7 +207,7 @@ func gaudiInfoFromOutput(outputs map[string]script.ScriptOutput) []Gaudi { gaudis[i].Microarchitecture = strings.TrimSpace(outputs[script.GaudiArchitectureScriptName].Stdout) } // get NUMA affinity - numaAffinities := valsArrayFromRegexSubmatch(outputs[script.GaudiNumaScriptName].Stdout, `^(\d+)\s+(\d+)\s+$`) + numaAffinities := common.ValsArrayFromRegexSubmatch(outputs[script.GaudiNumaScriptName].Stdout, `^(\d+)\s+(\d+)\s+$`) if len(numaAffinities) != len(gaudis) { slog.Error("number of gaudis in gaudi info and numa output do not match", slog.Int("gaudis", len(gaudis)), slog.Int("numaAffinities", len(numaAffinities))) return nil diff --git a/internal/table/isa.go b/cmd/report/isa.go similarity index 93% rename from internal/table/isa.go rename to cmd/report/isa.go index d439e428..b804a48d 100644 --- a/internal/table/isa.go +++ b/cmd/report/isa.go @@ -1,9 +1,10 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package table +package report import ( + "perfspect/internal/common" "perfspect/internal/script" ) @@ -56,7 +57,7 @@ func yesIfTrue(val string) string { func isaSupportedFromOutput(outputs map[string]script.ScriptOutput) []string { var supported []string for _, isa := range isas { - oneSupported := yesIfTrue(valFromRegexSubmatch(outputs[script.CpuidScriptName].Stdout, isa.CPUID+`\s*= (.+?)$`)) + oneSupported := yesIfTrue(common.ValFromRegexSubmatch(outputs[script.CpuidScriptName].Stdout, isa.CPUID+`\s*= (.+?)$`)) supported = append(supported, oneSupported) } return supported diff --git a/cmd/report/report.go b/cmd/report/report.go index 7e7545b9..c7829e1d 100644 --- a/cmd/report/report.go +++ b/cmd/report/report.go 
@@ -139,52 +139,53 @@ var benchmarkOptions = []string{ var benchmarkAll = "all" -var benchmarkTableNames = map[string][]string{ - "speed": {table.SpeedBenchmarkTableName}, - "power": {table.PowerBenchmarkTableName}, - "temperature": {table.TemperatureBenchmarkTableName}, - "frequency": {table.FrequencyBenchmarkTableName}, - "memory": {table.MemoryBenchmarkTableName}, - "numa": {table.NUMABenchmarkTableName}, - "storage": {table.StorageBenchmarkTableName}, +// map benchmark flag values, e.g., "--benchmark speed,power" to associated tables +var benchmarkTables = map[string][]table.TableDefinition{ + "speed": {tableDefinitions[SpeedBenchmarkTableName]}, + "power": {tableDefinitions[PowerBenchmarkTableName]}, + "temperature": {tableDefinitions[TemperatureBenchmarkTableName]}, + "frequency": {tableDefinitions[FrequencyBenchmarkTableName]}, + "memory": {tableDefinitions[MemoryBenchmarkTableName]}, + "numa": {tableDefinitions[NUMABenchmarkTableName]}, + "storage": {tableDefinitions[StorageBenchmarkTableName]}, } var benchmarkSummaryTableName = "Benchmark Summary" // categories maps flag names to tables that will be included in report var categories = []common.Category{ - {FlagName: flagSystemSummaryName, FlagVar: &flagSystemSummary, Help: "System Summary", TableNames: []string{table.SystemSummaryTableName}}, - {FlagName: flagHostName, FlagVar: &flagHost, Help: "Host", TableNames: []string{table.HostTableName}}, - {FlagName: flagBiosName, FlagVar: &flagBios, Help: "BIOS", TableNames: []string{table.BIOSTableName}}, - {FlagName: flagOsName, FlagVar: &flagOs, Help: "Operating System", TableNames: []string{table.OperatingSystemTableName}}, - {FlagName: flagSoftwareName, FlagVar: &flagSoftware, Help: "Software Versions", TableNames: []string{table.SoftwareVersionTableName}}, - {FlagName: flagCpuName, FlagVar: &flagCpu, Help: "Processor Details", TableNames: []string{table.CPUTableName}}, - {FlagName: flagPrefetcherName, FlagVar: &flagPrefetcher, Help: "Prefetchers", TableNames: []string{table.PrefetcherTableName}}, - {FlagName: flagIsaName, FlagVar: &flagIsa, Help: "Instruction Sets", TableNames: []string{table.ISATableName}}, - {FlagName: flagAcceleratorName, FlagVar: &flagAccelerator, Help: "On-board Accelerators", TableNames: []string{table.AcceleratorTableName}}, - {FlagName: flagPowerName, FlagVar: &flagPower, Help: "Power Settings", TableNames: []string{table.PowerTableName}}, - {FlagName: flagCstatesName, FlagVar: &flagCstates, Help: "C-states", TableNames: []string{table.CstateTableName}}, - {FlagName: flagFrequencyName, FlagVar: &flagFrequency, Help: "Maximum Frequencies", TableNames: []string{table.MaximumFrequencyTableName}}, - {FlagName: flagSSTName, FlagVar: &flagSST, Help: "Speed Select Technology Settings", TableNames: []string{table.SSTTFHPTableName, table.SSTTFLPTableName}}, - {FlagName: flagUncoreName, FlagVar: &flagUncore, Help: "Uncore Configuration", TableNames: []string{table.UncoreTableName}}, - {FlagName: flagElcName, FlagVar: &flagElc, Help: "Efficiency Latency Control Settings", TableNames: []string{table.ElcTableName}}, - {FlagName: flagMemoryName, FlagVar: &flagMemory, Help: "Memory Configuration", TableNames: []string{table.MemoryTableName}}, - {FlagName: flagDimmName, FlagVar: &flagDimm, Help: "DIMM Population", TableNames: []string{table.DIMMTableName}}, - {FlagName: flagNetConfigName, FlagVar: &flagNetConfig, Help: "Network Configuration", TableNames: []string{table.NetworkConfigTableName}}, - {FlagName: flagNicName, FlagVar: &flagNic, Help: "Network Cards", TableNames: 
[]string{table.NICTableName, table.NICCpuAffinityTableName, table.NICPacketSteeringTableName}}, - {FlagName: flagDiskName, FlagVar: &flagDisk, Help: "Storage Devices", TableNames: []string{table.DiskTableName}}, - {FlagName: flagFilesystemName, FlagVar: &flagFilesystem, Help: "File Systems", TableNames: []string{table.FilesystemTableName}}, - {FlagName: flagGpuName, FlagVar: &flagGpu, Help: "GPUs", TableNames: []string{table.GPUTableName}}, - {FlagName: flagGaudiName, FlagVar: &flagGaudi, Help: "Gaudi Devices", TableNames: []string{table.GaudiTableName}}, - {FlagName: flagCxlName, FlagVar: &flagCxl, Help: "CXL Devices", TableNames: []string{table.CXLTableName}}, - {FlagName: flagPcieName, FlagVar: &flagPcie, Help: "PCIE Slots", TableNames: []string{table.PCIeTableName}}, - {FlagName: flagCveName, FlagVar: &flagCve, Help: "Vulnerabilities", TableNames: []string{table.CVETableName}}, - {FlagName: flagProcessName, FlagVar: &flagProcess, Help: "Process List", TableNames: []string{table.ProcessTableName}}, - {FlagName: flagSensorName, FlagVar: &flagSensor, Help: "Sensor Status", TableNames: []string{table.SensorTableName}}, - {FlagName: flagChassisStatusName, FlagVar: &flagChassisStatus, Help: "Chassis Status", TableNames: []string{table.ChassisStatusTableName}}, - {FlagName: flagPmuName, FlagVar: &flagPmu, Help: "Performance Monitoring Unit Status", TableNames: []string{table.PMUTableName}}, - {FlagName: flagSystemEventLogName, FlagVar: &flagSystemEventLog, Help: "System Event Log", TableNames: []string{table.SystemEventLogTableName}}, - {FlagName: flagKernelLogName, FlagVar: &flagKernelLog, Help: "Kernel Log", TableNames: []string{table.KernelLogTableName}}, + {FlagName: flagSystemSummaryName, FlagVar: &flagSystemSummary, Help: "System Summary", Tables: []table.TableDefinition{tableDefinitions[SystemSummaryTableName]}}, + {FlagName: flagHostName, FlagVar: &flagHost, Help: "Host", Tables: []table.TableDefinition{tableDefinitions[HostTableName]}}, + {FlagName: flagBiosName, FlagVar: &flagBios, Help: "BIOS", Tables: []table.TableDefinition{tableDefinitions[BIOSTableName]}}, + {FlagName: flagOsName, FlagVar: &flagOs, Help: "Operating System", Tables: []table.TableDefinition{tableDefinitions[OperatingSystemTableName]}}, + {FlagName: flagSoftwareName, FlagVar: &flagSoftware, Help: "Software Versions", Tables: []table.TableDefinition{tableDefinitions[SoftwareVersionTableName]}}, + {FlagName: flagCpuName, FlagVar: &flagCpu, Help: "Processor Details", Tables: []table.TableDefinition{tableDefinitions[CPUTableName]}}, + {FlagName: flagPrefetcherName, FlagVar: &flagPrefetcher, Help: "Prefetchers", Tables: []table.TableDefinition{tableDefinitions[PrefetcherTableName]}}, + {FlagName: flagIsaName, FlagVar: &flagIsa, Help: "Instruction Sets", Tables: []table.TableDefinition{tableDefinitions[ISATableName]}}, + {FlagName: flagAcceleratorName, FlagVar: &flagAccelerator, Help: "On-board Accelerators", Tables: []table.TableDefinition{tableDefinitions[AcceleratorTableName]}}, + {FlagName: flagPowerName, FlagVar: &flagPower, Help: "Power Settings", Tables: []table.TableDefinition{tableDefinitions[PowerTableName]}}, + {FlagName: flagCstatesName, FlagVar: &flagCstates, Help: "C-states", Tables: []table.TableDefinition{tableDefinitions[CstateTableName]}}, + {FlagName: flagFrequencyName, FlagVar: &flagFrequency, Help: "Maximum Frequencies", Tables: []table.TableDefinition{tableDefinitions[MaximumFrequencyTableName]}}, + {FlagName: flagSSTName, FlagVar: &flagSST, Help: "Speed Select Technology Settings", Tables: 
[]table.TableDefinition{tableDefinitions[SSTTFHPTableName], tableDefinitions[SSTTFLPTableName]}}, + {FlagName: flagUncoreName, FlagVar: &flagUncore, Help: "Uncore Configuration", Tables: []table.TableDefinition{tableDefinitions[UncoreTableName]}}, + {FlagName: flagElcName, FlagVar: &flagElc, Help: "Efficiency Latency Control Settings", Tables: []table.TableDefinition{tableDefinitions[ElcTableName]}}, + {FlagName: flagMemoryName, FlagVar: &flagMemory, Help: "Memory Configuration", Tables: []table.TableDefinition{tableDefinitions[MemoryTableName]}}, + {FlagName: flagDimmName, FlagVar: &flagDimm, Help: "DIMM Population", Tables: []table.TableDefinition{tableDefinitions[DIMMTableName]}}, + {FlagName: flagNetConfigName, FlagVar: &flagNetConfig, Help: "Network Configuration", Tables: []table.TableDefinition{tableDefinitions[NetworkConfigTableName]}}, + {FlagName: flagNicName, FlagVar: &flagNic, Help: "Network Cards", Tables: []table.TableDefinition{tableDefinitions[NICTableName], tableDefinitions[NICCpuAffinityTableName], tableDefinitions[NICPacketSteeringTableName]}}, + {FlagName: flagDiskName, FlagVar: &flagDisk, Help: "Storage Devices", Tables: []table.TableDefinition{tableDefinitions[DiskTableName]}}, + {FlagName: flagFilesystemName, FlagVar: &flagFilesystem, Help: "File Systems", Tables: []table.TableDefinition{tableDefinitions[FilesystemTableName]}}, + {FlagName: flagGpuName, FlagVar: &flagGpu, Help: "GPUs", Tables: []table.TableDefinition{tableDefinitions[GPUTableName]}}, + {FlagName: flagGaudiName, FlagVar: &flagGaudi, Help: "Gaudi Devices", Tables: []table.TableDefinition{tableDefinitions[GaudiTableName]}}, + {FlagName: flagCxlName, FlagVar: &flagCxl, Help: "CXL Devices", Tables: []table.TableDefinition{tableDefinitions[CXLTableName]}}, + {FlagName: flagPcieName, FlagVar: &flagPcie, Help: "PCIE Slots", Tables: []table.TableDefinition{tableDefinitions[PCIeTableName]}}, + {FlagName: flagCveName, FlagVar: &flagCve, Help: "Vulnerabilities", Tables: []table.TableDefinition{tableDefinitions[CVETableName]}}, + {FlagName: flagProcessName, FlagVar: &flagProcess, Help: "Process List", Tables: []table.TableDefinition{tableDefinitions[ProcessTableName]}}, + {FlagName: flagSensorName, FlagVar: &flagSensor, Help: "Sensor Status", Tables: []table.TableDefinition{tableDefinitions[SensorTableName]}}, + {FlagName: flagChassisStatusName, FlagVar: &flagChassisStatus, Help: "Chassis Status", Tables: []table.TableDefinition{tableDefinitions[ChassisStatusTableName]}}, + {FlagName: flagPmuName, FlagVar: &flagPmu, Help: "Performance Monitoring Unit Status", Tables: []table.TableDefinition{tableDefinitions[PMUTableName]}}, + {FlagName: flagSystemEventLogName, FlagVar: &flagSystemEventLog, Help: "System Event Log", Tables: []table.TableDefinition{tableDefinitions[SystemEventLogTableName]}}, + {FlagName: flagKernelLogName, FlagVar: &flagKernelLog, Help: "Kernel Log", Tables: []table.TableDefinition{tableDefinitions[KernelLogTableName]}}, } func init() { @@ -328,16 +329,16 @@ func validateFlags(cmd *cobra.Command, args []string) error { } func runCmd(cmd *cobra.Command, args []string) error { - tableNames := []string{} + tables := []table.TableDefinition{} // add category tables for _, cat := range categories { if *cat.FlagVar || flagAll { - tableNames = append(tableNames, cat.TableNames...) + tables = append(tables, cat.Tables...) } } // add benchmark tables - for _, benchmark := range flagBenchmark { - tableNames = append(tableNames, benchmarkTableNames[benchmark]...) 
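The replacement loop that follows appends benchmarkTables[value] for each selected --benchmark value, flattening the chosen slices of table definitions into one list. Below is a standalone sketch of that lookup-and-append pattern, using a simplified stand-in for table.TableDefinition; the type and table names here are placeholders, not the project's real definitions.

package main

import "fmt"

// tableDefinition is a simplified stand-in for table.TableDefinition.
type tableDefinition struct{ Name string }

// benchmarkTables maps a --benchmark flag value to the tables it selects.
var benchmarkTables = map[string][]tableDefinition{
	"speed":  {{Name: "Speed Benchmark"}},
	"memory": {{Name: "Memory Benchmark"}},
}

func main() {
	flagBenchmark := []string{"speed", "memory"} // e.g., parsed from --benchmark speed,memory
	var tables []tableDefinition
	for _, v := range flagBenchmark {
		// Appending a nil slice is a no-op, so unknown flag values add nothing.
		tables = append(tables, benchmarkTables[v]...)
	}
	fmt.Println(tables) // [{Speed Benchmark} {Memory Benchmark}]
}

Keeping the flag-to-table mapping in one map means adding a benchmark option only requires a new map entry rather than another branch in runCmd.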
+ for _, benchmarkFlagValue := range flagBenchmark { + tables = append(tables, benchmarkTables[benchmarkFlagValue]...) } // include benchmark summary table if all benchmark options are selected var summaryFunc common.SummaryFunc @@ -352,29 +353,36 @@ func runCmd(cmd *cobra.Command, args []string) error { reportingCommand := common.ReportingCommand{ Cmd: cmd, ScriptParams: map[string]string{"StorageDir": flagStorageDir}, - TableNames: tableNames, + Tables: tables, SummaryFunc: summaryFunc, SummaryTableName: benchmarkSummaryTableName, - SummaryBeforeTableName: table.SpeedBenchmarkTableName, + SummaryBeforeTableName: SpeedBenchmarkTableName, InsightsFunc: insightsFunc, } + + report.RegisterHTMLRenderer(DIMMTableName, dimmTableHTMLRenderer) + report.RegisterHTMLRenderer(FrequencyBenchmarkTableName, frequencyBenchmarkTableHtmlRenderer) + report.RegisterHTMLRenderer(MemoryBenchmarkTableName, memoryBenchmarkTableHtmlRenderer) + + report.RegisterHTMLMultiTargetRenderer(MemoryBenchmarkTableName, memoryBenchmarkTableMultiTargetHtmlRenderer) + return reportingCommand.Run() } func benchmarkSummaryFromTableValues(allTableValues []table.TableValues, outputs map[string]script.ScriptOutput) table.TableValues { - maxFreq := getValueFromTableValues(getTableValues(allTableValues, table.FrequencyBenchmarkTableName), "SSE", 0) + maxFreq := getValueFromTableValues(getTableValues(allTableValues, FrequencyBenchmarkTableName), "SSE", 0) if maxFreq != "" { maxFreq = maxFreq + " GHz" } - allCoreMaxFreq := getValueFromTableValues(getTableValues(allTableValues, table.FrequencyBenchmarkTableName), "SSE", -1) + allCoreMaxFreq := getValueFromTableValues(getTableValues(allTableValues, FrequencyBenchmarkTableName), "SSE", -1) if allCoreMaxFreq != "" { allCoreMaxFreq = allCoreMaxFreq + " GHz" } // get the maximum memory bandwidth from the memory latency table - memLatTableValues := getTableValues(allTableValues, table.MemoryBenchmarkTableName) + memLatTableValues := getTableValues(allTableValues, MemoryBenchmarkTableName) var bandwidthValues []string if len(memLatTableValues.Fields) > 1 { - bandwidthValues = getTableValues(allTableValues, table.MemoryBenchmarkTableName).Fields[1].Values + bandwidthValues = getTableValues(allTableValues, MemoryBenchmarkTableName).Fields[1].Values } maxBandwidth := 0.0 for _, bandwidthValue := range bandwidthValues { @@ -392,7 +400,7 @@ func benchmarkSummaryFromTableValues(allTableValues []table.TableValues, outputs maxMemBW = fmt.Sprintf("%.1f GB/s", maxBandwidth) } // get the minimum memory latency - minLatency := getValueFromTableValues(getTableValues(allTableValues, table.MemoryBenchmarkTableName), "Latency (ns)", 0) + minLatency := getValueFromTableValues(getTableValues(allTableValues, MemoryBenchmarkTableName), "Latency (ns)", 0) if minLatency != "" { minLatency = minLatency + " ns" } @@ -408,18 +416,18 @@ func benchmarkSummaryFromTableValues(allTableValues []table.TableValues, outputs MenuLabel: benchmarkSummaryTableName, }, Fields: []table.Field{ - {Name: "CPU Speed", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.SpeedBenchmarkTableName), "Ops/s", 0) + " Ops/s"}}, + {Name: "CPU Speed", Values: []string{getValueFromTableValues(getTableValues(allTableValues, SpeedBenchmarkTableName), "Ops/s", 0) + " Ops/s"}}, {Name: "Single-core Maximum frequency", Values: []string{maxFreq}}, {Name: "All-core Maximum frequency", Values: []string{allCoreMaxFreq}}, - {Name: "Maximum Power", Values: []string{getValueFromTableValues(getTableValues(allTableValues, 
table.PowerBenchmarkTableName), "Maximum Power", 0)}}, - {Name: "Maximum Temperature", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.TemperatureBenchmarkTableName), "Maximum Temperature", 0)}}, - {Name: "Minimum Power", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.PowerBenchmarkTableName), "Minimum Power", 0)}}, + {Name: "Maximum Power", Values: []string{getValueFromTableValues(getTableValues(allTableValues, PowerBenchmarkTableName), "Maximum Power", 0)}}, + {Name: "Maximum Temperature", Values: []string{getValueFromTableValues(getTableValues(allTableValues, TemperatureBenchmarkTableName), "Maximum Temperature", 0)}}, + {Name: "Minimum Power", Values: []string{getValueFromTableValues(getTableValues(allTableValues, PowerBenchmarkTableName), "Minimum Power", 0)}}, {Name: "Memory Peak Bandwidth", Values: []string{maxMemBW}}, {Name: "Memory Minimum Latency", Values: []string{minLatency}}, - {Name: "Disk Read Bandwidth", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.StorageBenchmarkTableName), "Single-Thread Read Bandwidth", 0)}}, - {Name: "Disk Write Bandwidth", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.StorageBenchmarkTableName), "Single-Thread Write Bandwidth", 0)}}, - {Name: "Microarchitecture", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.SystemSummaryTableName), "Microarchitecture", 0)}}, - {Name: "Sockets", Values: []string{getValueFromTableValues(getTableValues(allTableValues, table.SystemSummaryTableName), "Sockets", 0)}}, + {Name: "Disk Read Bandwidth", Values: []string{getValueFromTableValues(getTableValues(allTableValues, StorageBenchmarkTableName), "Single-Thread Read Bandwidth", 0)}}, + {Name: "Disk Write Bandwidth", Values: []string{getValueFromTableValues(getTableValues(allTableValues, StorageBenchmarkTableName), "Single-Thread Write Bandwidth", 0)}}, + {Name: "Microarchitecture", Values: []string{getValueFromTableValues(getTableValues(allTableValues, SystemSummaryTableName), "Microarchitecture", 0)}}, + {Name: "Sockets", Values: []string{getValueFromTableValues(getTableValues(allTableValues, SystemSummaryTableName), "Sockets", 0)}}, }, } } diff --git a/internal/table/table_defs.go b/cmd/report/report_tables.go similarity index 50% rename from internal/table/table_defs.go rename to cmd/report/report_tables.go index 4361e80a..604c42e8 100644 --- a/internal/table/table_defs.go +++ b/cmd/report/report_tables.go @@ -1,4 +1,4 @@ -package table +package report // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause @@ -6,18 +6,20 @@ package table // table_defs.go defines the tables used for generating reports import ( - "encoding/csv" "fmt" + htmltemplate "html/template" "log/slog" - "regexp" - "slices" + "math" "sort" "strconv" "strings" - "time" + "perfspect/internal/common" "perfspect/internal/cpus" + "perfspect/internal/report" "perfspect/internal/script" + "perfspect/internal/table" + "perfspect/internal/util" ) const ( @@ -68,49 +70,9 @@ const ( MemoryBenchmarkTableName = "Memory Benchmark" NUMABenchmarkTableName = "NUMA Benchmark" StorageBenchmarkTableName = "Storage Benchmark" - // telemetry table names - CPUUtilizationTelemetryTableName = "CPU Utilization Telemetry" - UtilizationCategoriesTelemetryTableName = "Utilization Categories Telemetry" - IPCTelemetryTableName = "IPC Telemetry" - C6TelemetryTableName = "C6 Telemetry" - FrequencyTelemetryTableName = "Frequency Telemetry" - 
IRQRateTelemetryTableName = "IRQ Rate Telemetry" - InstructionTelemetryTableName = "Instruction Telemetry" - DriveTelemetryTableName = "Drive Telemetry" - NetworkTelemetryTableName = "Network Telemetry" - MemoryTelemetryTableName = "Memory Telemetry" - PowerTelemetryTableName = "Power Telemetry" - TemperatureTelemetryTableName = "Temperature Telemetry" - GaudiTelemetryTableName = "Gaudi Telemetry" - PDUTelemetryTableName = "PDU Telemetry" - // config table names - ConfigurationTableName = "Configuration" - // flamegraph table names - CallStackFrequencyTableName = "Call Stack Frequency" - // lock table names - KernelLockAnalysisTableName = "Kernel Lock Analysis" - // common table names - BriefSysSummaryTableName = "Brief System Summary" ) // menu labels -const ( - // telemetry table menu labels - CPUUtilizationTelemetryMenuLabel = "CPU Utilization" - UtilizationCategoriesTelemetryMenuLabel = "Utilization Categories" - IPCTelemetryMenuLabel = "IPC" - C6TelemetryMenuLabel = "C6" - FrequencyTelemetryMenuLabel = "Frequency" - IRQRateTelemetryMenuLabel = "IRQ Rate" - InstructionTelemetryMenuLabel = "Instruction" - DriveTelemetryMenuLabel = "Drive" - NetworkTelemetryMenuLabel = "Network" - MemoryTelemetryMenuLabel = "Memory" - PowerTelemetryMenuLabel = "Power" - TemperatureTelemetryMenuLabel = "Temperature" - GaudiTelemetryMenuLabel = "Gaudi" - PDUTelemetryMenuLabel = "PDU" -) const ( // menu labels @@ -128,7 +90,7 @@ const ( SystemSummaryMenuLabel = "System Summary" ) -var tableDefinitions = map[string]TableDefinition{ +var tableDefinitions = map[string]table.TableDefinition{ // // configuration tables // @@ -489,66 +451,6 @@ var tableDefinitions = map[string]TableDefinition{ script.CveScriptName, }, FieldsFunc: systemSummaryTableValues}, - BriefSysSummaryTableName: { - Name: BriefSysSummaryTableName, - MenuLabel: BriefSysSummaryTableName, - HasRows: false, - ScriptNames: []string{ - script.HostnameScriptName, - script.DateScriptName, - script.LscpuScriptName, - script.LscpuCacheScriptName, - script.LspciBitsScriptName, - script.LspciDevicesScriptName, - script.MaximumFrequencyScriptName, - script.SpecCoreFrequenciesScriptName, - script.MeminfoScriptName, - script.NicInfoScriptName, - script.DiskInfoScriptName, - script.UnameScriptName, - script.EtcReleaseScriptName, - script.PackagePowerLimitName, - script.EpbScriptName, - script.ScalingDriverScriptName, - script.ScalingGovernorScriptName, - script.CstatesScriptName, - script.ElcScriptName, - }, - FieldsFunc: briefSummaryTableValues}, - // - // configuration set table - // - ConfigurationTableName: { - Name: ConfigurationTableName, - Vendors: []string{cpus.IntelVendor}, - HasRows: false, - ScriptNames: []string{ - script.LscpuScriptName, - script.LscpuCacheScriptName, - script.LspciBitsScriptName, - script.LspciDevicesScriptName, - script.L3CacheWayEnabledName, - script.PackagePowerLimitName, - script.EpbScriptName, - script.EppScriptName, - script.EppValidScriptName, - script.EppPackageControlScriptName, - script.EppPackageScriptName, - script.ScalingGovernorScriptName, - script.UncoreMaxFromMSRScriptName, - script.UncoreMinFromMSRScriptName, - script.UncoreMaxFromTPMIScriptName, - script.UncoreMinFromTPMIScriptName, - script.UncoreDieTypesFromTPMIScriptName, - script.SpecCoreFrequenciesScriptName, - script.ElcScriptName, - script.PrefetchControlName, - script.PrefetchersName, - script.PrefetchersAtomName, - script.CstatesScriptName, - script.C1DemotionScriptName, - }, - FieldsFunc: configurationTableValues}, // // benchmarking tables // @@ 
-620,149 +522,6 @@ var tableDefinitions = map[string]TableDefinition{ script.StorageBenchmarkScriptName, }, FieldsFunc: storageBenchmarkTableValues}, - // - // telemetry tables - // - CPUUtilizationTelemetryTableName: { - Name: CPUUtilizationTelemetryTableName, - MenuLabel: CPUUtilizationTelemetryMenuLabel, - HasRows: true, - ScriptNames: []string{ - script.MpstatTelemetryScriptName, - }, - FieldsFunc: cpuUtilizationTelemetryTableValues}, - UtilizationCategoriesTelemetryTableName: { - Name: UtilizationCategoriesTelemetryTableName, - MenuLabel: UtilizationCategoriesTelemetryMenuLabel, - HasRows: true, - ScriptNames: []string{ - script.MpstatTelemetryScriptName, - }, - FieldsFunc: utilizationCategoriesTelemetryTableValues}, - IPCTelemetryTableName: { - Name: IPCTelemetryTableName, - MenuLabel: IPCTelemetryMenuLabel, - Architectures: []string{cpus.X86Architecture}, - HasRows: true, - ScriptNames: []string{ - script.TurbostatTelemetryScriptName, - }, - FieldsFunc: ipcTelemetryTableValues}, - C6TelemetryTableName: { - Name: C6TelemetryTableName, - MenuLabel: C6TelemetryMenuLabel, - Architectures: []string{cpus.X86Architecture}, - HasRows: true, - ScriptNames: []string{ - script.TurbostatTelemetryScriptName, - }, - FieldsFunc: c6TelemetryTableValues}, - FrequencyTelemetryTableName: { - Name: FrequencyTelemetryTableName, - MenuLabel: FrequencyTelemetryMenuLabel, - Architectures: []string{cpus.X86Architecture}, - HasRows: true, - ScriptNames: []string{ - script.TurbostatTelemetryScriptName, - }, - FieldsFunc: frequencyTelemetryTableValues}, - IRQRateTelemetryTableName: { - Name: IRQRateTelemetryTableName, - MenuLabel: IRQRateTelemetryMenuLabel, - HasRows: true, - ScriptNames: []string{ - script.MpstatTelemetryScriptName, - }, - FieldsFunc: irqRateTelemetryTableValues}, - DriveTelemetryTableName: { - Name: DriveTelemetryTableName, - MenuLabel: DriveTelemetryMenuLabel, - HasRows: true, - ScriptNames: []string{ - script.IostatTelemetryScriptName, - }, - FieldsFunc: driveTelemetryTableValues}, - NetworkTelemetryTableName: { - Name: NetworkTelemetryTableName, - MenuLabel: NetworkTelemetryMenuLabel, - HasRows: true, - ScriptNames: []string{ - script.NetworkTelemetryScriptName, - }, - FieldsFunc: networkTelemetryTableValues}, - MemoryTelemetryTableName: { - Name: MemoryTelemetryTableName, - MenuLabel: MemoryTelemetryMenuLabel, - HasRows: true, - ScriptNames: []string{ - script.MemoryTelemetryScriptName, - }, - FieldsFunc: memoryTelemetryTableValues}, - PowerTelemetryTableName: { - Name: PowerTelemetryTableName, - MenuLabel: PowerTelemetryMenuLabel, - Architectures: []string{cpus.X86Architecture}, - HasRows: true, - ScriptNames: []string{ - script.TurbostatTelemetryScriptName, - }, - FieldsFunc: powerTelemetryTableValues}, - TemperatureTelemetryTableName: { - Name: TemperatureTelemetryTableName, - MenuLabel: TemperatureTelemetryMenuLabel, - Architectures: []string{cpus.X86Architecture}, - HasRows: true, - ScriptNames: []string{ - script.TurbostatTelemetryScriptName, - }, - FieldsFunc: temperatureTelemetryTableValues}, - InstructionTelemetryTableName: { - Name: InstructionTelemetryTableName, - MenuLabel: InstructionTelemetryMenuLabel, - Architectures: []string{cpus.X86Architecture}, - HasRows: true, - ScriptNames: []string{ - script.InstructionTelemetryScriptName, - }, - FieldsFunc: instructionTelemetryTableValues}, - GaudiTelemetryTableName: { - Name: GaudiTelemetryTableName, - MenuLabel: GaudiTelemetryMenuLabel, - Architectures: []string{cpus.X86Architecture}, - HasRows: true, - ScriptNames: []string{ - 
script.GaudiTelemetryScriptName, - }, - NoDataFound: "No Gaudi telemetry found. Gaudi devices and the hl-smi tool must be installed on the target system to collect Gaudi stats.", - FieldsFunc: gaudiTelemetryTableValues}, - PDUTelemetryTableName: { - Name: PDUTelemetryTableName, - MenuLabel: PDUTelemetryMenuLabel, - HasRows: true, - ScriptNames: []string{ - script.PDUTelemetryScriptName, - }, - FieldsFunc: pduTelemetryTableValues}, - // - // flamegraph tables - // - CallStackFrequencyTableName: { - Name: CallStackFrequencyTableName, - MenuLabel: CallStackFrequencyTableName, - ScriptNames: []string{ - script.CollapsedCallStacksScriptName, - }, - FieldsFunc: callStackFrequencyTableValues}, - // - // kernel lock analysis tables - // - KernelLockAnalysisTableName: { - Name: KernelLockAnalysisTableName, - MenuLabel: KernelLockAnalysisTableName, - ScriptNames: []string{ - script.ProfileKernelLockScriptName, - }, - FieldsFunc: kernelLockAnalysisTableValues}, } // @@ -776,19 +535,19 @@ var tableDefinitions = map[string]TableDefinition{ // depending on the system configuration. If no data is found for a table with rows, // the FieldsFunc should return an empty slice of fields. -func hostTableValues(outputs map[string]script.ScriptOutput) []Field { +func hostTableValues(outputs map[string]script.ScriptOutput) []table.Field { hostName := strings.TrimSpace(outputs[script.HostnameScriptName].Stdout) time := strings.TrimSpace(outputs[script.DateScriptName].Stdout) - system := valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + - " " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) + - ", " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Version:\s*(.+?)$`) - baseboard := valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Manufacturer:\s*(.+?)$`) + - " " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Product Name:\s*(.+?)$`) + - ", " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Version:\s*(.+?)$`) - chassis := valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Manufacturer:\s*(.+?)$`) + - " " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Type:\s*(.+?)$`) + - ", " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Version:\s*(.+?)$`) - return []Field{ + system := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + + " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) + + ", " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Version:\s*(.+?)$`) + baseboard := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Manufacturer:\s*(.+?)$`) + + " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Product Name:\s*(.+?)$`) + + ", " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Version:\s*(.+?)$`) + chassis := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Manufacturer:\s*(.+?)$`) + + " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Type:\s*(.+?)$`) + + ", " + 
common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Version:\s*(.+?)$`) + return []table.Field{ {Name: "Host Name", Values: []string{hostName}}, {Name: "Time", Values: []string{time}}, {Name: "System", Values: []string{system}}, @@ -797,8 +556,8 @@ func hostTableValues(outputs map[string]script.ScriptOutput) []Field { } } -func pcieSlotsTableValues(outputs map[string]script.ScriptOutput) []Field { - fieldValues := valsArrayFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "9", +func pcieSlotsTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fieldValues := common.ValsArrayFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "9", []string{ `^Designation:\s*(.+?)$`, `^Type:\s*(.+?)$`, @@ -808,9 +567,9 @@ func pcieSlotsTableValues(outputs map[string]script.ScriptOutput) []Field { }..., ) if len(fieldValues) == 0 { - return []Field{} + return []table.Field{} } - fields := []Field{ + fields := []table.Field{ {Name: "Designation"}, {Name: "Type"}, {Name: "Length"}, @@ -825,13 +584,13 @@ func pcieSlotsTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func biosTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ +func biosTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ {Name: "Vendor"}, {Name: "Version"}, {Name: "Release Date"}, } - fieldValues := valsArrayFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "0", + fieldValues := common.ValsArrayFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "0", []string{ `^Vendor:\s*(.+?)$`, `^Version:\s*(.+?)$`, @@ -850,78 +609,78 @@ func biosTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func operatingSystemTableValues(outputs map[string]script.ScriptOutput) []Field { - return []Field{ - {Name: "OS", Values: []string{operatingSystemFromOutput(outputs)}}, - {Name: "Kernel", Values: []string{valFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`)}}, +func operatingSystemTableValues(outputs map[string]script.ScriptOutput) []table.Field { + return []table.Field{ + {Name: "OS", Values: []string{common.OperatingSystemFromOutput(outputs)}}, + {Name: "Kernel", Values: []string{common.ValFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`)}}, {Name: "Boot Parameters", Values: []string{strings.TrimSpace(outputs[script.ProcCmdlineScriptName].Stdout)}}, - {Name: "Microcode", Values: []string{valFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`)}}, + {Name: "Microcode", Values: []string{common.ValFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`)}}, } } -func softwareVersionTableValues(outputs map[string]script.ScriptOutput) []Field { - return []Field{ - {Name: "GCC", Values: []string{valFromRegexSubmatch(outputs[script.GccVersionScriptName].Stdout, `^(gcc .*)$`)}}, - {Name: "GLIBC", Values: []string{valFromRegexSubmatch(outputs[script.GlibcVersionScriptName].Stdout, `^(ldd .*)`)}}, - {Name: "Binutils", Values: []string{valFromRegexSubmatch(outputs[script.BinutilsVersionScriptName].Stdout, `^(GNU ld .*)$`)}}, - {Name: "Python", Values: []string{valFromRegexSubmatch(outputs[script.PythonVersionScriptName].Stdout, `^(Python .*)$`)}}, - {Name: "Python3", Values: []string{valFromRegexSubmatch(outputs[script.Python3VersionScriptName].Stdout, `^(Python 3.*)$`)}}, - {Name: "Java", 
Values: []string{valFromRegexSubmatch(outputs[script.JavaVersionScriptName].Stdout, `^(openjdk .*)$`)}}, - {Name: "OpenSSL", Values: []string{valFromRegexSubmatch(outputs[script.OpensslVersionScriptName].Stdout, `^(OpenSSL .*)$`)}}, +func softwareVersionTableValues(outputs map[string]script.ScriptOutput) []table.Field { + return []table.Field{ + {Name: "GCC", Values: []string{common.ValFromRegexSubmatch(outputs[script.GccVersionScriptName].Stdout, `^(gcc .*)$`)}}, + {Name: "GLIBC", Values: []string{common.ValFromRegexSubmatch(outputs[script.GlibcVersionScriptName].Stdout, `^(ldd .*)`)}}, + {Name: "Binutils", Values: []string{common.ValFromRegexSubmatch(outputs[script.BinutilsVersionScriptName].Stdout, `^(GNU ld .*)$`)}}, + {Name: "Python", Values: []string{common.ValFromRegexSubmatch(outputs[script.PythonVersionScriptName].Stdout, `^(Python .*)$`)}}, + {Name: "Python3", Values: []string{common.ValFromRegexSubmatch(outputs[script.Python3VersionScriptName].Stdout, `^(Python 3.*)$`)}}, + {Name: "Java", Values: []string{common.ValFromRegexSubmatch(outputs[script.JavaVersionScriptName].Stdout, `^(openjdk .*)$`)}}, + {Name: "OpenSSL", Values: []string{common.ValFromRegexSubmatch(outputs[script.OpensslVersionScriptName].Stdout, `^(OpenSSL .*)$`)}}, } } -func cpuTableValues(outputs map[string]script.ScriptOutput) []Field { +func cpuTableValues(outputs map[string]script.ScriptOutput) []table.Field { var l1d, l1i, l2 string - lscpuCache, err := parseLscpuCacheOutput(outputs[script.LscpuCacheScriptName].Stdout) + lscpuCache, err := common.ParseLscpuCacheOutput(outputs[script.LscpuCacheScriptName].Stdout) if err != nil { slog.Warn("failed to parse lscpu cache output", "error", err) } else { if _, ok := lscpuCache["L1d"]; ok { - l1d = l1l2CacheSizeFromLscpuCache(lscpuCache["L1d"]) + l1d = common.L1l2CacheSizeFromLscpuCache(lscpuCache["L1d"]) } if _, ok := lscpuCache["L1i"]; ok { - l1i = l1l2CacheSizeFromLscpuCache(lscpuCache["L1i"]) + l1i = common.L1l2CacheSizeFromLscpuCache(lscpuCache["L1i"]) } if _, ok := lscpuCache["L2"]; ok { - l2 = l1l2CacheSizeFromLscpuCache(lscpuCache["L2"]) - } - } - return []Field{ - {Name: "CPU Model", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name:\s*(.+)$`)}}, - {Name: "Architecture", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Architecture:\s*(.+)$`)}}, - {Name: "Microarchitecture", Values: []string{UarchFromOutput(outputs)}}, - {Name: "Family", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`)}}, - {Name: "Model", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`)}}, - {Name: "Stepping", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`)}}, - {Name: "Base Frequency", Values: []string{baseFrequencyFromOutput(outputs)}, Description: "The minimum guaranteed speed of a single core under standard conditions."}, - {Name: "Maximum Frequency", Values: []string{maxFrequencyFromOutput(outputs)}, Description: "The highest speed a single core can reach with Turbo Boost."}, - {Name: "All-core Maximum Frequency", Values: []string{allCoreMaxFrequencyFromOutput(outputs)}, Description: "The highest speed all cores can reach simultaneously with Turbo Boost."}, - {Name: "CPUs", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(s\):\s*(.+)$`)}}, - {Name: "On-line CPU List", Values: 
[]string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^On-line CPU\(s\) list:\s*(.+)$`)}}, - {Name: "Hyperthreading", Values: []string{hyperthreadingFromOutput(outputs)}}, - {Name: "Cores per Socket", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, - {Name: "Sockets", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)}}, - {Name: "NUMA Nodes", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`)}}, + l2 = common.L1l2CacheSizeFromLscpuCache(lscpuCache["L2"]) + } + } + return []table.Field{ + {Name: "CPU Model", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name:\s*(.+)$`)}}, + {Name: "Architecture", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Architecture:\s*(.+)$`)}}, + {Name: "Microarchitecture", Values: []string{common.UarchFromOutput(outputs)}}, + {Name: "Family", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`)}}, + {Name: "Model", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`)}}, + {Name: "Stepping", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`)}}, + {Name: "Base Frequency", Values: []string{common.BaseFrequencyFromOutput(outputs)}, Description: "The minimum guaranteed speed of a single core under standard conditions."}, + {Name: "Maximum Frequency", Values: []string{common.MaxFrequencyFromOutput(outputs)}, Description: "The highest speed a single core can reach with Turbo Boost."}, + {Name: "All-core Maximum Frequency", Values: []string{common.AllCoreMaxFrequencyFromOutput(outputs)}, Description: "The highest speed all cores can reach simultaneously with Turbo Boost."}, + {Name: "CPUs", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(s\):\s*(.+)$`)}}, + {Name: "On-line CPU List", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^On-line CPU\(s\) list:\s*(.+)$`)}}, + {Name: "Hyperthreading", Values: []string{common.HyperthreadingFromOutput(outputs)}}, + {Name: "Cores per Socket", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, + {Name: "Sockets", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)}}, + {Name: "NUMA Nodes", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`)}}, {Name: "NUMA CPU List", Values: []string{numaCPUListFromOutput(outputs)}}, {Name: "L1d Cache", Values: []string{l1d}, Description: "The size of the L1 data cache for one core."}, {Name: "L1i Cache", Values: []string{l1i}, Description: "The size of the L1 instruction cache for one core."}, {Name: "L2 Cache", Values: []string{l2}, Description: "The size of the L2 cache for one core."}, - {Name: "L3 Cache (instance/total)", Values: []string{l3FromOutput(outputs)}, Description: "The size of one L3 cache instance and the total L3 cache size for the system."}, - {Name: "L3 per Core", Values: []string{l3PerCoreFromOutput(outputs)}, Description: "The L3 cache size per core."}, + {Name: "L3 Cache (instance/total)", Values: []string{common.L3FromOutput(outputs)}, Description: "The size of one L3 cache 
instance and the total L3 cache size for the system."}, + {Name: "L3 per Core", Values: []string{common.L3PerCoreFromOutput(outputs)}, Description: "The L3 cache size per core."}, {Name: "Memory Channels", Values: []string{channelsFromOutput(outputs)}}, {Name: "Intel Turbo Boost", Values: []string{turboEnabledFromOutput(outputs)}}, - {Name: "Virtualization", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Virtualization:\s*(.+)$`)}}, + {Name: "Virtualization", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Virtualization:\s*(.+)$`)}}, {Name: "PPINs", Values: []string{ppinsFromOutput(outputs)}}, } } -func prefetcherTableValues(outputs map[string]script.ScriptOutput) []Field { - prefetchers := prefetchersFromOutput(outputs) +func prefetcherTableValues(outputs map[string]script.ScriptOutput) []table.Field { + prefetchers := common.PrefetchersFromOutput(outputs) if len(prefetchers) == 0 { - return []Field{} + return []table.Field{} } - fields := []Field{ + fields := []table.Field{ {Name: "Prefetcher"}, {Name: "Description"}, {Name: "MSR"}, @@ -938,16 +697,16 @@ func prefetcherTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func cpuTableInsights(outputs map[string]script.ScriptOutput, tableValues TableValues) []Insight { - insights := []Insight{} +func cpuTableInsights(outputs map[string]script.ScriptOutput, tableValues table.TableValues) []table.Insight { + insights := []table.Insight{} addInsightFunc := func(fieldName, bestValue string) { - fieldIndex, err := GetFieldIndex(fieldName, tableValues) + fieldIndex, err := table.GetFieldIndex(fieldName, tableValues) if err != nil { slog.Warn(err.Error()) } else { fieldValue := tableValues.Fields[fieldIndex].Values[0] if fieldValue != "" && fieldValue != "N/A" && fieldValue != bestValue { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: fmt.Sprintf("Consider enabling %s.", fieldName), Justification: fmt.Sprintf("%s is not enabled.", fieldName), }) @@ -957,13 +716,13 @@ func cpuTableInsights(outputs map[string]script.ScriptOutput, tableValues TableV addInsightFunc("Hyperthreading", "Enabled") addInsightFunc("Intel Turbo Boost", "Enabled") // Xeon Generation - familyIndex, err := GetFieldIndex("Family", tableValues) + familyIndex, err := table.GetFieldIndex("Family", tableValues) if err != nil { slog.Warn(err.Error()) } else { family := tableValues.Fields[familyIndex].Values[0] if cpus.IsIntelCPUFamilyStr(family) { // Intel - uarchIndex, err := GetFieldIndex("Microarchitecture", tableValues) + uarchIndex, err := table.GetFieldIndex("Microarchitecture", tableValues) if err != nil { slog.Warn(err.Error()) } else { @@ -985,7 +744,7 @@ func cpuTableInsights(outputs map[string]script.ScriptOutput, tableValues TableV xeonGen, ok := xeonGens[uarch[:3]] if ok { if xeonGen < xeonGens["SPR"] { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: "Consider upgrading to the latest generation Intel(r) Xeon(r) CPU.", Justification: "The CPU is 2 or more generations behind the latest Intel(r) Xeon(r) CPU.", }) @@ -994,7 +753,7 @@ func cpuTableInsights(outputs map[string]script.ScriptOutput, tableValues TableV } } } else { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: "Consider upgrading to an Intel(r) Xeon(r) CPU.", Justification: "The current CPU is not an Intel(r) Xeon(r) CPU.", }) @@ -1003,11 +762,11 @@ func 
cpuTableInsights(outputs map[string]script.ScriptOutput, tableValues TableV return insights } -func isaTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{} +func isaTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{} supported := isaSupportedFromOutput(outputs) for i, isa := range isaFullNames() { - fields = append(fields, Field{ + fields = append(fields, table.Field{ Name: isa, Values: []string{supported[i]}, }) @@ -1015,12 +774,12 @@ func isaTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func acceleratorTableValues(outputs map[string]script.ScriptOutput) []Field { +func acceleratorTableValues(outputs map[string]script.ScriptOutput) []table.Field { names := acceleratorNames() if len(names) == 0 { - return []Field{} + return []table.Field{} } - return []Field{ + return []table.Field{ {Name: "Name", Values: names}, {Name: "Count", Values: acceleratorCountsFromOutput(outputs)}, {Name: "Work Queues", Values: acceleratorWorkQueuesFromOutput(outputs)}, @@ -1029,19 +788,19 @@ func acceleratorTableValues(outputs map[string]script.ScriptOutput) []Field { } } -func acceleratorTableInsights(outputs map[string]script.ScriptOutput, tableValues TableValues) []Insight { - insights := []Insight{} - nameFieldIndex, err := GetFieldIndex("Name", tableValues) +func acceleratorTableInsights(outputs map[string]script.ScriptOutput, tableValues table.TableValues) []table.Insight { + insights := []table.Insight{} + nameFieldIndex, err := table.GetFieldIndex("Name", tableValues) if err != nil { slog.Warn(err.Error()) return insights } - countFieldIndex, err := GetFieldIndex("Count", tableValues) + countFieldIndex, err := table.GetFieldIndex("Count", tableValues) if err != nil { slog.Warn(err.Error()) return insights } - queuesFieldIndex, err := GetFieldIndex("Work Queues", tableValues) + queuesFieldIndex, err := table.GetFieldIndex("Work Queues", tableValues) if err != nil { slog.Warn(err.Error()) return insights @@ -1050,13 +809,13 @@ func acceleratorTableInsights(outputs map[string]script.ScriptOutput, tableValue name := tableValues.Fields[nameFieldIndex].Values[i] queues := tableValues.Fields[queuesFieldIndex].Values[i] if name == "DSA" && count != "0" && queues == "None" { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: "Consider configuring DSA to allow accelerated data copy and transformation in DSA-enabled software.", Justification: "No work queues are configured for DSA accelerator(s).", }) } if name == "IAA" && count != "0" && queues == "None" { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: "Consider configuring IAA to allow accelerated compression and decompression in IAA-enabled software.", Justification: "No work queues are configured for IAA accelerator(s).", }) @@ -1065,26 +824,26 @@ func acceleratorTableInsights(outputs map[string]script.ScriptOutput, tableValue return insights } -func powerTableValues(outputs map[string]script.ScriptOutput) []Field { - return []Field{ - {Name: "TDP", Values: []string{tdpFromOutput(outputs)}}, - {Name: "Energy Performance Bias", Values: []string{epbFromOutput(outputs)}}, - {Name: "Energy Performance Preference", Values: []string{eppFromOutput(outputs)}}, +func powerTableValues(outputs map[string]script.ScriptOutput) []table.Field { + return []table.Field{ + {Name: "TDP", Values: []string{common.TDPFromOutput(outputs)}}, + {Name: "Energy Performance Bias", 
Values: []string{common.EPBFromOutput(outputs)}}, + {Name: "Energy Performance Preference", Values: []string{common.EPPFromOutput(outputs)}}, {Name: "Scaling Governor", Values: []string{strings.TrimSpace(outputs[script.ScalingGovernorScriptName].Stdout)}}, {Name: "Scaling Driver", Values: []string{strings.TrimSpace(outputs[script.ScalingDriverScriptName].Stdout)}}, } } -func powerTableInsights(outputs map[string]script.ScriptOutput, tableValues TableValues) []Insight { - insights := []Insight{} +func powerTableInsights(outputs map[string]script.ScriptOutput, tableValues table.TableValues) []table.Insight { + insights := []table.Insight{} addInsightFunc := func(fieldName, bestValue string) { - fieldIndex, err := GetFieldIndex(fieldName, tableValues) + fieldIndex, err := table.GetFieldIndex(fieldName, tableValues) if err != nil { slog.Warn(err.Error()) } else { fieldValue := tableValues.Fields[fieldIndex].Values[0] if fieldValue != "" && fieldValue != bestValue { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: fmt.Sprintf("Consider setting %s to '%s'.", fieldName, bestValue), Justification: fmt.Sprintf("%s is set to '%s'", fieldName, fieldValue), }) @@ -1098,12 +857,12 @@ func powerTableInsights(outputs map[string]script.ScriptOutput, tableValues Tabl return insights } -func cstateTableValues(outputs map[string]script.ScriptOutput) []Field { - cstates := cstatesFromOutput(outputs) +func cstateTableValues(outputs map[string]script.ScriptOutput) []table.Field { + cstates := common.CstatesFromOutput(outputs) if len(cstates) == 0 { - return []Field{} + return []table.Field{} } - fields := []Field{ + fields := []table.Field{ {Name: "Name"}, {Name: "Status"}, // enabled/disabled } @@ -1114,38 +873,38 @@ func cstateTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func uncoreTableValues(outputs map[string]script.ScriptOutput) []Field { - uarch := UarchFromOutput(outputs) +func uncoreTableValues(outputs map[string]script.ScriptOutput) []table.Field { + uarch := common.UarchFromOutput(outputs) if uarch == "" { slog.Error("failed to get uarch from script outputs") - return []Field{} + return []table.Field{} } if strings.Contains(uarch, "SRF") || strings.Contains(uarch, "GNR") || strings.Contains(uarch, "CWF") { - return []Field{ - {Name: "Min Frequency (Compute)", Values: []string{uncoreMinMaxDieFrequencyFromOutput(false, true, outputs)}}, - {Name: "Min Frequency (I/O)", Values: []string{uncoreMinMaxDieFrequencyFromOutput(false, false, outputs)}}, - {Name: "Max Frequency (Compute)", Values: []string{uncoreMinMaxDieFrequencyFromOutput(true, true, outputs)}}, - {Name: "Max Frequency (I/O)", Values: []string{uncoreMinMaxDieFrequencyFromOutput(true, false, outputs)}}, + return []table.Field{ + {Name: "Min Frequency (Compute)", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(false, true, outputs)}}, + {Name: "Min Frequency (I/O)", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(false, false, outputs)}}, + {Name: "Max Frequency (Compute)", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(true, true, outputs)}}, + {Name: "Max Frequency (I/O)", Values: []string{common.UncoreMinMaxDieFrequencyFromOutput(true, false, outputs)}}, {Name: "CHA Count", Values: []string{chaCountFromOutput(outputs)}}, } } else { // field counts need to match for the all_hosts reports to work properly - return []Field{ - {Name: "Min Frequency", Values: []string{uncoreMinFrequencyFromOutput(outputs)}}, + return 
[]table.Field{ + {Name: "Min Frequency", Values: []string{common.UncoreMinFrequencyFromOutput(outputs)}}, {Name: "N/A", Values: []string{""}}, - {Name: "Max Frequency", Values: []string{uncoreMaxFrequencyFromOutput(outputs)}}, + {Name: "Max Frequency", Values: []string{common.UncoreMaxFrequencyFromOutput(outputs)}}, {Name: "N/A", Values: []string{""}}, {Name: "CHA Count", Values: []string{chaCountFromOutput(outputs)}}, } } } -func elcTableValues(outputs map[string]script.ScriptOutput) []Field { - return elcFieldValuesFromOutput(outputs) +func elcTableValues(outputs map[string]script.ScriptOutput) []table.Field { + return common.ELCFieldValuesFromOutput(outputs) } -func elcTableInsights(outputs map[string]script.ScriptOutput, tableValues TableValues) []Insight { - insights := []Insight{} - modeFieldIndex, err := GetFieldIndex("Mode", tableValues) +func elcTableInsights(outputs map[string]script.ScriptOutput, tableValues table.TableValues) []table.Insight { + insights := []table.Insight{} + modeFieldIndex, err := table.GetFieldIndex("Mode", tableValues) if err != nil { slog.Warn(err.Error()) } else { @@ -1153,7 +912,7 @@ func elcTableInsights(outputs map[string]script.ScriptOutput, tableValues TableV firstMode := tableValues.Fields[modeFieldIndex].Values[0] for _, mode := range tableValues.Fields[modeFieldIndex].Values[1:] { if mode != firstMode { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: "Consider setting Efficiency Latency Control mode consistently across all dies.", Justification: "ELC mode is not set consistently across all dies.", }) @@ -1163,7 +922,7 @@ func elcTableInsights(outputs map[string]script.ScriptOutput, tableValues TableV // suggest setting ELC mode to 'Latency Optimized' or 'Default' based on the current setting for _, mode := range tableValues.Fields[modeFieldIndex].Values { if mode != "" && mode != "Latency Optimized" { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: "Consider setting Efficiency Latency Control mode to 'Latency Optimized' when workload is highly sensitive to memory latency.", Justification: fmt.Sprintf("ELC mode is set to '%s' on at least one die.", mode), }) @@ -1172,7 +931,7 @@ func elcTableInsights(outputs map[string]script.ScriptOutput, tableValues TableV } for _, mode := range tableValues.Fields[modeFieldIndex].Values { if mode != "" && mode != "Default" { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: "Consider setting Efficiency Latency Control mode to 'Default' to balance uncore performance and power utilization.", Justification: fmt.Sprintf("ELC mode is set to '%s' on at least one die.", mode), }) @@ -1180,9 +939,9 @@ func elcTableInsights(outputs map[string]script.ScriptOutput, tableValues TableV } } // if epb is not set to 'Performance (0)' and ELC mode is set to 'Latency Optimized', suggest setting epb to 'Performance (0)' - epb := epbFromOutput(outputs) + epb := common.EPBFromOutput(outputs) if epb != "" && epb != "Performance (0)" && firstMode == "Latency Optimized" { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: "Consider setting Energy Performance Bias to 'Performance (0)' to allow Latency Optimized mode to operate as designed.", Justification: fmt.Sprintf("Energy Performance Bias is set to '%s' and ELC Mode is set to '%s'.", epb, firstMode), }) @@ -1191,18 +950,18 @@ func elcTableInsights(outputs 
map[string]script.ScriptOutput, tableValues TableV return insights } -func maximumFrequencyTableValues(outputs map[string]script.ScriptOutput) []Field { - frequencyBuckets, err := getSpecFrequencyBuckets(outputs) +func maximumFrequencyTableValues(outputs map[string]script.ScriptOutput) []table.Field { + frequencyBuckets, err := common.GetSpecFrequencyBuckets(outputs) if err != nil { slog.Warn("unable to get spec core frequencies", slog.String("error", err.Error())) - return []Field{} + return []table.Field{} } - var fields []Field + var fields []table.Field for i, row := range frequencyBuckets { // first row is field names if i == 0 { for _, fieldName := range row { - fields = append(fields, Field{Name: fieldName}) + fields = append(fields, table.Field{Name: fieldName}) } continue } @@ -1214,17 +973,17 @@ func maximumFrequencyTableValues(outputs map[string]script.ScriptOutput) []Field return fields } -func sstTFHPTableValues(outputs map[string]script.ScriptOutput) []Field { +func sstTFHPTableValues(outputs map[string]script.ScriptOutput) []table.Field { output := outputs[script.SSTTFHPScriptName].Stdout if len(output) == 0 { - return []Field{} + return []table.Field{} } lines := strings.Split(output, "\n") if len(lines) >= 1 && (strings.Contains(lines[0], "not supported") || strings.Contains(lines[0], "not enabled")) { - return []Field{} + return []table.Field{} } // lines should contain CSV formatted data - fields := []Field{} + fields := []table.Field{} for i, line := range lines { // field names are in the header if i == 0 { @@ -1233,7 +992,7 @@ func sstTFHPTableValues(outputs map[string]script.ScriptOutput) []Field { if j > 1 { fieldName = fieldName + " (MHz)" } - fields = append(fields, Field{Name: fieldName}) + fields = append(fields, table.Field{Name: fieldName}) } continue } @@ -1250,7 +1009,7 @@ func sstTFHPTableValues(outputs map[string]script.ScriptOutput) []Field { // confirm value is a number if _, err := strconv.Atoi(value); err != nil { slog.Warn("unexpected non-numeric value in line", slog.String("line", line), slog.String("value", value)) - return []Field{} + return []table.Field{} } if j > 1 { value = value + "00" @@ -1261,22 +1020,22 @@ func sstTFHPTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func sstTFLPTableValues(outputs map[string]script.ScriptOutput) []Field { +func sstTFLPTableValues(outputs map[string]script.ScriptOutput) []table.Field { output := outputs[script.SSTTFLPScriptName].Stdout if len(output) == 0 { - return []Field{} + return []table.Field{} } lines := strings.Split(output, "\n") if len(lines) >= 1 && (strings.Contains(lines[0], "not supported") || strings.Contains(lines[0], "not enabled")) { - return []Field{} + return []table.Field{} } // lines should contain CSV formatted data - fields := []Field{} + fields := []table.Field{} for i, line := range lines { // field names are in the header if i == 0 { for fieldName := range strings.SplitSeq(line, ",") { - fields = append(fields, Field{Name: fieldName + " (MHz)"}) + fields = append(fields, table.Field{Name: fieldName + " (MHz)"}) } continue } @@ -1293,7 +1052,7 @@ func sstTFLPTableValues(outputs map[string]script.ScriptOutput) []Field { // confirm value is a number if _, err := strconv.Atoi(value); err != nil { slog.Warn("unexpected non-numeric value in line", slog.String("line", line), slog.String("value", value)) - return []Field{} + return []table.Field{} } fields[j].Values = append(fields[j].Values, value+"00") } @@ -1301,17 +1060,17 @@ func 
sstTFLPTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func memoryTableValues(outputs map[string]script.ScriptOutput) []Field { - return []Field{ +func memoryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + return []table.Field{ {Name: "Installed Memory", Values: []string{installedMemoryFromOutput(outputs)}}, - {Name: "MemTotal", Values: []string{valFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemTotal:\s*(.+?)$`)}}, - {Name: "MemFree", Values: []string{valFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemFree:\s*(.+?)$`)}}, - {Name: "MemAvailable", Values: []string{valFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemAvailable:\s*(.+?)$`)}}, - {Name: "Buffers", Values: []string{valFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Buffers:\s*(.+?)$`)}}, - {Name: "Cached", Values: []string{valFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Cached:\s*(.+?)$`)}}, - {Name: "HugePages_Total", Values: []string{valFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^HugePages_Total:\s*(.+?)$`)}}, - {Name: "Hugepagesize", Values: []string{valFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Hugepagesize:\s*(.+?)$`)}}, - {Name: "Transparent Huge Pages", Values: []string{valFromRegexSubmatch(outputs[script.TransparentHugePagesScriptName].Stdout, `.*\[(.*)\].*`)}}, + {Name: "MemTotal", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemTotal:\s*(.+?)$`)}}, + {Name: "MemFree", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemFree:\s*(.+?)$`)}}, + {Name: "MemAvailable", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemAvailable:\s*(.+?)$`)}}, + {Name: "Buffers", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Buffers:\s*(.+?)$`)}}, + {Name: "Cached", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Cached:\s*(.+?)$`)}}, + {Name: "HugePages_Total", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^HugePages_Total:\s*(.+?)$`)}}, + {Name: "Hugepagesize", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Hugepagesize:\s*(.+?)$`)}}, + {Name: "Transparent Huge Pages", Values: []string{common.ValFromRegexSubmatch(outputs[script.TransparentHugePagesScriptName].Stdout, `.*\[(.*)\].*`)}}, {Name: "Automatic NUMA Balancing", Values: []string{numaBalancingFromOutput(outputs)}}, {Name: "Populated Memory Channels", Values: []string{populatedChannelsFromOutput(outputs)}}, {Name: "Total Memory Encryption (TME)", Values: []string{strings.TrimSpace(outputs[script.TmeScriptName].Stdout)}}, @@ -1319,29 +1078,29 @@ func memoryTableValues(outputs map[string]script.ScriptOutput) []Field { } } -func memoryTableInsights(outputs map[string]script.ScriptOutput, tableValues TableValues) []Insight { - insights := []Insight{} +func memoryTableInsights(outputs map[string]script.ScriptOutput, tableValues table.TableValues) []table.Insight { + insights := []table.Insight{} // check if memory is not fully populated - populatedChannelsIndex, err := GetFieldIndex("Populated Memory Channels", tableValues) + populatedChannelsIndex, err := table.GetFieldIndex("Populated Memory Channels", tableValues) if err != nil { slog.Warn(err.Error()) } else { populatedChannels := 
tableValues.Fields[populatedChannelsIndex].Values[0] if populatedChannels != "" { - uarch := UarchFromOutput(outputs) + uarch := common.UarchFromOutput(outputs) if uarch != "" { cpu, err := cpus.GetCPUByMicroArchitecture(uarch) if err != nil { slog.Warn(err.Error()) } else { - sockets := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) + sockets := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) socketCount, err := strconv.Atoi(sockets) if err != nil { slog.Warn(err.Error()) } else { totalMemoryChannels := socketCount * cpu.MemoryChannelCount if populatedChannels != strconv.Itoa(totalMemoryChannels) { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: fmt.Sprintf("Consider populating all (%d) memory channels.", totalMemoryChannels), Justification: fmt.Sprintf("%s memory channels are populated.", populatedChannels), }) @@ -1352,19 +1111,19 @@ func memoryTableInsights(outputs map[string]script.ScriptOutput, tableValues Tab } } // check if NUMA balancing is not enabled (when there are multiple NUMA nodes) - nodes := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`) + nodes := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`) nodeCount, err := strconv.Atoi(nodes) if err != nil { slog.Warn(err.Error()) } else { if nodeCount > 1 { - numaBalancingIndex, err := GetFieldIndex("Automatic NUMA Balancing", tableValues) + numaBalancingIndex, err := table.GetFieldIndex("Automatic NUMA Balancing", tableValues) if err != nil { slog.Warn(err.Error()) } else { numaBalancing := tableValues.Fields[numaBalancingIndex].Values[0] if numaBalancing != "" && numaBalancing != "Enabled" { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: "Consider enabling Automatic NUMA Balancing.", Justification: "Automatic NUMA Balancing is not enabled.", }) @@ -1376,8 +1135,8 @@ func memoryTableInsights(outputs map[string]script.ScriptOutput, tableValues Tab return insights } -func dimmTableValues(outputs map[string]script.ScriptOutput) []Field { - dimmFieldValues := valsArrayFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "17", +func dimmTableValues(outputs map[string]script.ScriptOutput) []table.Field { + dimmFieldValues := common.ValsArrayFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "17", []string{ `^Bank Locator:\s*(.+?)$`, `^Locator:\s*(.+?)$`, @@ -1393,9 +1152,9 @@ func dimmTableValues(outputs map[string]script.ScriptOutput) []Field { }..., ) if len(dimmFieldValues) == 0 { - return []Field{} + return []table.Field{} } - fields := []Field{ + fields := []table.Field{ {Name: "Bank Locator"}, {Name: "Locator"}, {Name: "Manufacturer"}, @@ -1433,14 +1192,14 @@ func dimmTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func dimmTableInsights(outputs map[string]script.ScriptOutput, tableValues TableValues) []Insight { - insights := []Insight{} +func dimmTableInsights(outputs map[string]script.ScriptOutput, tableValues table.TableValues) []table.Insight { + insights := []table.Insight{} // check if are configured for their maximum speed - SpeedIndex, err := GetFieldIndex("Speed", tableValues) + SpeedIndex, err := table.GetFieldIndex("Speed", tableValues) if err != nil { slog.Warn(err.Error()) } else { - ConfiguredSpeedIndex, err := GetFieldIndex("Configured Speed", tableValues) + 
ConfiguredSpeedIndex, err := table.GetFieldIndex("Configured Speed", tableValues) if err != nil { slog.Warn(err.Error()) } else { @@ -1456,7 +1215,7 @@ func dimmTableInsights(outputs map[string]script.ScriptOutput, tableValues Table slog.Warn(err.Error()) } else { if speedVal < configuredSpeedVal { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: "Consider configuring DIMMs for their maximum speed.", Justification: fmt.Sprintf("DIMMs configured for %s when their maximum speed is %s.", configuredSpeed, speed), }) @@ -1470,12 +1229,12 @@ func dimmTableInsights(outputs map[string]script.ScriptOutput, tableValues Table return insights } -func nicTableValues(outputs map[string]script.ScriptOutput) []Field { - allNicsInfo := parseNicInfo(outputs[script.NicInfoScriptName].Stdout) +func nicTableValues(outputs map[string]script.ScriptOutput) []table.Field { + allNicsInfo := common.ParseNicInfo(outputs[script.NicInfoScriptName].Stdout) if len(allNicsInfo) == 0 { - return []Field{} + return []table.Field{} } - fields := []Field{ + fields := []table.Field{ {Name: "Name"}, {Name: "Vendor (ID)"}, {Name: "Model (ID)"}, @@ -1536,13 +1295,13 @@ func nicTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func nicPacketSteeringTableValues(outputs map[string]script.ScriptOutput) []Field { - allNicsInfo := parseNicInfo(outputs[script.NicInfoScriptName].Stdout) +func nicPacketSteeringTableValues(outputs map[string]script.ScriptOutput) []table.Field { + allNicsInfo := common.ParseNicInfo(outputs[script.NicInfoScriptName].Stdout) if len(allNicsInfo) == 0 { - return []Field{} + return []table.Field{} } - fields := []Field{ + fields := []table.Field{ {Name: "Interface"}, {Name: "Type", Description: "XPS (Transmit Packet Steering) and RPS (Receive Packet Steering) are software-based mechanisms that allow the selection of a specific logical CPU core to handle the transmission or processing of network packets for a given queue."}, {Name: "Queue:CPU(s) | Queue|CPU(s) | ..."}, @@ -1565,7 +1324,7 @@ func nicPacketSteeringTableValues(outputs map[string]script.ScriptOutput) []Fiel } if len(fields[0].Values) == 0 { - return []Field{} + return []table.Field{} } return fields } @@ -1598,12 +1357,12 @@ func formatQueueCPUMappings(mappings map[string]string, prefix string) string { return strings.Join(queueMappings, " | ") } -func nicCpuAffinityTableValues(outputs map[string]script.ScriptOutput) []Field { - nicIRQMappings := nicIRQMappingsFromOutput(outputs) +func nicCpuAffinityTableValues(outputs map[string]script.ScriptOutput) []table.Field { + nicIRQMappings := common.NICIrqMappingsFromOutput(outputs) if len(nicIRQMappings) == 0 { - return []Field{} + return []table.Field{} } - fields := []Field{ + fields := []table.Field{ {Name: "Interface"}, {Name: "IRQ:CPU | IRQ:CPU | ..."}, } @@ -1614,9 +1373,9 @@ func nicCpuAffinityTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func networkConfigTableValues(outputs map[string]script.ScriptOutput) []Field { +func networkConfigTableValues(outputs map[string]script.ScriptOutput) []table.Field { // these are the fields we want to display - fields := []Field{ + fields := []table.Field{ {Name: "net.ipv4.tcp_rmem"}, {Name: "net.ipv4.tcp_wmem"}, {Name: "net.core.rmem_max"}, @@ -1652,12 +1411,12 @@ func networkConfigTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func diskTableValues(outputs map[string]script.ScriptOutput) []Field { - allDisksInfo := 
diskInfoFromOutput(outputs) +func diskTableValues(outputs map[string]script.ScriptOutput) []table.Field { + allDisksInfo := common.DiskInfoFromOutput(outputs) if len(allDisksInfo) == 0 { - return []Field{} + return []table.Field{} } - fields := []Field{ + fields := []table.Field{ {Name: "Name"}, {Name: "Model"}, {Name: "Size"}, @@ -1692,19 +1451,19 @@ func diskTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func filesystemTableValues(outputs map[string]script.ScriptOutput) []Field { +func filesystemTableValues(outputs map[string]script.ScriptOutput) []table.Field { return filesystemFieldValuesFromOutput(outputs) } -func filesystemTableInsights(outputs map[string]script.ScriptOutput, tableValues TableValues) []Insight { - insights := []Insight{} - mountOptionsIndex, err := GetFieldIndex("Mount Options", tableValues) +func filesystemTableInsights(outputs map[string]script.ScriptOutput, tableValues table.TableValues) []table.Insight { + insights := []table.Insight{} + mountOptionsIndex, err := table.GetFieldIndex("Mount Options", tableValues) if err != nil { slog.Warn(err.Error()) } else { for i, options := range tableValues.Fields[mountOptionsIndex].Values { if strings.Contains(options, "discard") { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: fmt.Sprintf("Consider mounting the '%s' file system without the 'discard' option and instead configure periodic TRIM for SSDs, if used for I/O intensive workloads.", tableValues.Fields[0].Values[i]), Justification: fmt.Sprintf("The '%s' filesystem is mounted with 'discard' option.", tableValues.Fields[0].Values[i]), }) @@ -1714,12 +1473,12 @@ func filesystemTableInsights(outputs map[string]script.ScriptOutput, tableValues return insights } -func gpuTableValues(outputs map[string]script.ScriptOutput) []Field { +func gpuTableValues(outputs map[string]script.ScriptOutput) []table.Field { gpuInfos := gpuInfoFromOutput(outputs) if len(gpuInfos) == 0 { - return []Field{} + return []table.Field{} } - fields := []Field{ + fields := []table.Field{ {Name: "Manufacturer"}, {Name: "Model"}, {Name: "PCI ID"}, @@ -1732,12 +1491,12 @@ func gpuTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func gaudiTableValues(outputs map[string]script.ScriptOutput) []Field { +func gaudiTableValues(outputs map[string]script.ScriptOutput) []table.Field { gaudiInfos := gaudiInfoFromOutput(outputs) if len(gaudiInfos) == 0 { - return []Field{} + return []table.Field{} } - fields := []Field{ + fields := []table.Field{ {Name: "Module ID"}, {Name: "Microarchitecture"}, {Name: "Serial Number"}, @@ -1762,12 +1521,12 @@ func gaudiTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func cxlTableValues(outputs map[string]script.ScriptOutput) []Field { +func cxlTableValues(outputs map[string]script.ScriptOutput) []table.Field { cxlDevices := getPCIDevices("CXL", outputs) if len(cxlDevices) == 0 { - return []Field{} + return []table.Field{} } - fields := []Field{ + fields := []table.Field{ {Name: "Slot"}, {Name: "Class"}, {Name: "Vendor"}, @@ -1789,20 +1548,20 @@ func cxlTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func cveTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{} +func cveTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{} cves := cveInfoFromOutput(outputs) for _, cve := range cves { - fields = append(fields, Field{Name: cve[0], Values: 
[]string{cve[1]}}) + fields = append(fields, table.Field{Name: cve[0], Values: []string{cve[1]}}) } return fields } -func cveTableInsights(outputs map[string]script.ScriptOutput, tableValues TableValues) []Insight { - insights := []Insight{} +func cveTableInsights(outputs map[string]script.ScriptOutput, tableValues table.TableValues) []table.Insight { + insights := []table.Insight{} for _, field := range tableValues.Fields { if strings.HasPrefix(field.Values[0], "VULN") { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: fmt.Sprintf("Consider applying the security patch for %s.", field.Name), Justification: fmt.Sprintf("The system is vulnerable to %s.", field.Name), }) @@ -1811,13 +1570,13 @@ func cveTableInsights(outputs map[string]script.ScriptOutput, tableValues TableV return insights } -func processTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{} +func processTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{} for i, line := range strings.Split(outputs[script.ProcessListScriptName].Stdout, "\n") { tokens := strings.Fields(line) if i == 0 { // header -- defines fields in table for _, token := range tokens { - fields = append(fields, Field{Name: token}) + fields = append(fields, table.Field{Name: token}) } continue } @@ -1833,8 +1592,8 @@ func processTableValues(outputs map[string]script.ScriptOutput) []Field { return fields } -func sensorTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ +func sensorTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ {Name: "Sensor"}, {Name: "Reading"}, {Name: "Status"}, @@ -1849,13 +1608,13 @@ func sensorTableValues(outputs map[string]script.ScriptOutput) []Field { fields[2].Values = append(fields[2].Values, tokens[2]) } if len(fields[0].Values) == 0 { - return []Field{} + return []table.Field{} } return fields } -func chassisStatusTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{} +func chassisStatusTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{} for line := range strings.SplitSeq(outputs[script.IpmitoolChassisScriptName].Stdout, "\n") { tokens := strings.Split(line, ":") if len(tokens) != 2 { @@ -1866,13 +1625,13 @@ func chassisStatusTableValues(outputs map[string]script.ScriptOutput) []Field { if strings.Contains(fieldName, "Button") { // skip button status continue } - fields = append(fields, Field{Name: fieldName, Values: []string{fieldValue}}) + fields = append(fields, table.Field{Name: fieldName, Values: []string{fieldValue}}) } return fields } -func systemEventLogTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ +func systemEventLogTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ {Name: "Date"}, {Name: "Time"}, {Name: "Sensor"}, @@ -1891,14 +1650,14 @@ func systemEventLogTableValues(outputs map[string]script.ScriptOutput) []Field { fields[4].Values = append(fields[4].Values, tokens[4]) } if len(fields[0].Values) == 0 { - return []Field{} + return []table.Field{} } return fields } -func systemEventLogTableInsights(outputs map[string]script.ScriptOutput, tableValues TableValues) []Insight { - insights := []Insight{} - sensorFieldIndex, err := GetFieldIndex("Sensor", tableValues) +func systemEventLogTableInsights(outputs map[string]script.ScriptOutput, tableValues table.TableValues) 
[]table.Insight { + insights := []table.Insight{} + sensorFieldIndex, err := table.GetFieldIndex("Sensor", tableValues) if err != nil { slog.Warn(err.Error()) } else { @@ -1909,7 +1668,7 @@ func systemEventLogTableInsights(outputs map[string]script.ScriptOutput, tableVa } } if temperatureEvents > 0 { - insights = append(insights, Insight{ + insights = append(insights, table.Insight{ Recommendation: "Consider reviewing the System Event Log table.", Justification: fmt.Sprintf("Detected '%d' temperature-related service action(s) in the System Event Log.", temperatureEvents), }) @@ -1918,223 +1677,111 @@ func systemEventLogTableInsights(outputs map[string]script.ScriptOutput, tableVa return insights } -func kernelLogTableValues(outputs map[string]script.ScriptOutput) []Field { - return []Field{ +func kernelLogTableValues(outputs map[string]script.ScriptOutput) []table.Field { + return []table.Field{ {Name: "Entries", Values: strings.Split(outputs[script.KernelLogScriptName].Stdout, "\n")}, } } -func pmuTableValues(outputs map[string]script.ScriptOutput) []Field { - return []Field{ +func pmuTableValues(outputs map[string]script.ScriptOutput) []table.Field { + return []table.Field{ {Name: "PMU Driver Version", Values: []string{strings.TrimSpace(outputs[script.PMUDriverVersionScriptName].Stdout)}}, - {Name: "cpu_cycles", Values: []string{valFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x30a (.*)$`)}}, - {Name: "instructions", Values: []string{valFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x309 (.*)$`)}}, - {Name: "ref_cycles", Values: []string{valFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x30b (.*)$`)}}, - {Name: "topdown_slots", Values: []string{valFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x30c (.*)$`)}}, - {Name: "gen_programmable_1", Values: []string{valFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc1 (.*)$`)}}, - {Name: "gen_programmable_2", Values: []string{valFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc2 (.*)$`)}}, - {Name: "gen_programmable_3", Values: []string{valFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc3 (.*)$`)}}, - {Name: "gen_programmable_4", Values: []string{valFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc4 (.*)$`)}}, - {Name: "gen_programmable_5", Values: []string{valFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc5 (.*)$`)}}, - {Name: "gen_programmable_6", Values: []string{valFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc6 (.*)$`)}}, - {Name: "gen_programmable_7", Values: []string{valFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc7 (.*)$`)}}, - {Name: "gen_programmable_8", Values: []string{valFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc8 (.*)$`)}}, - } -} - -func systemSummaryTableValues(outputs map[string]script.ScriptOutput) []Field { - system := valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + - " " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) + - ", " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Version:\s*(.+?)$`) - baseboard := valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Manufacturer:\s*(.+?)$`) + - " " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Product Name:\s*(.+?)$`) + - ", " + 
valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Version:\s*(.+?)$`) - chassis := valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Manufacturer:\s*(.+?)$`) + - " " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Type:\s*(.+?)$`) + - ", " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Version:\s*(.+?)$`) - - return []Field{ + {Name: "cpu_cycles", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x30a (.*)$`)}}, + {Name: "instructions", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x309 (.*)$`)}}, + {Name: "ref_cycles", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x30b (.*)$`)}}, + {Name: "topdown_slots", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0x30c (.*)$`)}}, + {Name: "gen_programmable_1", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc1 (.*)$`)}}, + {Name: "gen_programmable_2", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc2 (.*)$`)}}, + {Name: "gen_programmable_3", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc3 (.*)$`)}}, + {Name: "gen_programmable_4", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc4 (.*)$`)}}, + {Name: "gen_programmable_5", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc5 (.*)$`)}}, + {Name: "gen_programmable_6", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc6 (.*)$`)}}, + {Name: "gen_programmable_7", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc7 (.*)$`)}}, + {Name: "gen_programmable_8", Values: []string{common.ValFromRegexSubmatch(outputs[script.PMUBusyScriptName].Stdout, `^0xc8 (.*)$`)}}, + } +} + +func systemSummaryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + system := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + + " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) + + ", " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Version:\s*(.+?)$`) + baseboard := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Manufacturer:\s*(.+?)$`) + + " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Product Name:\s*(.+?)$`) + + ", " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "2", `^Version:\s*(.+?)$`) + chassis := common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Manufacturer:\s*(.+?)$`) + + " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Type:\s*(.+?)$`) + + ", " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "3", `^Version:\s*(.+?)$`) + + return []table.Field{ {Name: "Host Name", Values: []string{strings.TrimSpace(outputs[script.HostnameScriptName].Stdout)}}, {Name: "Time", Values: []string{strings.TrimSpace(outputs[script.DateScriptName].Stdout)}}, {Name: "System", Values: []string{system}}, {Name: "Baseboard", Values: 
[]string{baseboard}}, {Name: "Chassis", Values: []string{chassis}}, - {Name: "CPU Model", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name:\s*(.+)$`)}}, - {Name: "Architecture", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Architecture:\s*(.+)$`)}}, - {Name: "Microarchitecture", Values: []string{UarchFromOutput(outputs)}}, - {Name: "L3 Cache (instance/total)", Values: []string{l3FromOutput(outputs)}, Description: "The size of one L3 cache instance and the total L3 cache size for the system."}, - {Name: "Cores per Socket", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, - {Name: "Sockets", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)}}, - {Name: "Hyperthreading", Values: []string{hyperthreadingFromOutput(outputs)}}, - {Name: "CPUs", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(s\):\s*(.+)$`)}}, + {Name: "CPU Model", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name:\s*(.+)$`)}}, + {Name: "Architecture", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Architecture:\s*(.+)$`)}}, + {Name: "Microarchitecture", Values: []string{common.UarchFromOutput(outputs)}}, + {Name: "L3 Cache (instance/total)", Values: []string{common.L3FromOutput(outputs)}, Description: "The size of one L3 cache instance and the total L3 cache size for the system."}, + {Name: "Cores per Socket", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, + {Name: "Sockets", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)}}, + {Name: "Hyperthreading", Values: []string{common.HyperthreadingFromOutput(outputs)}}, + {Name: "CPUs", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(s\):\s*(.+)$`)}}, {Name: "Intel Turbo Boost", Values: []string{turboEnabledFromOutput(outputs)}}, - {Name: "Base Frequency", Values: []string{baseFrequencyFromOutput(outputs)}, Description: "The minimum guaranteed speed of a single core under standard conditions."}, - {Name: "Maximum Frequency", Values: []string{maxFrequencyFromOutput(outputs)}, Description: "The highest speed a single core can reach with Turbo Boost."}, - {Name: "All-core Maximum Frequency", Values: []string{allCoreMaxFrequencyFromOutput(outputs)}, Description: "The highest speed all cores can reach simultaneously with Turbo Boost."}, - {Name: "NUMA Nodes", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`)}}, - {Name: "Prefetchers", Values: []string{prefetchersSummaryFromOutput(outputs)}}, + {Name: "Base Frequency", Values: []string{common.BaseFrequencyFromOutput(outputs)}, Description: "The minimum guaranteed speed of a single core under standard conditions."}, + {Name: "Maximum Frequency", Values: []string{common.MaxFrequencyFromOutput(outputs)}, Description: "The highest speed a single core can reach with Turbo Boost."}, + {Name: "All-core Maximum Frequency", Values: []string{common.AllCoreMaxFrequencyFromOutput(outputs)}, Description: "The highest speed all cores can reach simultaneously with Turbo Boost."}, + {Name: "NUMA Nodes", Values: []string{common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA 
node\(s\):\s*(.+)$`)}}, + {Name: "Prefetchers", Values: []string{common.PrefetchersSummaryFromOutput(outputs)}}, {Name: "PPINs", Values: []string{ppinsFromOutput(outputs)}}, {Name: "Accelerators Available [used]", Values: []string{acceleratorSummaryFromOutput(outputs)}}, {Name: "Installed Memory", Values: []string{installedMemoryFromOutput(outputs)}}, - {Name: "Hugepagesize", Values: []string{valFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Hugepagesize:\s*(.+?)$`)}}, - {Name: "Transparent Huge Pages", Values: []string{valFromRegexSubmatch(outputs[script.TransparentHugePagesScriptName].Stdout, `.*\[(.*)\].*`)}}, + {Name: "Hugepagesize", Values: []string{common.ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^Hugepagesize:\s*(.+?)$`)}}, + {Name: "Transparent Huge Pages", Values: []string{common.ValFromRegexSubmatch(outputs[script.TransparentHugePagesScriptName].Stdout, `.*\[(.*)\].*`)}}, {Name: "Automatic NUMA Balancing", Values: []string{numaBalancingFromOutput(outputs)}}, - {Name: "NIC", Values: []string{nicSummaryFromOutput(outputs)}}, - {Name: "Disk", Values: []string{diskSummaryFromOutput(outputs)}}, - {Name: "BIOS", Values: []string{valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "0", `^Version:\s*(.+?)$`)}}, - {Name: "Microcode", Values: []string{valFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`)}}, - {Name: "OS", Values: []string{operatingSystemFromOutput(outputs)}}, - {Name: "Kernel", Values: []string{valFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`)}}, - {Name: "TDP", Values: []string{tdpFromOutput(outputs)}}, - {Name: "Energy Performance Bias", Values: []string{epbFromOutput(outputs)}}, + {Name: "NIC", Values: []string{common.NICSummaryFromOutput(outputs)}}, + {Name: "Disk", Values: []string{common.DiskSummaryFromOutput(outputs)}}, + {Name: "BIOS", Values: []string{common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "0", `^Version:\s*(.+?)$`)}}, + {Name: "Microcode", Values: []string{common.ValFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`)}}, + {Name: "OS", Values: []string{common.OperatingSystemFromOutput(outputs)}}, + {Name: "Kernel", Values: []string{common.ValFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`)}}, + {Name: "TDP", Values: []string{common.TDPFromOutput(outputs)}}, + {Name: "Energy Performance Bias", Values: []string{common.EPBFromOutput(outputs)}}, {Name: "Scaling Governor", Values: []string{strings.TrimSpace(outputs[script.ScalingGovernorScriptName].Stdout)}}, {Name: "Scaling Driver", Values: []string{strings.TrimSpace(outputs[script.ScalingDriverScriptName].Stdout)}}, - {Name: "C-states", Values: []string{cstatesSummaryFromOutput(outputs)}}, - {Name: "Efficiency Latency Control", Values: []string{elcSummaryFromOutput(outputs)}}, + {Name: "C-states", Values: []string{common.CstatesSummaryFromOutput(outputs)}}, + {Name: "Efficiency Latency Control", Values: []string{common.ELCSummaryFromOutput(outputs)}}, {Name: "CVEs", Values: []string{cveSummaryFromOutput(outputs)}}, {Name: "System Summary", Values: []string{systemSummaryFromOutput(outputs)}}, } } -func briefSummaryTableValues(outputs map[string]script.ScriptOutput) []Field { - return []Field{ - {Name: "Host Name", Values: []string{strings.TrimSpace(outputs[script.HostnameScriptName].Stdout)}}, // Hostname - {Name: "Time", Values: 
[]string{strings.TrimSpace(outputs[script.DateScriptName].Stdout)}}, // Date - {Name: "CPU Model", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name:\s*(.+)$`)}}, // Lscpu - {Name: "Microarchitecture", Values: []string{UarchFromOutput(outputs)}}, // Lscpu, LspciBits, LspciDevices - {Name: "TDP", Values: []string{tdpFromOutput(outputs)}}, // PackagePowerLimit - {Name: "Sockets", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)}}, // Lscpu - {Name: "Cores per Socket", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, // Lscpu - {Name: "Hyperthreading", Values: []string{hyperthreadingFromOutput(outputs)}}, // Lscpu, LspciBits, LspciDevices - {Name: "CPUs", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(s\):\s*(.+)$`)}}, // Lscpu - {Name: "NUMA Nodes", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`)}}, // Lscpu - {Name: "Scaling Driver", Values: []string{strings.TrimSpace(outputs[script.ScalingDriverScriptName].Stdout)}}, // ScalingDriver - {Name: "Scaling Governor", Values: []string{strings.TrimSpace(outputs[script.ScalingGovernorScriptName].Stdout)}}, // ScalingGovernor - {Name: "C-states", Values: []string{cstatesSummaryFromOutput(outputs)}}, // Cstates - {Name: "Maximum Frequency", Values: []string{maxFrequencyFromOutput(outputs)}, Description: "The highest speed a single core can reach with Turbo Boost."}, // MaximumFrequency, SpecCoreFrequencies, - {Name: "All-core Maximum Frequency", Values: []string{allCoreMaxFrequencyFromOutput(outputs)}, Description: "The highest speed all cores can reach simultaneously with Turbo Boost."}, // Lscpu, LspciBits, LspciDevices, SpecCoreFrequencies - {Name: "Energy Performance Bias", Values: []string{epbFromOutput(outputs)}}, // EpbSource, EpbBIOS, EpbOS - {Name: "Efficiency Latency Control", Values: []string{elcSummaryFromOutput(outputs)}}, // Elc - {Name: "MemTotal", Values: []string{valFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemTotal:\s*(.+?)$`)}}, // Meminfo - {Name: "NIC", Values: []string{nicSummaryFromOutput(outputs)}}, // Lshw, NicInfo - {Name: "Disk", Values: []string{diskSummaryFromOutput(outputs)}}, // DiskInfo, Hdparm - {Name: "OS", Values: []string{operatingSystemFromOutput(outputs)}}, // EtcRelease - {Name: "Kernel", Values: []string{valFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`)}}, // Uname - } -} - -func configurationTableValues(outputs map[string]script.ScriptOutput) []Field { - uarch := UarchFromOutput(outputs) - if uarch == "" { - slog.Error("failed to get uarch from script outputs") - return []Field{} - } - // This table is only shown in text mode on stdout for the config command. The config - // command implements its own print logic and uses the Description field to show the command line - // argument for each config item. 
- fields := []Field{ - {Name: "Cores per Socket", Description: "--cores ", Values: []string{valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, - {Name: "L3 Cache", Description: "--llc ", Values: []string{l3InstanceFromOutput(outputs)}}, - {Name: "Package Power / TDP", Description: "--tdp ", Values: []string{tdpFromOutput(outputs)}}, - {Name: "Core SSE Frequency", Description: "--core-max ", Values: []string{sseFrequenciesFromOutput(outputs)}}, - } - if strings.Contains(uarch, "SRF") || strings.Contains(uarch, "GNR") || strings.Contains(uarch, "CWF") { - fields = append(fields, []Field{ - {Name: "Uncore Max Frequency (Compute)", Description: "--uncore-max-compute ", Values: []string{uncoreMinMaxDieFrequencyFromOutput(true, true, outputs)}}, - {Name: "Uncore Min Frequency (Compute)", Description: "--uncore-min-compute ", Values: []string{uncoreMinMaxDieFrequencyFromOutput(false, true, outputs)}}, - {Name: "Uncore Max Frequency (I/O)", Description: "--uncore-max-io ", Values: []string{uncoreMinMaxDieFrequencyFromOutput(true, false, outputs)}}, - {Name: "Uncore Min Frequency (I/O)", Description: "--uncore-min-io ", Values: []string{uncoreMinMaxDieFrequencyFromOutput(false, false, outputs)}}, - }...) - } else { - fields = append(fields, []Field{ - {Name: "Uncore Max Frequency", Description: "--uncore-max ", Values: []string{uncoreMaxFrequencyFromOutput(outputs)}}, - {Name: "Uncore Min Frequency", Description: "--uncore-min ", Values: []string{uncoreMinFrequencyFromOutput(outputs)}}, - }...) - } - fields = append(fields, []Field{ - {Name: "Energy Performance Bias", Description: "--epb <0-15>", Values: []string{epbFromOutput(outputs)}}, - {Name: "Energy Performance Preference", Description: "--epp <0-255>", Values: []string{eppFromOutput(outputs)}}, - {Name: "Scaling Governor", Description: "--gov ", Values: []string{strings.TrimSpace(outputs[script.ScalingGovernorScriptName].Stdout)}}, - }...) 
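The config table above is rendered in text mode by the config command, which reuses each field's Description to show the command-line flag that changes that setting (per the comment above). Below is a minimal sketch of such a print loop, assuming only the Name/Description/Values shape of the field struct shown in this patch; the flag placeholders and the column formatting are illustrative, not the config command's actual output.

package main

import "fmt"

// Field mirrors the Name/Description/Values shape used by the config table;
// only the pieces needed for text-mode printing are included here.
type Field struct {
	Name        string
	Description string // e.g. "--cores <n>", the flag that changes this setting (placeholder assumed)
	Values      []string
}

func main() {
	fields := []Field{
		{Name: "Cores per Socket", Description: "--cores <n>", Values: []string{"64"}},
		{Name: "Scaling Governor", Description: "--gov <governor>", Values: []string{"performance"}},
	}
	// One line per setting: current value alongside the flag used to change it.
	for _, f := range fields {
		val := ""
		if len(f.Values) > 0 {
			val = f.Values[0]
		}
		fmt.Printf("%-25s %-20s %s\n", f.Name, f.Description, val)
	}
}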
- // add ELC (for SRF, CWF and GNR only) - if strings.Contains(uarch, "SRF") || strings.Contains(uarch, "GNR") || strings.Contains(uarch, "CWF") { - fields = append(fields, Field{Name: "Efficiency Latency Control", Description: "--elc ", Values: []string{elcSummaryFromOutput(outputs)}}) - } - // add prefetchers - for _, pf := range prefetcherDefinitions { - if slices.Contains(pf.Uarchs, "all") || slices.Contains(pf.Uarchs, uarch[:3]) { - var scriptName string - switch pf.Msr { - case MsrPrefetchControl: - scriptName = script.PrefetchControlName - case MsrPrefetchers: - scriptName = script.PrefetchersName - case MsrAtomPrefTuning1: - scriptName = script.PrefetchersAtomName - default: - slog.Error("unknown msr for prefetcher", slog.String("msr", fmt.Sprintf("0x%x", pf.Msr))) - continue - } - msrVal := valFromRegexSubmatch(outputs[scriptName].Stdout, `^([0-9a-fA-F]+)`) - var enabledDisabled string - enabled, err := isPrefetcherEnabled(msrVal, pf.Bit) - if err != nil { - slog.Warn("error checking prefetcher enabled status", slog.String("error", err.Error())) - continue - } - if enabled { - enabledDisabled = "Enabled" - } else { - enabledDisabled = "Disabled" - } - fields = append(fields, - Field{ - Name: pf.ShortName + " prefetcher", - Description: "--" + "pref-" + strings.ReplaceAll(strings.ToLower(pf.ShortName), " ", "") + " ", - Values: []string{enabledDisabled}}, - ) - } - } - // add C6 - c6 := c6FromOutput(outputs) - if c6 != "" { - fields = append(fields, Field{Name: "C6", Description: "--c6 ", Values: []string{c6}}) - } - // add C1 Demotion - c1Demotion := strings.TrimSpace(outputs[script.C1DemotionScriptName].Stdout) - if c1Demotion != "" { - fields = append(fields, Field{Name: "C1 Demotion", Description: "--c1-demotion ", Values: []string{c1Demotion}}) - } - return fields -} - // benchmarking -func speedBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field { - return []Field{ +func speedBenchmarkTableValues(outputs map[string]script.ScriptOutput) []table.Field { + return []table.Field{ {Name: "Ops/s", Values: []string{cpuSpeedFromOutput(outputs)}}, } } -func powerBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field { - return []Field{ - {Name: "Maximum Power", Values: []string{maxTotalPackagePowerFromOutput(outputs[script.PowerBenchmarkScriptName].Stdout)}}, - {Name: "Minimum Power", Values: []string{minTotalPackagePowerFromOutput(outputs[script.IdlePowerBenchmarkScriptName].Stdout)}}, +func powerBenchmarkTableValues(outputs map[string]script.ScriptOutput) []table.Field { + return []table.Field{ + {Name: "Maximum Power", Values: []string{common.MaxTotalPackagePowerFromOutput(outputs[script.PowerBenchmarkScriptName].Stdout)}}, + {Name: "Minimum Power", Values: []string{common.MinTotalPackagePowerFromOutput(outputs[script.IdlePowerBenchmarkScriptName].Stdout)}}, } } -func temperatureBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field { - return []Field{ - {Name: "Maximum Temperature", Values: []string{maxPackageTemperatureFromOutput(outputs[script.PowerBenchmarkScriptName].Stdout)}}, +func temperatureBenchmarkTableValues(outputs map[string]script.ScriptOutput) []table.Field { + return []table.Field{ + {Name: "Maximum Temperature", Values: []string{common.MaxPackageTemperatureFromOutput(outputs[script.PowerBenchmarkScriptName].Stdout)}}, } } -func frequencyBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field { +func frequencyBenchmarkTableValues(outputs map[string]script.ScriptOutput) []table.Field { // get the sse, avx256, 
and avx512 frequencies from the avx-turbo output instructionFreqs, err := avxTurboFrequenciesFromOutput(outputs[script.FrequencyBenchmarkScriptName].Stdout) if err != nil { slog.Error("unable to get avx turbo frequencies", slog.String("error", err.Error())) - return []Field{} + return []table.Field{} } // we're expecting scalar_iadd, avx256_fma, avx512_fma scalarIaddFreqs := instructionFreqs["scalar_iadd"] @@ -2143,17 +1790,17 @@ func frequencyBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Fie // stop if we don't have any scalar_iadd frequencies if len(scalarIaddFreqs) == 0 { slog.Error("no scalar_iadd frequencies found") - return []Field{} + return []table.Field{} } // get the spec core frequencies from the spec output var specSSEFreqs []string - frequencyBuckets, err := getSpecFrequencyBuckets(outputs) + frequencyBuckets, err := common.GetSpecFrequencyBuckets(outputs) if err == nil && len(frequencyBuckets) >= 2 { // get the frequencies from the buckets - specSSEFreqs, err = expandTurboFrequencies(frequencyBuckets, "sse") + specSSEFreqs, err = common.ExpandTurboFrequencies(frequencyBuckets, "sse") if err != nil { slog.Error("unable to convert buckets to counts", slog.String("error", err.Error())) - return []Field{} + return []table.Field{} } // trim the spec frequencies to the length of the scalar_iadd frequencies // this can happen when the actual core count is less than the number of cores in the spec @@ -2170,7 +1817,7 @@ func frequencyBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Fie } } // create the fields - fields := []Field{ + fields := []table.Field{ {Name: "cores"}, } coresIdx := 0 // always the first field @@ -2179,19 +1826,19 @@ func frequencyBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Fie var avx2FieldIdx int var avx512FieldIdx int if len(specSSEFreqs) > 0 { - fields = append(fields, Field{Name: "SSE (expected)", Description: "The expected frequency, when running SSE instructions, for the given number of active cores."}) + fields = append(fields, table.Field{Name: "SSE (expected)", Description: "The expected frequency, when running SSE instructions, for the given number of active cores."}) specSSEFieldIdx = len(fields) - 1 } if len(scalarIaddFreqs) > 0 { - fields = append(fields, Field{Name: "SSE", Description: "The measured frequency, when running SSE instructions, for the given number of active cores."}) + fields = append(fields, table.Field{Name: "SSE", Description: "The measured frequency, when running SSE instructions, for the given number of active cores."}) scalarIaddFieldIdx = len(fields) - 1 } if len(avx256FmaFreqs) > 0 { - fields = append(fields, Field{Name: "AVX2", Description: "The measured frequency, when running AVX2 instructions, for the given number of active cores."}) + fields = append(fields, table.Field{Name: "AVX2", Description: "The measured frequency, when running AVX2 instructions, for the given number of active cores."}) avx2FieldIdx = len(fields) - 1 } if len(avx512FmaFreqs) > 0 { - fields = append(fields, Field{Name: "AVX512", Description: "The measured frequency, when running AVX512 instructions, for the given number of active cores."}) + fields = append(fields, table.Field{Name: "AVX512", Description: "The measured frequency, when running AVX512 instructions, for the given number of active cores."}) avx512FieldIdx = len(fields) - 1 } // add the data to the fields @@ -2229,8 +1876,8 @@ func frequencyBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Fie return fields } -func 
memoryBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ +func memoryBenchmarkTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ {Name: "Latency (ns)"}, {Name: "Bandwidth (GB/s)"}, } @@ -2243,7 +1890,7 @@ func memoryBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field 00008 261.54 225073.3 ... */ - latencyBandwidthPairs := valsArrayFromRegexSubmatch(outputs[script.MemoryBenchmarkScriptName].Stdout, `\s*[0-9]*\s*([0-9]*\.[0-9]+)\s*([0-9]*\.[0-9]+)`) + latencyBandwidthPairs := common.ValsArrayFromRegexSubmatch(outputs[script.MemoryBenchmarkScriptName].Stdout, `\s*[0-9]*\s*([0-9]*\.[0-9]+)\s*([0-9]*\.[0-9]+)`) for _, latencyBandwidth := range latencyBandwidthPairs { latency := latencyBandwidth[0] bandwidth, err := strconv.ParseFloat(latencyBandwidth[1], 32) @@ -2256,13 +1903,13 @@ func memoryBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field fields[1].Values = append([]string{fmt.Sprintf("%.1f", bandwidth/1000)}, fields[1].Values...) } if len(fields[0].Values) == 0 { - return []Field{} + return []table.Field{} } return fields } -func numaBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ +func numaBenchmarkTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ {Name: "Node"}, } /* MLC Output: @@ -2271,10 +1918,10 @@ func numaBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field { 0 175610.3 55579.7 1 55575.2 175656.7 */ - nodeBandwidthsPairs := valsArrayFromRegexSubmatch(outputs[script.NumaBenchmarkScriptName].Stdout, `^\s+(\d)\s+(\d.*)$`) + nodeBandwidthsPairs := common.ValsArrayFromRegexSubmatch(outputs[script.NumaBenchmarkScriptName].Stdout, `^\s+(\d)\s+(\d.*)$`) // add 1 field per numa node for _, nodeBandwidthsPair := range nodeBandwidthsPairs { - fields = append(fields, Field{Name: nodeBandwidthsPair[0]}) + fields = append(fields, table.Field{Name: nodeBandwidthsPair[0]}) } // add rows for _, nodeBandwidthsPair := range nodeBandwidthsPairs { @@ -2282,7 +1929,7 @@ func numaBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field { bandwidths := strings.Split(strings.TrimSpace(nodeBandwidthsPair[1]), "\t") if len(bandwidths) != len(nodeBandwidthsPairs) { slog.Warn(fmt.Sprintf("Mismatched number of bandwidths for numa node %s, %s", nodeBandwidthsPair[0], nodeBandwidthsPair[1])) - return []Field{} + return []table.Field{} } for i, bw := range bandwidths { bw = strings.TrimSpace(bw) @@ -2295,7 +1942,7 @@ func numaBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field { } } if len(fields[0].Values) == 0 { - return []Field{} + return []table.Field{} } return fields } @@ -2309,19 +1956,19 @@ func formatOrEmpty(format string, value any) string { return s } -func storageBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field { +func storageBenchmarkTableValues(outputs map[string]script.ScriptOutput) []table.Field { fioData, err := storagePerfFromOutput(outputs) if err != nil { slog.Error("failed to get storage benchmark data", slog.String("error", err.Error())) - return []Field{} + return []table.Field{} } if len(fioData.Jobs) == 0 { - return []Field{} + return []table.Field{} } // Initialize the fields for metrics (column headers) - fields := []Field{ + fields := []table.Field{ {Name: "Job"}, {Name: "Read Latency (us)"}, {Name: "Read IOPs"}, @@ -2346,550 +1993,212 @@ func storageBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field 
return fields } -// telemetry - -func cpuUtilizationTelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ - {Name: "Time"}, - {Name: "CPU"}, - {Name: "CORE"}, - {Name: "SOCK"}, - {Name: "NODE"}, - {Name: "%usr"}, - {Name: "%nice"}, - {Name: "%sys"}, - {Name: "%iowait"}, - {Name: "%irq"}, - {Name: "%soft"}, - {Name: "%steal"}, - {Name: "%guest"}, - {Name: "%gnice"}, - {Name: "%idle"}, - } - reStat := regexp.MustCompile(`^(\d\d:\d\d:\d\d)\s+(\d+)\s+(\d+)\s+(\d+)\s+(-*\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)$`) - for line := range strings.SplitSeq(outputs[script.MpstatTelemetryScriptName].Stdout, "\n") { - match := reStat.FindStringSubmatch(line) - if len(match) == 0 { - continue - } - for i := range fields { - fields[i].Values = append(fields[i].Values, match[i+1]) - } - } - if len(fields[0].Values) == 0 { - return []Field{} - } - return fields -} - -func utilizationCategoriesTelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ - {Name: "Time"}, - {Name: "%usr"}, - {Name: "%nice"}, - {Name: "%sys"}, - {Name: "%iowait"}, - {Name: "%irq"}, - {Name: "%soft"}, - {Name: "%steal"}, - {Name: "%guest"}, - {Name: "%gnice"}, - {Name: "%idle"}, - } - reStat := regexp.MustCompile(`^(\d\d:\d\d:\d\d)\s+all\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)$`) - for line := range strings.SplitSeq(outputs[script.MpstatTelemetryScriptName].Stdout, "\n") { - match := reStat.FindStringSubmatch(line) - if len(match) == 0 { - continue - } - for i := range fields { - fields[i].Values = append(fields[i].Values, match[i+1]) - } - } - if len(fields[0].Values) == 0 { - return []Field{} - } - return fields -} - -func irqRateTelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ - {Name: "Time"}, - {Name: "CPU"}, - {Name: "HI/s"}, - {Name: "TIMER/s"}, - {Name: "NET_TX/s"}, - {Name: "NET_RX/s"}, - {Name: "BLOCK/s"}, - {Name: "IRQ_POLL/s"}, - {Name: "TASKLET/s"}, - {Name: "SCHED/s"}, - {Name: "HRTIMER/s"}, - {Name: "RCU/s"}, - } - reStat := regexp.MustCompile(`^(\d\d:\d\d:\d\d)\s+(\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)$`) - for line := range strings.SplitSeq(outputs[script.MpstatTelemetryScriptName].Stdout, "\n") { - match := reStat.FindStringSubmatch(line) - if len(match) == 0 { - continue - } - for i := range fields { - fields[i].Values = append(fields[i].Values, match[i+1]) - } - } - if len(fields[0].Values) == 0 { - return []Field{} - } - return fields -} - -func driveTelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ - {Name: "Time"}, - {Name: "Device"}, - {Name: "tps"}, - {Name: "kB_read/s"}, - {Name: "kB_wrtn/s"}, - {Name: "kB_dscd/s"}, - } - // the time is on its own line, so we need to keep track of it - reTime := regexp.MustCompile(`^\d\d\d\d-\d\d-\d\dT(\d\d:\d\d:\d\d)`) - // don't capture the last three vals: "kB_read","kB_wrtn","kB_dscd" -- they aren't the same scale as the others - reStat := regexp.MustCompile(`^(\w+)\s*(\d+.\d+)\s*(\d+.\d+)\s*(\d+.\d+)\s*(\d+.\d+)\s*\d+\s*\d+\s*\d+$`) - var time string - for line := range strings.SplitSeq(outputs[script.IostatTelemetryScriptName].Stdout, "\n") { - match := reTime.FindStringSubmatch(line) - if len(match) > 0 { - time = match[1] - continue - } 
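The DIMM HTML renderer above groups modules into a socket -> channel -> slot map and walks it with sorted keys so the rendered layout is stable; channel labels are converted to int before sorting so channel 10 does not sort ahead of channel 2. A minimal standalone sketch of that grouping and ordered walk follows; the map shape matches the renderer in this patch, while the DIMM strings and the error handling on a malformed channel label are illustrative only.

package main

import (
	"fmt"
	"sort"
	"strconv"
)

func main() {
	// socket -> channel -> slot -> DIMM details, mirroring the map built by the renderer above.
	dimms := map[string]map[string]map[string]string{
		"0": {
			"2":  {"0": "64GB DDR5 ... 6400 MT/s [6000 MT/s]"},
			"10": {"0": "No Module Installed"},
		},
	}
	var sockets []string
	for s := range dimms {
		sockets = append(sockets, s)
	}
	sort.Strings(sockets)
	for _, socket := range sockets {
		// Channel labels are numeric strings; sort them as ints so channel 10 follows channel 2.
		var channels []int
		for c := range dimms[socket] {
			n, err := strconv.Atoi(c)
			if err != nil {
				continue // skip a malformed channel label rather than abort (illustrative choice)
			}
			channels = append(channels, n)
		}
		sort.Ints(channels)
		for _, ch := range channels {
			slotMap := dimms[socket][strconv.Itoa(ch)]
			var slots []string
			for s := range slotMap {
				slots = append(slots, s)
			}
			sort.Strings(slots)
			for _, slot := range slots {
				fmt.Printf("socket %s channel %d slot %s: %s\n", socket, ch, slot, slotMap[slot])
			}
		}
	}
}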
- match = reStat.FindStringSubmatch(line) - if len(match) > 0 { - fields[0].Values = append(fields[0].Values, time) - for i := range fields[1:] { - fields[i+1].Values = append(fields[i+1].Values, match[i+1]) +func dimmDetails(dimm []string) (details string) { + if strings.Contains(dimm[SizeIdx], "No") { + details = "No Module Installed" + } else { + // Intel PMEM modules may have serial number appended to end of part number... + // strip that off so it doesn't mess with color selection later + partNumber := dimm[PartIdx] + if strings.Contains(dimm[DetailIdx], "Synchronous Non-Volatile") && + dimm[ManufacturerIdx] == "Intel" && + strings.HasSuffix(dimm[PartIdx], dimm[SerialIdx]) { + partNumber = dimm[PartIdx][:len(dimm[PartIdx])-len(dimm[SerialIdx])] + } + // example: "64GB DDR5 R2 Synchronous Registered (Buffered) Micron Technology MTC78ASF4G72PZ-2G6E1 6400 MT/s [6000 MT/s]" + details = fmt.Sprintf("%s %s %s R%s %s %s %s [%s]", + strings.ReplaceAll(dimm[SizeIdx], " ", ""), + dimm[TypeIdx], + dimm[DetailIdx], + dimm[RankIdx], + dimm[ManufacturerIdx], + partNumber, + strings.ReplaceAll(dimm[SpeedIdx], " ", ""), + strings.ReplaceAll(dimm[ConfiguredSpeedIdx], " ", "")) + } + return +} + +func dimmTableHTMLRenderer(tableValues table.TableValues, targetName string) string { + if tableValues.Fields[DerivedSocketIdx].Values[0] == "" || tableValues.Fields[DerivedChannelIdx].Values[0] == "" || tableValues.Fields[DerivedSlotIdx].Values[0] == "" { + return report.DefaultHTMLTableRendererFunc(tableValues) + } + htmlColors := []string{"lightgreen", "orange", "aqua", "lime", "yellow", "beige", "magenta", "violet", "salmon", "pink"} + var slotColorIndices = make(map[string]int) + // socket -> channel -> slot -> dimm details + var dimms = map[string]map[string]map[string]string{} + for dimmIdx := range tableValues.Fields[DerivedSocketIdx].Values { + if _, ok := dimms[tableValues.Fields[DerivedSocketIdx].Values[dimmIdx]]; !ok { + dimms[tableValues.Fields[DerivedSocketIdx].Values[dimmIdx]] = make(map[string]map[string]string) + } + if _, ok := dimms[tableValues.Fields[DerivedSocketIdx].Values[dimmIdx]][tableValues.Fields[DerivedChannelIdx].Values[dimmIdx]]; !ok { + dimms[tableValues.Fields[DerivedSocketIdx].Values[dimmIdx]][tableValues.Fields[DerivedChannelIdx].Values[dimmIdx]] = make(map[string]string) + } + dimmValues := []string{} + for _, field := range tableValues.Fields { + dimmValues = append(dimmValues, field.Values[dimmIdx]) + } + dimms[tableValues.Fields[DerivedSocketIdx].Values[dimmIdx]][tableValues.Fields[DerivedChannelIdx].Values[dimmIdx]][tableValues.Fields[DerivedSlotIdx].Values[dimmIdx]] = dimmDetails(dimmValues) + } + + var socketTableHeaders = []string{"Socket", ""} + var socketTableValues [][]string + var socketKeys []string + for k := range dimms { + socketKeys = append(socketKeys, k) + } + sort.Strings(socketKeys) + for _, socket := range socketKeys { + socketMap := dimms[socket] + socketTableValues = append(socketTableValues, []string{}) + var channelTableHeaders = []string{"Channel", "Slots"} + var channelTableValues [][]string + var channelKeys []int + for k := range socketMap { + channel, err := strconv.Atoi(k) + if err != nil { + slog.Error("failed to convert channel to int", slog.String("error", err.Error())) + return "" } - } - } - if len(fields[0].Values) == 0 { - return []Field{} - } - return fields -} - -func networkTelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ - {Name: "Time"}, - {Name: "IFACE"}, - {Name: "rxpck/s"}, - {Name: 
"txpck/s"}, - {Name: "rxkB/s"}, - {Name: "txkB/s"}, - } - // don't capture the last four vals: "rxcmp/s","txcmp/s","rxcmt/s","%ifutil" -- obscure more important vals - reStat := regexp.MustCompile(`^(\d+:\d+:\d+)\s*(\w*)\s*(\d+.\d+)\s*(\d+.\d+)\s*(\d+.\d+)\s*(\d+.\d+)\s*\d+.\d+\s*\d+.\d+\s*\d+.\d+\s*\d+.\d+$`) - for line := range strings.SplitSeq(outputs[script.NetworkTelemetryScriptName].Stdout, "\n") { - match := reStat.FindStringSubmatch(line) - if len(match) == 0 { - continue - } - for i := range fields { - fields[i].Values = append(fields[i].Values, match[i+1]) - } - } - if len(fields[0].Values) == 0 { - return []Field{} - } - return fields -} - -func memoryTelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ - {Name: "Time"}, - {Name: "free"}, - {Name: "avail"}, - {Name: "used"}, - {Name: "buffers"}, - {Name: "cache"}, - {Name: "commit"}, - {Name: "active"}, - {Name: "inactive"}, - {Name: "dirty"}, - } - reStat := regexp.MustCompile(`^(\d+:\d+:\d+)\s*(\d+)\s*(\d+)\s*(\d+)\s*\d+\.\d+\s*(\d+)\s*(\d+)\s*(\d+)\s*\d+\.\d+\s*(\d+)\s*(\d+)\s*(\d+)$`) - for line := range strings.SplitSeq(outputs[script.MemoryTelemetryScriptName].Stdout, "\n") { - match := reStat.FindStringSubmatch(line) - if len(match) == 0 { - continue - } - for i := range fields { - fields[i].Values = append(fields[i].Values, match[i+1]) - } - } - if len(fields[0].Values) == 0 { - return []Field{} - } - return fields -} - -func powerTelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ - {Name: "Time"}, - } - packageRows, err := turbostatPackageRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"PkgWatt", "RAMWatt"}) - if err != nil { - slog.Error(err.Error()) - return []Field{} - } - for i := range packageRows { - fields = append(fields, Field{Name: fmt.Sprintf("Package %d", i)}) - fields = append(fields, Field{Name: fmt.Sprintf("DRAM %d", i)}) - } - // for each package - numPackages := len(packageRows) - for i := range packageRows { - // traverse the rows - for _, row := range packageRows[i] { - if i == 0 { - fields[0].Values = append(fields[0].Values, row[0]) // Timestamp + channelKeys = append(channelKeys, channel) + } + sort.Ints(channelKeys) + for _, channel := range channelKeys { + channelMap := socketMap[strconv.Itoa(channel)] + channelTableValues = append(channelTableValues, []string{}) + var slotTableHeaders []string + var slotTableValues [][]string + var slotTableValuesStyles [][]string + var slotKeys []string + for k := range channelMap { + slotKeys = append(slotKeys, k) } - // append the package power and DRAM power to the fields - fields[i*numPackages+1].Values = append(fields[i*numPackages+1].Values, row[1]) // Package power - fields[i*numPackages+2].Values = append(fields[i*numPackages+2].Values, row[2]) // DRAM power - } - } - if len(fields[0].Values) == 0 { - return []Field{} - } - return fields -} - -func temperatureTelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ - {Name: "Time"}, - {Name: "Core (Avg.)"}, - } - platformRows, err := turbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"CoreTmp"}) - if err != nil { - slog.Error(err.Error()) - return []Field{} - } - packageRows, err := turbostatPackageRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"PkgTmp"}) - if err != nil { - // not an error, just means no package rows (package temperature) - slog.Warn(err.Error()) - } - // add the package rows to the fields - for i := 
range packageRows { - fields = append(fields, Field{Name: fmt.Sprintf("Package %d", i)}) - } - // for each platform row - for i := range platformRows { - // append the timestamp to the fields - fields[0].Values = append(fields[0].Values, platformRows[i][0]) // Timestamp - // append the core temperature values to the fields - fields[1].Values = append(fields[1].Values, platformRows[i][1]) // Core temperature - } - // for each package - for i := range packageRows { - // traverse the rows - for _, row := range packageRows[i] { - // append the package temperature to the fields - fields[i+2].Values = append(fields[i+2].Values, row[1]) // Package temperature - } - } - if len(fields[0].Values) == 0 { - return []Field{} - } - return fields -} - -func frequencyTelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ - {Name: "Time"}, - {Name: "Core (Avg.)"}, - } - platformRows, err := turbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"Bzy_MHz"}) - if err != nil { - slog.Error(err.Error()) - return []Field{} - } - packageRows, err := turbostatPackageRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"UncMHz"}) - if err != nil { - // not an error, just means no package rows (uncore frequency) - slog.Warn(err.Error()) - } - // add the package rows to the fields - for i := range packageRows { - fields = append(fields, Field{Name: fmt.Sprintf("Uncore Package %d", i)}) - } - // for each platform row - for i := range platformRows { - // append the timestamp to the fields - fields[0].Values = append(fields[0].Values, platformRows[i][0]) // Timestamp - // append the core frequency values to the fields - fields[1].Values = append(fields[1].Values, platformRows[i][1]) // Core frequency - } - // for each package - for i := range packageRows { - // traverse the rows - for _, row := range packageRows[i] { - // append the package frequency to the fields - fields[i+2].Values = append(fields[i+2].Values, row[1]) // Package frequency - } - } - if len(fields[0].Values) == 0 { - return []Field{} - } - return fields -} - -func ipcTelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ - {Name: "Time"}, - {Name: "Core (Avg.)"}, - } - platformRows, err := turbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"IPC"}) - if err != nil { - slog.Error(err.Error()) - return []Field{} - } - if len(platformRows) == 0 { - slog.Warn("no platform rows found in turbostat telemetry output") - return []Field{} - } - // for each platform row - for i := range platformRows { - // append the timestamp to the fields - fields[0].Values = append(fields[0].Values, platformRows[i][0]) // Timestamp - // append the core IPC values to the fields - fields[1].Values = append(fields[1].Values, platformRows[i][1]) // Core IPC - } - return fields -} - -func c6TelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ - {Name: "Time"}, - {Name: "Package (Avg.)"}, - {Name: "Core (Avg.)"}, - } - platformRows, err := turbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"C6%", "CPU%c6"}) - if err != nil { - slog.Error(err.Error()) - return []Field{} - } - if len(platformRows) == 0 { - slog.Warn("no platform rows found in turbostat telemetry output") - return []Field{} - } - // for each platform row - for i := range platformRows { - // append the timestamp to the fields - fields[0].Values = append(fields[0].Values, platformRows[i][0]) // 
Timestamp - // append the C6 residency values to the fields - fields[1].Values = append(fields[1].Values, platformRows[i][1]) // C6% - // append the CPU C6 residency values to the fields - fields[2].Values = append(fields[2].Values, platformRows[i][2]) // CPU%c6 - } - return fields -} - -func gaudiTelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - // parse the CSV output - csvOutput := outputs[script.GaudiTelemetryScriptName].Stdout - if csvOutput == "" { - return []Field{} - } - r := csv.NewReader(strings.NewReader(csvOutput)) - rows, err := r.ReadAll() - if err != nil { - slog.Error(err.Error()) - return []Field{} - } - if len(rows) < 2 { - slog.Error("gaudi stats output is not in expected format") - return []Field{} - } - // build fields to match CSV output from hl_smi tool - fields := []Field{} - // first row is the header, extract field names - for _, fieldName := range rows[0] { - fields = append(fields, Field{Name: strings.TrimSpace(fieldName)}) - } - // values start in 2nd row - for _, row := range rows[1:] { - for i := range fields { - // reformat the timestamp field to only include the time - if i == 0 { - // parse the timestamp field's value - rowTime, err := time.Parse("Mon Jan 2 15:04:05 MST 2006", row[i]) - if err != nil { - err = fmt.Errorf("unable to parse Gaudi telemetry timestamp: %s", row[i]) - slog.Error(err.Error()) - return []Field{} + sort.Strings(slotKeys) + slotTableValues = append(slotTableValues, []string{}) + slotTableValuesStyles = append(slotTableValuesStyles, []string{}) + for _, slot := range slotKeys { + dimmDetails := channelMap[slot] + slotTableValues[0] = append(slotTableValues[0], htmltemplate.HTMLEscapeString(dimmDetails)) + var slotColor string + if dimmDetails == "No Module Installed" { + slotColor = "background-color:silver" + } else { + if _, ok := slotColorIndices[dimmDetails]; !ok { + slotColorIndices[dimmDetails] = int(math.Min(float64(len(slotColorIndices)), float64(len(htmlColors)-1))) + } + slotColor = "background-color:" + htmlColors[slotColorIndices[dimmDetails]] } - // reformat the timestamp field's value to include time only - timestamp := rowTime.Format("15:04:05") - fields[i].Values = append(fields[i].Values, timestamp) - } else { - fields[i].Values = append(fields[i].Values, strings.TrimSpace(row[i])) + slotTableValuesStyles[0] = append(slotTableValuesStyles[0], slotColor) } + slotTable := report.RenderHTMLTable(slotTableHeaders, slotTableValues, "pure-table pure-table-bordered", slotTableValuesStyles) + // channel number + channelTableValues[len(channelTableValues)-1] = append(channelTableValues[len(channelTableValues)-1], strconv.Itoa(channel)) + // slot table + channelTableValues[len(channelTableValues)-1] = append(channelTableValues[len(channelTableValues)-1], slotTable) + // style + } + channelTable := report.RenderHTMLTable(channelTableHeaders, channelTableValues, "pure-table pure-table-bordered", [][]string{}) + // socket number + socketTableValues[len(socketTableValues)-1] = append(socketTableValues[len(socketTableValues)-1], socket) + // channel table + socketTableValues[len(socketTableValues)-1] = append(socketTableValues[len(socketTableValues)-1], channelTable) + } + return report.RenderHTMLTable(socketTableHeaders, socketTableValues, "pure-table pure-table-bordered", [][]string{}) +} + +func renderFrequencyTable(tableValues table.TableValues) (out string) { + var rows [][]string + headers := []string{""} + valuesStyles := [][]string{} + for i := range tableValues.Fields[0].Values { + headers = 
append(headers, fmt.Sprintf("%d", i+1)) + } + for _, field := range tableValues.Fields[1:] { + row := append([]string{report.CreateFieldNameWithDescription(field.Name, field.Description)}, field.Values...) + rows = append(rows, row) + valuesStyles = append(valuesStyles, []string{"font-weight:bold"}) + } + out = report.RenderHTMLTable(headers, rows, "pure-table pure-table-striped", valuesStyles) + return +} + +func coreTurboFrequencyTableHTMLRenderer(tableValues table.TableValues) string { + data := [][]report.ScatterPoint{} + datasetNames := []string{} + for _, field := range tableValues.Fields[1:] { + points := []report.ScatterPoint{} + for i, val := range field.Values { + if val == "" { + break + } + freq, err := strconv.ParseFloat(val, 64) + if err != nil { + slog.Error("error parsing frequency", slog.String("error", err.Error())) + return "" + } + points = append(points, report.ScatterPoint{X: float64(i + 1), Y: freq}) } - } - return fields -} - -func pduTelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - // extract PDU fields and their values from PDU telemetry script output - // output is CSV formatted: - // Timestamp,ActivePower(W) - // 18:32:38,123.45 - // 18:32:40,124.10 - // ... - fields := []Field{} - reader := csv.NewReader(strings.NewReader(outputs[script.PDUTelemetryScriptName].Stdout)) - records, err := reader.ReadAll() - if err != nil { - slog.Error("failed to read PDU telemetry CSV output", slog.String("error", err.Error())) - return []Field{} - } - if len(records) == 0 { - return []Field{} - } - // first row is the header - for _, header := range records[0] { - fields = append(fields, Field{Name: header, Values: []string{}}) - } - // subsequent rows are data - for _, record := range records[1:] { - if len(record) != len(fields) { - slog.Error("unexpected number of fields in PDU telemetry output", slog.Int("expected", len(fields)), slog.Int("got", len(record))) - return []Field{} - } - for i, value := range record { - fields[i].Values = append(fields[i].Values, value) + if len(points) > 0 { + data = append(data, points) + datasetNames = append(datasetNames, field.Name) } } - return fields + chartConfig := report.ChartTemplateStruct{ + ID: fmt.Sprintf("turboFrequency%d", util.RandUint(10000)), + XaxisText: "Core Count", + YaxisText: "Frequency (GHz)", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "4", + SuggestedMin: "2", + SuggestedMax: "4", + } + out := report.RenderScatterChart(data, datasetNames, chartConfig) + out += "\n" + out += renderFrequencyTable(tableValues) + return out } -func callStackFrequencyTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ - {Name: "Native Stacks", Values: []string{nativeFoldedFromOutput(outputs)}}, - {Name: "Java Stacks", Values: []string{javaFoldedFromOutput(outputs)}}, - {Name: "Maximum Render Depth", Values: []string{maxRenderDepthFromOutput(outputs)}}, - } - return fields +func frequencyBenchmarkTableHtmlRenderer(tableValues table.TableValues, targetName string) string { + return coreTurboFrequencyTableHTMLRenderer(tableValues) } -func kernelLockAnalysisTableValues(outputs map[string]script.ScriptOutput) []Field { - fields := []Field{ - {Name: "Hotspot without Callstack", Values: []string{sectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_hotspot_no_children")}}, - {Name: "Hotspot with Callstack", Values: []string{sectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_hotspot_callgraph")}}, - 
{Name: "Cache2Cache without Callstack", Values: []string{sectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_c2c_no_children")}}, - {Name: "Cache2Cache with CallStack", Values: []string{sectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_c2c_callgraph")}}, - {Name: "Lock Contention", Values: []string{sectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_lock_contention")}}, - {Name: "Perf Package Path", Values: []string{strings.TrimSpace(sectionValueFromOutput(outputs[script.ProfileKernelLockScriptName].Stdout, "perf_package_path"))}}, - } - return fields +func memoryBenchmarkTableHtmlRenderer(tableValues table.TableValues, targetName string) string { + return memoryBenchmarkTableMultiTargetHtmlRenderer([]table.TableValues{tableValues}, []string{targetName}) } -func instructionTelemetryTableValues(outputs map[string]script.ScriptOutput) []Field { - // first two lines are not part of the CSV output, they are the start time and interval - var startTime time.Time - var interval int - lines := strings.Split(outputs[script.InstructionTelemetryScriptName].Stdout, "\n") - if len(lines) < 4 { - slog.Warn("no data found in instruction mix output") - return []Field{} - } - // TIME - line := lines[0] - if !strings.HasPrefix(line, "TIME") { - slog.Error("instruction mix output is not in expected format, missing TIME") - return []Field{} - } else { - val := strings.Split(line, " ")[1] - var err error - startTime, err = time.Parse("15:04:05", val) - if err != nil { - slog.Error(fmt.Sprintf("unable to parse instruction mix start time: %s", val)) - return []Field{} - } - } - // INTERVAL - line = lines[1] - if !strings.HasPrefix(line, "INTERVAL") { - slog.Error("instruction mix output is not in expected format, missing INTERVAL") - return []Field{} - } else { - val := strings.Split(line, " ")[1] - var err error - interval, err = strconv.Atoi(val) - if err != nil { - slog.Error(fmt.Sprintf("unable to convert instruction mix interval to int: %s", val)) - return []Field{} - } - } - // remove blank lines that occur throughout the remaining lines - csvLines := []string{} - for _, line := range lines[2:] { // skip the TIME and INTERVAL lines - if line != "" { - csvLines = append(csvLines, line) - } - } - if len(csvLines) < 2 { - slog.Error("instruction mix CSV output is not in expected format, missing header and data") - return []Field{} - } - // if processwatch was killed, it may print a partial output line at the end - // check if the last line is a partial line by comparing the number of fields in the last line to the number of fields in the header - if len(strings.Split(csvLines[len(csvLines)-1], ",")) != len(strings.Split(csvLines[0], ",")) { - slog.Debug("removing partial line from instruction mix output", "line", csvLines[len(csvLines)-1], "lineNo", len(csvLines)-1) - csvLines = csvLines[:len(csvLines)-1] // remove the last line - } - // CSV - r := csv.NewReader(strings.NewReader(strings.Join(csvLines, "\n"))) - rows, err := r.ReadAll() - if err != nil { - slog.Error(err.Error()) - return []Field{} - } - if len(rows) < 2 { - slog.Error("instruction mix CSV output is not in expected format") - return []Field{} - } - fields := []Field{{Name: "Time"}} - // first row is the header, extract field names, skip the first three fields (interval, pid, name) - if len(rows[0]) < 3 { - slog.Error("not enough headers in instruction mix CSV output", slog.Any("headers", rows[0])) - return []Field{} - } - for _, field := range rows[0][3:] 
{ - fields = append(fields, Field{Name: field}) - } - sample := -1 - // values start in 2nd row, we're only interested in the first row of the sample - for _, row := range rows[1:] { - if len(row) < 2+len(fields) { - continue - } - rowSample, err := strconv.Atoi(row[0]) - if err != nil { - slog.Error(fmt.Sprintf("unable to convert instruction mix sample to int: %s", row[0])) - continue - } - if rowSample != sample { // new sample - sample = rowSample - for i := range fields { - if i == 0 { - fields[i].Values = append(fields[i].Values, startTime.Add(time.Duration(interval+(sample*interval))*time.Second).Format("15:04:05")) - } else { - fields[i].Values = append(fields[i].Values, row[i+2]) - } +func memoryBenchmarkTableMultiTargetHtmlRenderer(allTableValues []table.TableValues, targetNames []string) string { + data := [][]report.ScatterPoint{} + datasetNames := []string{} + for targetIdx, tableValues := range allTableValues { + points := []report.ScatterPoint{} + for valIdx := range tableValues.Fields[0].Values { + latency, err := strconv.ParseFloat(tableValues.Fields[0].Values[valIdx], 64) + if err != nil { + slog.Error("error parsing latency", slog.String("error", err.Error())) + return "" } - } - } - return fields + bandwidth, err := strconv.ParseFloat(tableValues.Fields[1].Values[valIdx], 64) + if err != nil { + slog.Error("error parsing bandwidth", slog.String("error", err.Error())) + return "" + } + points = append(points, report.ScatterPoint{X: bandwidth, Y: latency}) + } + data = append(data, points) + datasetNames = append(datasetNames, targetNames[targetIdx]) + } + chartConfig := report.ChartTemplateStruct{ + ID: fmt.Sprintf("latencyBandwidth%d", util.RandUint(10000)), + XaxisText: "Bandwidth (GB/s)", + YaxisText: "Latency (ns)", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "4", + SuggestedMin: "0", + SuggestedMax: "0", + } + return report.RenderScatterChart(data, datasetNames, chartConfig) } diff --git a/internal/table/security.go b/cmd/report/security.go similarity index 85% rename from internal/table/security.go rename to cmd/report/security.go index e5b4c077..c815a0b4 100644 --- a/internal/table/security.go +++ b/cmd/report/security.go @@ -1,20 +1,21 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package table +package report import ( "fmt" "sort" "strings" + "perfspect/internal/common" "perfspect/internal/script" ) func cveInfoFromOutput(outputs map[string]script.ScriptOutput) [][]string { vulns := make(map[string]string) // from spectre-meltdown-checker - for _, pair := range valsArrayFromRegexSubmatch(outputs[script.CveScriptName].Stdout, `(CVE-\d+-\d+): (.+)`) { + for _, pair := range common.ValsArrayFromRegexSubmatch(outputs[script.CveScriptName].Stdout, `(CVE-\d+-\d+): (.+)`) { vulns[pair[0]] = pair[1] } // sort the vulnerabilities by CVE ID diff --git a/cmd/report/system.go b/cmd/report/system.go new file mode 100644 index 00000000..a00d2beb --- /dev/null +++ b/cmd/report/system.go @@ -0,0 +1,141 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +package report + +import ( + "fmt" + "log/slog" + "regexp" + "strings" + "time" + + "perfspect/internal/common" + "perfspect/internal/cpus" + "perfspect/internal/script" + "perfspect/internal/table" +) + +func systemSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + // BASELINE: 1-node, 2x Intel® Xeon® , xx cores, 100W TDP, HT On/Off?, Turbo On/Off?, Total Memory xxx GB (xx slots/ xx GB/ 
xxxx MHz [run @ xxxx MHz] ), , , , . Test by Intel as of . + template := "1-node, %s, %sx %s, %s cores, %s TDP, %s %s, %s %s, Total Memory %s, BIOS %s, microcode %s, %s, %s, %s, %s. Test by Intel as of %s." + var systemType, socketCount, cpuModel, coreCount, tdp, htLabel, htOnOff, turboLabel, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion, date string + + // system type + systemType = common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + " " + common.ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) + // socket count + socketCount = common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(\d+)$`) + // CPU model + cpuModel = common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model name:\s*(.+?)$`) + // core count + coreCount = common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(\d+)$`) + // TDP + tdp = common.TDPFromOutput(outputs) + if tdp == "" { + tdp = "?" + } + vendor := common.ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Vendor ID:\s*(.+)$`) + // hyperthreading + htLabel = "HT" + if vendor == cpus.AMDVendor { + htLabel = "SMT" + } + htOnOff = common.HyperthreadingFromOutput(outputs) + switch htOnOff { + case "Enabled": + htOnOff = "On" + case "Disabled": + htOnOff = "Off" + case "N/A": + htOnOff = "N/A" + default: + htOnOff = "?" + } + // turbo + turboLabel = "Turbo" + if vendor == cpus.AMDVendor { + turboLabel = "Boost" + } + turboOnOff = turboEnabledFromOutput(outputs) + if strings.Contains(strings.ToLower(turboOnOff), "enabled") { + turboOnOff = "On" + } else if strings.Contains(strings.ToLower(turboOnOff), "disabled") { + turboOnOff = "Off" + } else { + turboOnOff = "?" 
+ } + // memory + installedMem = installedMemoryFromOutput(outputs) + // BIOS + biosVersion = common.ValFromRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, `^Version:\s*(.+?)$`) + // microcode + uCodeVersion = common.ValFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`) + // NICs + nics = common.NICSummaryFromOutput(outputs) + // disks + disks = common.DiskSummaryFromOutput(outputs) + // OS + operatingSystem = common.OperatingSystemFromOutput(outputs) + // kernel + kernelVersion = common.ValFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`) + // date + date = strings.TrimSpace(outputs[script.DateScriptName].Stdout) + // parse date so that we can format it + parsedTime, err := time.Parse("Mon Jan 2 15:04:05 MST 2006", date) // without AM/PM + if err != nil { + parsedTime, err = time.Parse("Mon Jan 2 15:04:05 AM MST 2006", date) // with AM/PM + } + if err == nil { + date = parsedTime.Format("January 2 2006") + } + + // put it all together + return fmt.Sprintf(template, systemType, socketCount, cpuModel, coreCount, tdp, htLabel, htOnOff, turboLabel, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion, date) +} + +func filesystemFieldValuesFromOutput(outputs map[string]script.ScriptOutput) []table.Field { + fieldValues := []table.Field{} + reFindmnt := regexp.MustCompile(`(.*)\s(.*)\s(.*)\s(.*)`) + for i, line := range strings.Split(outputs[script.DfScriptName].Stdout, "\n") { + if line == "" { + continue + } + fields := strings.Fields(line) + // "Mounted On" gets split into two fields, rejoin + if i == 0 && fields[len(fields)-2] == "Mounted" && fields[len(fields)-1] == "on" { + fields[len(fields)-2] = "Mounted on" + fields = fields[:len(fields)-1] + for _, field := range fields { + fieldValues = append(fieldValues, table.Field{Name: field, Values: []string{}}) + } + // add an additional field + fieldValues = append(fieldValues, table.Field{Name: "Mount Options", Values: []string{}}) + continue + } + if len(fields) != len(fieldValues)-1 { + slog.Error("unexpected number of fields in df output", slog.String("line", line)) + return nil + } + for i, field := range fields { + fieldValues[i].Values = append(fieldValues[i].Values, field) + } + // get mount options for the current file system + var options string + for i, line := range strings.Split(outputs[script.FindMntScriptName].Stdout, "\n") { + if i == 0 { + continue + } + match := reFindmnt.FindStringSubmatch(line) + if match != nil { + target := match[1] + source := match[2] + if fields[0] == source && fields[5] == target { + options = match[4] + break + } + } + } + fieldValues[len(fieldValues)-1].Values = append(fieldValues[len(fieldValues)-1].Values, options) + } + return fieldValues +} diff --git a/cmd/telemetry/telemetry.go b/cmd/telemetry/telemetry.go index d42b1ec9..e3146332 100644 --- a/cmd/telemetry/telemetry.go +++ b/cmd/telemetry/telemetry.go @@ -96,17 +96,17 @@ const ( var telemetrySummaryTableName = "Telemetry Summary" var categories = []common.Category{ - {FlagName: flagCPUName, FlagVar: &flagCPU, DefaultValue: false, Help: "monitor cpu utilization", TableNames: []string{table.CPUUtilizationTelemetryTableName, table.UtilizationCategoriesTelemetryTableName}}, - {FlagName: flagIPCName, FlagVar: &flagIPC, DefaultValue: false, Help: "monitor IPC", TableNames: []string{table.IPCTelemetryTableName}}, - {FlagName: flagC6Name, FlagVar: &flagC6, DefaultValue: false, Help: "monitor C6 residency", TableNames: 
[]string{table.C6TelemetryTableName}}, - {FlagName: flagFrequencyName, FlagVar: &flagFrequency, DefaultValue: false, Help: "monitor cpu frequency", TableNames: []string{table.FrequencyTelemetryTableName}}, - {FlagName: flagPowerName, FlagVar: &flagPower, DefaultValue: false, Help: "monitor power", TableNames: []string{table.PowerTelemetryTableName}}, - {FlagName: flagTemperatureName, FlagVar: &flagTemperature, DefaultValue: false, Help: "monitor temperature", TableNames: []string{table.TemperatureTelemetryTableName}}, - {FlagName: flagMemoryName, FlagVar: &flagMemory, DefaultValue: false, Help: "monitor memory", TableNames: []string{table.MemoryTelemetryTableName}}, - {FlagName: flagNetworkName, FlagVar: &flagNetwork, DefaultValue: false, Help: "monitor network", TableNames: []string{table.NetworkTelemetryTableName}}, - {FlagName: flagStorageName, FlagVar: &flagStorage, DefaultValue: false, Help: "monitor storage", TableNames: []string{table.DriveTelemetryTableName}}, - {FlagName: flagIRQRateName, FlagVar: &flagIRQRate, DefaultValue: false, Help: "monitor IRQ rate", TableNames: []string{table.IRQRateTelemetryTableName}}, - {FlagName: flagInstrMixName, FlagVar: &flagInstrMix, DefaultValue: false, Help: "monitor instruction mix", TableNames: []string{table.InstructionTelemetryTableName}}, + {FlagName: flagCPUName, FlagVar: &flagCPU, DefaultValue: false, Help: "monitor cpu utilization", Tables: []table.TableDefinition{tableDefinitions[CPUUtilizationTelemetryTableName], tableDefinitions[UtilizationCategoriesTelemetryTableName]}}, + {FlagName: flagIPCName, FlagVar: &flagIPC, DefaultValue: false, Help: "monitor IPC", Tables: []table.TableDefinition{tableDefinitions[IPCTelemetryTableName]}}, + {FlagName: flagC6Name, FlagVar: &flagC6, DefaultValue: false, Help: "monitor C6 residency", Tables: []table.TableDefinition{tableDefinitions[C6TelemetryTableName]}}, + {FlagName: flagFrequencyName, FlagVar: &flagFrequency, DefaultValue: false, Help: "monitor cpu frequency", Tables: []table.TableDefinition{tableDefinitions[FrequencyTelemetryTableName]}}, + {FlagName: flagPowerName, FlagVar: &flagPower, DefaultValue: false, Help: "monitor power", Tables: []table.TableDefinition{tableDefinitions[PowerTelemetryTableName]}}, + {FlagName: flagTemperatureName, FlagVar: &flagTemperature, DefaultValue: false, Help: "monitor temperature", Tables: []table.TableDefinition{tableDefinitions[TemperatureTelemetryTableName]}}, + {FlagName: flagMemoryName, FlagVar: &flagMemory, DefaultValue: false, Help: "monitor memory", Tables: []table.TableDefinition{tableDefinitions[MemoryTelemetryTableName]}}, + {FlagName: flagNetworkName, FlagVar: &flagNetwork, DefaultValue: false, Help: "monitor network", Tables: []table.TableDefinition{tableDefinitions[NetworkTelemetryTableName]}}, + {FlagName: flagStorageName, FlagVar: &flagStorage, DefaultValue: false, Help: "monitor storage", Tables: []table.TableDefinition{tableDefinitions[DriveTelemetryTableName]}}, + {FlagName: flagIRQRateName, FlagVar: &flagIRQRate, DefaultValue: false, Help: "monitor IRQ rate", Tables: []table.TableDefinition{tableDefinitions[IRQRateTelemetryTableName]}}, + {FlagName: flagInstrMixName, FlagVar: &flagInstrMix, DefaultValue: false, Help: "monitor instruction mix", Tables: []table.TableDefinition{tableDefinitions[InstructionTelemetryTableName]}}, } const ( @@ -270,15 +270,15 @@ func validateFlags(cmd *cobra.Command, args []string) error { } func runCmd(cmd *cobra.Command, args []string) error { - var tableNames []string + var tables []table.TableDefinition // 
add system summary table if not disabled if !flagNoSystemSummary { - tableNames = append(tableNames, table.BriefSysSummaryTableName) + tables = append(tables, common.TableDefinitions[common.BriefSysSummaryTableName]) } // add category tables for _, cat := range categories { if *cat.FlagVar || flagAll { - tableNames = append(tableNames, cat.TableNames...) + tables = append(tables, cat.Tables...) } } // confirm proper default for instrmix frequency @@ -292,7 +292,7 @@ func runCmd(cmd *cobra.Command, args []string) error { gaudiHlsmiPath := os.Getenv("PERFSPECT_GAUDI_HLSMI_PATH") // must be full path to hlsmi binary if gaudiHlsmiPath != "" { slog.Info("Gaudi telemetry enabled", slog.String("hlsmi_path", gaudiHlsmiPath)) - tableNames = append(tableNames, table.GaudiTelemetryTableName) + tables = append(tables, tableDefinitions[GaudiTelemetryTableName]) } // hidden feature - PDU telemetry, only enabled when four environment variables are set pduHost := os.Getenv("PERFSPECT_PDU_HOST") @@ -301,7 +301,7 @@ func runCmd(cmd *cobra.Command, args []string) error { pduOutlet := os.Getenv("PERFSPECT_PDU_OUTLET") if pduHost != "" && pduUser != "" && pduPassword != "" && pduOutlet != "" { slog.Info("PDU telemetry enabled", slog.String("host", pduHost), slog.String("outlet", pduOutlet)) - tableNames = append(tableNames, table.PDUTelemetryTableName) + tables = append(tables, tableDefinitions[PDUTelemetryTableName]) } // include telemetry summary table if all telemetry options are selected var summaryFunc common.SummaryFunc @@ -327,12 +327,28 @@ func runCmd(cmd *cobra.Command, args []string) error { "PDUPassword": pduPassword, "PDUOutlet": pduOutlet, }, - TableNames: tableNames, + Tables: tables, SummaryFunc: summaryFunc, SummaryTableName: telemetrySummaryTableName, - SummaryBeforeTableName: table.CPUUtilizationTelemetryTableName, + SummaryBeforeTableName: CPUUtilizationTelemetryTableName, InsightsFunc: insightsFunc, } + + report.RegisterHTMLRenderer(CPUUtilizationTelemetryTableName, cpuUtilizationTelemetryTableHTMLRenderer) + report.RegisterHTMLRenderer(UtilizationCategoriesTelemetryTableName, utilizationCategoriesTelemetryTableHTMLRenderer) + report.RegisterHTMLRenderer(IPCTelemetryTableName, ipcTelemetryTableHTMLRenderer) + report.RegisterHTMLRenderer(C6TelemetryTableName, c6TelemetryTableHTMLRenderer) + report.RegisterHTMLRenderer(FrequencyTelemetryTableName, averageFrequencyTelemetryTableHTMLRenderer) + report.RegisterHTMLRenderer(IRQRateTelemetryTableName, irqRateTelemetryTableHTMLRenderer) + report.RegisterHTMLRenderer(DriveTelemetryTableName, driveTelemetryTableHTMLRenderer) + report.RegisterHTMLRenderer(NetworkTelemetryTableName, networkTelemetryTableHTMLRenderer) + report.RegisterHTMLRenderer(MemoryTelemetryTableName, memoryTelemetryTableHTMLRenderer) + report.RegisterHTMLRenderer(PowerTelemetryTableName, powerTelemetryTableHTMLRenderer) + report.RegisterHTMLRenderer(TemperatureTelemetryTableName, temperatureTelemetryTableHTMLRenderer) + report.RegisterHTMLRenderer(InstructionTelemetryTableName, instructionTelemetryTableHTMLRenderer) + report.RegisterHTMLRenderer(GaudiTelemetryTableName, gaudiTelemetryTableHTMLRenderer) + report.RegisterHTMLRenderer(PDUTelemetryTableName, pduTelemetryTableHTMLRenderer) + return reportingCommand.Run() } @@ -346,17 +362,17 @@ func getTableValues(allTableValues []table.TableValues, tableName string) table. 
} func summaryFromTableValues(allTableValues []table.TableValues, _ map[string]script.ScriptOutput) table.TableValues { - cpuUtil := getCPUAveragePercentage(getTableValues(allTableValues, table.UtilizationCategoriesTelemetryTableName), "%idle", true) - ipc := getCPUAveragePercentage(getTableValues(allTableValues, table.IPCTelemetryTableName), "Core (Avg.)", false) - c6 := getCPUAveragePercentage(getTableValues(allTableValues, table.C6TelemetryTableName), "Core (Avg.)", false) - avgCoreFreq := getMetricAverage(getTableValues(allTableValues, table.FrequencyTelemetryTableName), []string{"Core (Avg.)"}, "Time") + cpuUtil := getCPUAveragePercentage(getTableValues(allTableValues, UtilizationCategoriesTelemetryTableName), "%idle", true) + ipc := getCPUAveragePercentage(getTableValues(allTableValues, IPCTelemetryTableName), "Core (Avg.)", false) + c6 := getCPUAveragePercentage(getTableValues(allTableValues, C6TelemetryTableName), "Core (Avg.)", false) + avgCoreFreq := getMetricAverage(getTableValues(allTableValues, FrequencyTelemetryTableName), []string{"Core (Avg.)"}, "Time") pkgPower := getPkgAveragePower(allTableValues) pkgTemperature := getPkgAverageTemperature(allTableValues) - driveReads := getMetricAverage(getTableValues(allTableValues, table.DriveTelemetryTableName), []string{"kB_read/s"}, "Device") - driveWrites := getMetricAverage(getTableValues(allTableValues, table.DriveTelemetryTableName), []string{"kB_wrtn/s"}, "Device") - networkReads := getMetricAverage(getTableValues(allTableValues, table.NetworkTelemetryTableName), []string{"rxkB/s"}, "Time") - networkWrites := getMetricAverage(getTableValues(allTableValues, table.NetworkTelemetryTableName), []string{"txkB/s"}, "Time") - memAvail := getMetricAverage(getTableValues(allTableValues, table.MemoryTelemetryTableName), []string{"avail"}, "Time") + driveReads := getMetricAverage(getTableValues(allTableValues, DriveTelemetryTableName), []string{"kB_read/s"}, "Device") + driveWrites := getMetricAverage(getTableValues(allTableValues, DriveTelemetryTableName), []string{"kB_wrtn/s"}, "Device") + networkReads := getMetricAverage(getTableValues(allTableValues, NetworkTelemetryTableName), []string{"rxkB/s"}, "Time") + networkWrites := getMetricAverage(getTableValues(allTableValues, NetworkTelemetryTableName), []string{"txkB/s"}, "Time") + memAvail := getMetricAverage(getTableValues(allTableValues, MemoryTelemetryTableName), []string{"avail"}, "Time") return table.TableValues{ TableDefinition: table.TableDefinition{ Name: telemetrySummaryTableName, @@ -470,7 +486,7 @@ func getCPUAveragePercentage(tableValues table.TableValues, fieldName string, in } func getPkgAverageTemperature(allTableValues []table.TableValues) string { - tableValues := getTableValues(allTableValues, table.TemperatureTelemetryTableName) + tableValues := getTableValues(allTableValues, TemperatureTelemetryTableName) // number of packages can vary, so we need to find the average temperature across all packages if len(tableValues.Fields) == 0 { return "" @@ -503,7 +519,7 @@ func getPkgAverageTemperature(allTableValues []table.TableValues) string { } func getPkgAveragePower(allTableValues []table.TableValues) string { - tableValues := getTableValues(allTableValues, table.PowerTelemetryTableName) + tableValues := getTableValues(allTableValues, PowerTelemetryTableName) // number of packages can vary, so we need to find the average power across all packages if len(tableValues.Fields) == 0 { return "" diff --git a/cmd/telemetry/telemetry_tables.go b/cmd/telemetry/telemetry_tables.go 
new file mode 100644 index 00000000..f7fcef08 --- /dev/null +++ b/cmd/telemetry/telemetry_tables.go @@ -0,0 +1,1368 @@ +package telemetry + +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +import ( + "encoding/csv" + "fmt" + "log/slog" + "perfspect/internal/common" + "perfspect/internal/cpus" + "perfspect/internal/report" + "perfspect/internal/script" + "perfspect/internal/table" + "perfspect/internal/util" + "regexp" + "slices" + "sort" + "strconv" + "strings" + "time" +) + +// telemetry table names +const ( + CPUUtilizationTelemetryTableName = "CPU Utilization Telemetry" + UtilizationCategoriesTelemetryTableName = "Utilization Categories Telemetry" + IPCTelemetryTableName = "IPC Telemetry" + C6TelemetryTableName = "C6 Telemetry" + FrequencyTelemetryTableName = "Frequency Telemetry" + IRQRateTelemetryTableName = "IRQ Rate Telemetry" + InstructionTelemetryTableName = "Instruction Telemetry" + DriveTelemetryTableName = "Drive Telemetry" + NetworkTelemetryTableName = "Network Telemetry" + MemoryTelemetryTableName = "Memory Telemetry" + PowerTelemetryTableName = "Power Telemetry" + TemperatureTelemetryTableName = "Temperature Telemetry" + GaudiTelemetryTableName = "Gaudi Telemetry" + PDUTelemetryTableName = "PDU Telemetry" +) + +// telemetry table menu labels +const ( + CPUUtilizationTelemetryMenuLabel = "CPU Utilization" + UtilizationCategoriesTelemetryMenuLabel = "Utilization Categories" + IPCTelemetryMenuLabel = "IPC" + C6TelemetryMenuLabel = "C6" + FrequencyTelemetryMenuLabel = "Frequency" + IRQRateTelemetryMenuLabel = "IRQ Rate" + InstructionTelemetryMenuLabel = "Instruction" + DriveTelemetryMenuLabel = "Drive" + NetworkTelemetryMenuLabel = "Network" + MemoryTelemetryMenuLabel = "Memory" + PowerTelemetryMenuLabel = "Power" + TemperatureTelemetryMenuLabel = "Temperature" + GaudiTelemetryMenuLabel = "Gaudi" + PDUTelemetryMenuLabel = "PDU" +) + +var tableDefinitions = map[string]table.TableDefinition{ + // + // telemetry tables + // + CPUUtilizationTelemetryTableName: { + Name: CPUUtilizationTelemetryTableName, + MenuLabel: CPUUtilizationTelemetryMenuLabel, + HasRows: true, + ScriptNames: []string{ + script.MpstatTelemetryScriptName, + }, + FieldsFunc: cpuUtilizationTelemetryTableValues}, + UtilizationCategoriesTelemetryTableName: { + Name: UtilizationCategoriesTelemetryTableName, + MenuLabel: UtilizationCategoriesTelemetryMenuLabel, + HasRows: true, + ScriptNames: []string{ + script.MpstatTelemetryScriptName, + }, + FieldsFunc: utilizationCategoriesTelemetryTableValues}, + IPCTelemetryTableName: { + Name: IPCTelemetryTableName, + MenuLabel: IPCTelemetryMenuLabel, + Architectures: []string{cpus.X86Architecture}, + HasRows: true, + ScriptNames: []string{ + script.TurbostatTelemetryScriptName, + }, + FieldsFunc: ipcTelemetryTableValues}, + C6TelemetryTableName: { + Name: C6TelemetryTableName, + MenuLabel: C6TelemetryMenuLabel, + Architectures: []string{cpus.X86Architecture}, + HasRows: true, + ScriptNames: []string{ + script.TurbostatTelemetryScriptName, + }, + FieldsFunc: c6TelemetryTableValues}, + FrequencyTelemetryTableName: { + Name: FrequencyTelemetryTableName, + MenuLabel: FrequencyTelemetryMenuLabel, + Architectures: []string{cpus.X86Architecture}, + HasRows: true, + ScriptNames: []string{ + script.TurbostatTelemetryScriptName, + }, + FieldsFunc: frequencyTelemetryTableValues}, + IRQRateTelemetryTableName: { + Name: IRQRateTelemetryTableName, + MenuLabel: IRQRateTelemetryMenuLabel, + HasRows: true, + ScriptNames: []string{ + 
script.MpstatTelemetryScriptName, + }, + FieldsFunc: irqRateTelemetryTableValues}, + DriveTelemetryTableName: { + Name: DriveTelemetryTableName, + MenuLabel: DriveTelemetryMenuLabel, + HasRows: true, + ScriptNames: []string{ + script.IostatTelemetryScriptName, + }, + FieldsFunc: driveTelemetryTableValues}, + NetworkTelemetryTableName: { + Name: NetworkTelemetryTableName, + MenuLabel: NetworkTelemetryMenuLabel, + HasRows: true, + ScriptNames: []string{ + script.NetworkTelemetryScriptName, + }, + FieldsFunc: networkTelemetryTableValues}, + MemoryTelemetryTableName: { + Name: MemoryTelemetryTableName, + MenuLabel: MemoryTelemetryMenuLabel, + HasRows: true, + ScriptNames: []string{ + script.MemoryTelemetryScriptName, + }, + FieldsFunc: memoryTelemetryTableValues}, + PowerTelemetryTableName: { + Name: PowerTelemetryTableName, + MenuLabel: PowerTelemetryMenuLabel, + Architectures: []string{cpus.X86Architecture}, + HasRows: true, + ScriptNames: []string{ + script.TurbostatTelemetryScriptName, + }, + FieldsFunc: powerTelemetryTableValues}, + TemperatureTelemetryTableName: { + Name: TemperatureTelemetryTableName, + MenuLabel: TemperatureTelemetryMenuLabel, + Architectures: []string{cpus.X86Architecture}, + HasRows: true, + ScriptNames: []string{ + script.TurbostatTelemetryScriptName, + }, + FieldsFunc: temperatureTelemetryTableValues}, + InstructionTelemetryTableName: { + Name: InstructionTelemetryTableName, + MenuLabel: InstructionTelemetryMenuLabel, + Architectures: []string{cpus.X86Architecture}, + HasRows: true, + ScriptNames: []string{ + script.InstructionTelemetryScriptName, + }, + FieldsFunc: instructionTelemetryTableValues}, + GaudiTelemetryTableName: { + Name: GaudiTelemetryTableName, + MenuLabel: GaudiTelemetryMenuLabel, + Architectures: []string{cpus.X86Architecture}, + HasRows: true, + ScriptNames: []string{ + script.GaudiTelemetryScriptName, + }, + NoDataFound: "No Gaudi telemetry found. 
Gaudi devices and the hl-smi tool must be installed on the target system to collect Gaudi stats.", + FieldsFunc: gaudiTelemetryTableValues}, + PDUTelemetryTableName: { + Name: PDUTelemetryTableName, + MenuLabel: PDUTelemetryMenuLabel, + HasRows: true, + ScriptNames: []string{ + script.PDUTelemetryScriptName, + }, + FieldsFunc: pduTelemetryTableValues}, +} + +func cpuUtilizationTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ + {Name: "Time"}, + {Name: "CPU"}, + {Name: "CORE"}, + {Name: "SOCK"}, + {Name: "NODE"}, + {Name: "%usr"}, + {Name: "%nice"}, + {Name: "%sys"}, + {Name: "%iowait"}, + {Name: "%irq"}, + {Name: "%soft"}, + {Name: "%steal"}, + {Name: "%guest"}, + {Name: "%gnice"}, + {Name: "%idle"}, + } + reStat := regexp.MustCompile(`^(\d\d:\d\d:\d\d)\s+(\d+)\s+(\d+)\s+(\d+)\s+(-*\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)$`) + for line := range strings.SplitSeq(outputs[script.MpstatTelemetryScriptName].Stdout, "\n") { + match := reStat.FindStringSubmatch(line) + if len(match) == 0 { + continue + } + for i := range fields { + fields[i].Values = append(fields[i].Values, match[i+1]) + } + } + if len(fields[0].Values) == 0 { + return []table.Field{} + } + return fields +} + +func utilizationCategoriesTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ + {Name: "Time"}, + {Name: "%usr"}, + {Name: "%nice"}, + {Name: "%sys"}, + {Name: "%iowait"}, + {Name: "%irq"}, + {Name: "%soft"}, + {Name: "%steal"}, + {Name: "%guest"}, + {Name: "%gnice"}, + {Name: "%idle"}, + } + reStat := regexp.MustCompile(`^(\d\d:\d\d:\d\d)\s+all\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)$`) + for line := range strings.SplitSeq(outputs[script.MpstatTelemetryScriptName].Stdout, "\n") { + match := reStat.FindStringSubmatch(line) + if len(match) == 0 { + continue + } + for i := range fields { + fields[i].Values = append(fields[i].Values, match[i+1]) + } + } + if len(fields[0].Values) == 0 { + return []table.Field{} + } + return fields +} + +func irqRateTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ + {Name: "Time"}, + {Name: "CPU"}, + {Name: "HI/s"}, + {Name: "TIMER/s"}, + {Name: "NET_TX/s"}, + {Name: "NET_RX/s"}, + {Name: "BLOCK/s"}, + {Name: "IRQ_POLL/s"}, + {Name: "TASKLET/s"}, + {Name: "SCHED/s"}, + {Name: "HRTIMER/s"}, + {Name: "RCU/s"}, + } + reStat := regexp.MustCompile(`^(\d\d:\d\d:\d\d)\s+(\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)$`) + for line := range strings.SplitSeq(outputs[script.MpstatTelemetryScriptName].Stdout, "\n") { + match := reStat.FindStringSubmatch(line) + if len(match) == 0 { + continue + } + for i := range fields { + fields[i].Values = append(fields[i].Values, match[i+1]) + } + } + if len(fields[0].Values) == 0 { + return []table.Field{} + } + return fields +} + +func driveTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ + {Name: "Time"}, + {Name: "Device"}, + {Name: "tps"}, + {Name: "kB_read/s"}, + {Name: "kB_wrtn/s"}, + {Name: "kB_dscd/s"}, + } + // the time is on its own line, so we need to keep track of it + reTime := regexp.MustCompile(`^\d\d\d\d-\d\d-\d\dT(\d\d:\d\d:\d\d)`) + // don't capture the last three 
vals: "kB_read","kB_wrtn","kB_dscd" -- they aren't the same scale as the others + reStat := regexp.MustCompile(`^(\w+)\s*(\d+.\d+)\s*(\d+.\d+)\s*(\d+.\d+)\s*(\d+.\d+)\s*\d+\s*\d+\s*\d+$`) + var time string + for line := range strings.SplitSeq(outputs[script.IostatTelemetryScriptName].Stdout, "\n") { + match := reTime.FindStringSubmatch(line) + if len(match) > 0 { + time = match[1] + continue + } + match = reStat.FindStringSubmatch(line) + if len(match) > 0 { + fields[0].Values = append(fields[0].Values, time) + for i := range fields[1:] { + fields[i+1].Values = append(fields[i+1].Values, match[i+1]) + } + } + } + if len(fields[0].Values) == 0 { + return []table.Field{} + } + return fields +} + +func networkTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ + {Name: "Time"}, + {Name: "IFACE"}, + {Name: "rxpck/s"}, + {Name: "txpck/s"}, + {Name: "rxkB/s"}, + {Name: "txkB/s"}, + } + // don't capture the last four vals: "rxcmp/s","txcmp/s","rxcmt/s","%ifutil" -- obscure more important vals + reStat := regexp.MustCompile(`^(\d+:\d+:\d+)\s*(\w*)\s*(\d+.\d+)\s*(\d+.\d+)\s*(\d+.\d+)\s*(\d+.\d+)\s*\d+.\d+\s*\d+.\d+\s*\d+.\d+\s*\d+.\d+$`) + for line := range strings.SplitSeq(outputs[script.NetworkTelemetryScriptName].Stdout, "\n") { + match := reStat.FindStringSubmatch(line) + if len(match) == 0 { + continue + } + for i := range fields { + fields[i].Values = append(fields[i].Values, match[i+1]) + } + } + if len(fields[0].Values) == 0 { + return []table.Field{} + } + return fields +} + +func memoryTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ + {Name: "Time"}, + {Name: "free"}, + {Name: "avail"}, + {Name: "used"}, + {Name: "buffers"}, + {Name: "cache"}, + {Name: "commit"}, + {Name: "active"}, + {Name: "inactive"}, + {Name: "dirty"}, + } + reStat := regexp.MustCompile(`^(\d+:\d+:\d+)\s*(\d+)\s*(\d+)\s*(\d+)\s*\d+\.\d+\s*(\d+)\s*(\d+)\s*(\d+)\s*\d+\.\d+\s*(\d+)\s*(\d+)\s*(\d+)$`) + for line := range strings.SplitSeq(outputs[script.MemoryTelemetryScriptName].Stdout, "\n") { + match := reStat.FindStringSubmatch(line) + if len(match) == 0 { + continue + } + for i := range fields { + fields[i].Values = append(fields[i].Values, match[i+1]) + } + } + if len(fields[0].Values) == 0 { + return []table.Field{} + } + return fields +} + +func powerTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ + {Name: "Time"}, + } + packageRows, err := common.TurbostatPackageRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"PkgWatt", "RAMWatt"}) + if err != nil { + slog.Error(err.Error()) + return []table.Field{} + } + for i := range packageRows { + fields = append(fields, table.Field{Name: fmt.Sprintf("Package %d", i)}) + fields = append(fields, table.Field{Name: fmt.Sprintf("DRAM %d", i)}) + } + // for each package + numPackages := len(packageRows) + for i := range packageRows { + // traverse the rows + for _, row := range packageRows[i] { + if i == 0 { + fields[0].Values = append(fields[0].Values, row[0]) // Timestamp + } + // append the package power and DRAM power to the fields + fields[i*numPackages+1].Values = append(fields[i*numPackages+1].Values, row[1]) // Package power + fields[i*numPackages+2].Values = append(fields[i*numPackages+2].Values, row[2]) // DRAM power + } + } + if len(fields[0].Values) == 0 { + return []table.Field{} + } + return fields +} + +func temperatureTelemetryTableValues(outputs map[string]script.ScriptOutput) 
[]table.Field { + fields := []table.Field{ + {Name: "Time"}, + {Name: "Core (Avg.)"}, + } + platformRows, err := common.TurbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"CoreTmp"}) + if err != nil { + slog.Error(err.Error()) + return []table.Field{} + } + packageRows, err := common.TurbostatPackageRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"PkgTmp"}) + if err != nil { + // not an error, just means no package rows (package temperature) + slog.Warn(err.Error()) + } + // add the package rows to the fields + for i := range packageRows { + fields = append(fields, table.Field{Name: fmt.Sprintf("Package %d", i)}) + } + // for each platform row + for i := range platformRows { + // append the timestamp to the fields + fields[0].Values = append(fields[0].Values, platformRows[i][0]) // Timestamp + // append the core temperature values to the fields + fields[1].Values = append(fields[1].Values, platformRows[i][1]) // Core temperature + } + // for each package + for i := range packageRows { + // traverse the rows + for _, row := range packageRows[i] { + // append the package temperature to the fields + fields[i+2].Values = append(fields[i+2].Values, row[1]) // Package temperature + } + } + if len(fields[0].Values) == 0 { + return []table.Field{} + } + return fields +} + +func frequencyTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ + {Name: "Time"}, + {Name: "Core (Avg.)"}, + } + platformRows, err := common.TurbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"Bzy_MHz"}) + if err != nil { + slog.Error(err.Error()) + return []table.Field{} + } + packageRows, err := common.TurbostatPackageRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"UncMHz"}) + if err != nil { + // not an error, just means no package rows (uncore frequency) + slog.Warn(err.Error()) + } + // add the package rows to the fields + for i := range packageRows { + fields = append(fields, table.Field{Name: fmt.Sprintf("Uncore Package %d", i)}) + } + // for each platform row + for i := range platformRows { + // append the timestamp to the fields + fields[0].Values = append(fields[0].Values, platformRows[i][0]) // Timestamp + // append the core frequency values to the fields + fields[1].Values = append(fields[1].Values, platformRows[i][1]) // Core frequency + } + // for each package + for i := range packageRows { + // traverse the rows + for _, row := range packageRows[i] { + // append the package frequency to the fields + fields[i+2].Values = append(fields[i+2].Values, row[1]) // Package frequency + } + } + if len(fields[0].Values) == 0 { + return []table.Field{} + } + return fields +} + +func ipcTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ + {Name: "Time"}, + {Name: "Core (Avg.)"}, + } + platformRows, err := common.TurbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"IPC"}) + if err != nil { + slog.Error(err.Error()) + return []table.Field{} + } + if len(platformRows) == 0 { + slog.Warn("no platform rows found in turbostat telemetry output") + return []table.Field{} + } + // for each platform row + for i := range platformRows { + // append the timestamp to the fields + fields[0].Values = append(fields[0].Values, platformRows[i][0]) // Timestamp + // append the core IPC values to the fields + fields[1].Values = append(fields[1].Values, platformRows[i][1]) // Core IPC + } + return fields +} + 
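The turbostat-backed builders above (power, temperature, frequency, IPC) all follow the same pattern: request columns from common.TurbostatPlatformRows / common.TurbostatPackageRows, then copy row[0] (the timestamp) and row[1..] (the requested columns) into table fields. A minimal sketch of one more such builder, hypothetical and not part of the patch, assuming turbostat's Busy% column is present in the captured telemetry:

func busyTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field {
	fields := []table.Field{
		{Name: "Time"},
		{Name: "Busy (Avg.)"},
	}
	// one row per turbostat interval: row[0] = timestamp, row[1] = Busy% (assumed column)
	platformRows, err := common.TurbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"Busy%"})
	if err != nil {
		slog.Error(err.Error())
		return []table.Field{}
	}
	for i := range platformRows {
		fields[0].Values = append(fields[0].Values, platformRows[i][0]) // timestamp
		fields[1].Values = append(fields[1].Values, platformRows[i][1]) // Busy% value
	}
	return fields
}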
+func c6TelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + fields := []table.Field{ + {Name: "Time"}, + {Name: "Package (Avg.)"}, + {Name: "Core (Avg.)"}, + } + platformRows, err := common.TurbostatPlatformRows(outputs[script.TurbostatTelemetryScriptName].Stdout, []string{"C6%", "CPU%c6"}) + if err != nil { + slog.Error(err.Error()) + return []table.Field{} + } + if len(platformRows) == 0 { + slog.Warn("no platform rows found in turbostat telemetry output") + return []table.Field{} + } + // for each platform row + for i := range platformRows { + // append the timestamp to the fields + fields[0].Values = append(fields[0].Values, platformRows[i][0]) // Timestamp + // append the C6 residency values to the fields + fields[1].Values = append(fields[1].Values, platformRows[i][1]) // C6% + // append the CPU C6 residency values to the fields + fields[2].Values = append(fields[2].Values, platformRows[i][2]) // CPU%c6 + } + return fields +} + +func gaudiTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + // parse the CSV output + csvOutput := outputs[script.GaudiTelemetryScriptName].Stdout + if csvOutput == "" { + return []table.Field{} + } + r := csv.NewReader(strings.NewReader(csvOutput)) + rows, err := r.ReadAll() + if err != nil { + slog.Error(err.Error()) + return []table.Field{} + } + if len(rows) < 2 { + slog.Error("gaudi stats output is not in expected format") + return []table.Field{} + } + // build fields to match CSV output from hl_smi tool + fields := []table.Field{} + // first row is the header, extract field names + for _, fieldName := range rows[0] { + fields = append(fields, table.Field{Name: strings.TrimSpace(fieldName)}) + } + // values start in 2nd row + for _, row := range rows[1:] { + for i := range fields { + // reformat the timestamp field to only include the time + if i == 0 { + // parse the timestamp field's value + rowTime, err := time.Parse("Mon Jan 2 15:04:05 MST 2006", row[i]) + if err != nil { + err = fmt.Errorf("unable to parse Gaudi telemetry timestamp: %s", row[i]) + slog.Error(err.Error()) + return []table.Field{} + } + // reformat the timestamp field's value to include time only + timestamp := rowTime.Format("15:04:05") + fields[i].Values = append(fields[i].Values, timestamp) + } else { + fields[i].Values = append(fields[i].Values, strings.TrimSpace(row[i])) + } + } + } + return fields +} + +func pduTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + // extract PDU fields and their values from PDU telemetry script output + // output is CSV formatted: + // Timestamp,ActivePower(W) + // 18:32:38,123.45 + // 18:32:40,124.10 + // ... 
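+ // The header row supplies the field names and each data row supplies one value per
+ // field, so the sample above would parse into:
+ //   fields[0] = {Name: "Timestamp",      Values: ["18:32:38", "18:32:40"]}
+ //   fields[1] = {Name: "ActivePower(W)", Values: ["123.45", "124.10"]}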
+ fields := []table.Field{} + reader := csv.NewReader(strings.NewReader(outputs[script.PDUTelemetryScriptName].Stdout)) + records, err := reader.ReadAll() + if err != nil { + slog.Error("failed to read PDU telemetry CSV output", slog.String("error", err.Error())) + return []table.Field{} + } + if len(records) == 0 { + return []table.Field{} + } + // first row is the header + for _, header := range records[0] { + fields = append(fields, table.Field{Name: header, Values: []string{}}) + } + // subsequent rows are data + for _, record := range records[1:] { + if len(record) != len(fields) { + slog.Error("unexpected number of fields in PDU telemetry output", slog.Int("expected", len(fields)), slog.Int("got", len(record))) + return []table.Field{} + } + for i, value := range record { + fields[i].Values = append(fields[i].Values, value) + } + } + return fields +} + +func instructionTelemetryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + // first two lines are not part of the CSV output, they are the start time and interval + var startTime time.Time + var interval int + lines := strings.Split(outputs[script.InstructionTelemetryScriptName].Stdout, "\n") + if len(lines) < 4 { + slog.Warn("no data found in instruction mix output") + return []table.Field{} + } + // TIME + line := lines[0] + if !strings.HasPrefix(line, "TIME") { + slog.Error("instruction mix output is not in expected format, missing TIME") + return []table.Field{} + } else { + val := strings.Split(line, " ")[1] + var err error + startTime, err = time.Parse("15:04:05", val) + if err != nil { + slog.Error(fmt.Sprintf("unable to parse instruction mix start time: %s", val)) + return []table.Field{} + } + } + // INTERVAL + line = lines[1] + if !strings.HasPrefix(line, "INTERVAL") { + slog.Error("instruction mix output is not in expected format, missing INTERVAL") + return []table.Field{} + } else { + val := strings.Split(line, " ")[1] + var err error + interval, err = strconv.Atoi(val) + if err != nil { + slog.Error(fmt.Sprintf("unable to convert instruction mix interval to int: %s", val)) + return []table.Field{} + } + } + // remove blank lines that occur throughout the remaining lines + csvLines := []string{} + for _, line := range lines[2:] { // skip the TIME and INTERVAL lines + if line != "" { + csvLines = append(csvLines, line) + } + } + if len(csvLines) < 2 { + slog.Error("instruction mix CSV output is not in expected format, missing header and data") + return []table.Field{} + } + // if processwatch was killed, it may print a partial output line at the end + // check if the last line is a partial line by comparing the number of fields in the last line to the number of fields in the header + if len(strings.Split(csvLines[len(csvLines)-1], ",")) != len(strings.Split(csvLines[0], ",")) { + slog.Debug("removing partial line from instruction mix output", "line", csvLines[len(csvLines)-1], "lineNo", len(csvLines)-1) + csvLines = csvLines[:len(csvLines)-1] // remove the last line + } + // CSV + r := csv.NewReader(strings.NewReader(strings.Join(csvLines, "\n"))) + rows, err := r.ReadAll() + if err != nil { + slog.Error(err.Error()) + return []table.Field{} + } + if len(rows) < 2 { + slog.Error("instruction mix CSV output is not in expected format") + return []table.Field{} + } + fields := []table.Field{{Name: "Time"}} + // first row is the header, extract field names, skip the first three fields (interval, pid, name) + if len(rows[0]) < 3 { + slog.Error("not enough headers in instruction mix CSV output", 
slog.Any("headers", rows[0])) + return []table.Field{} + } + for _, field := range rows[0][3:] { + fields = append(fields, table.Field{Name: field}) + } + sample := -1 + // values start in 2nd row, we're only interested in the first row of the sample + for _, row := range rows[1:] { + if len(row) < 2+len(fields) { + continue + } + rowSample, err := strconv.Atoi(row[0]) + if err != nil { + slog.Error(fmt.Sprintf("unable to convert instruction mix sample to int: %s", row[0])) + continue + } + if rowSample != sample { // new sample + sample = rowSample + for i := range fields { + if i == 0 { + fields[i].Values = append(fields[i].Values, startTime.Add(time.Duration(interval+(sample*interval))*time.Second).Format("15:04:05")) + } else { + fields[i].Values = append(fields[i].Values, row[i+2]) + } + } + } + } + return fields +} + +func telemetryTableHTMLRenderer(tableValues table.TableValues, data [][]float64, datasetNames []string, chartConfig report.ChartTemplateStruct, datasetHiddenFlags []bool) string { + tsFieldIdx := 0 + var timestamps []string + for i := range tableValues.Fields[0].Values { + timestamp := tableValues.Fields[tsFieldIdx].Values[i] + if !slices.Contains(timestamps, timestamp) { // could be slow if list is long + timestamps = append(timestamps, timestamp) + } + } + return renderLineChart(timestamps, data, datasetNames, chartConfig, datasetHiddenFlags) +} + +// renderLineChart generates an HTML string for a line chart using the provided data and configuration. +// +// Parameters: +// +// xAxisLabels - Slice of strings representing the labels for the X axis. +// data - 2D slice of float64 values, where each inner slice represents a dataset's data points. +// datasetNames - Slice of strings representing the names of each dataset. +// config - chartTemplateStruct containing chart configuration options. +// datasetHiddenFlags - Slice of booleans indicating whether each dataset should be hidden initially. +// +// Returns: +// +// A string containing the rendered HTML for the line chart. 
+func renderLineChart(xAxisLabels []string, data [][]float64, datasetNames []string, config report.ChartTemplateStruct, datasetHiddenFlags []bool) string { + allFormattedPoints := []string{} + for dataIdx := range data { + formattedPoints := []string{} + for _, point := range data[dataIdx] { + formattedPoints = append(formattedPoints, fmt.Sprintf("%f", point)) + } + allFormattedPoints = append(allFormattedPoints, strings.Join(formattedPoints, ",")) + } + return report.RenderChart("line", allFormattedPoints, datasetNames, xAxisLabels, config, datasetHiddenFlags) +} + +func cpuUtilizationTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { + data := [][]float64{} + datasetNames := []string{} + // collect the busy (100 - idle) values for each CPU + cpuBusyStats := make(map[int][]float64) + idleFieldIdx := len(tableValues.Fields) - 1 + cpuFieldIdx := 1 + for i := range tableValues.Fields[0].Values { + idle, err := strconv.ParseFloat(tableValues.Fields[idleFieldIdx].Values[i], 64) + if err != nil { + continue + } + busy := 100 - idle + cpu, err := strconv.Atoi(tableValues.Fields[cpuFieldIdx].Values[i]) + if err != nil { + continue + } + if _, ok := cpuBusyStats[cpu]; !ok { + cpuBusyStats[cpu] = []float64{} + } + cpuBusyStats[cpu] = append(cpuBusyStats[cpu], busy) + } + // sort map keys by cpu number + var keys []int + for cpu := range cpuBusyStats { + keys = append(keys, cpu) + } + sort.Ints(keys) + // build the data + for _, cpu := range keys { + if len(cpuBusyStats[cpu]) > 0 { + data = append(data, cpuBusyStats[cpu]) + datasetNames = append(datasetNames, fmt.Sprintf("CPU %d", cpu)) + } + } + chartConfig := report.ChartTemplateStruct{ + ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), + XaxisText: "Time", + YaxisText: "% Utilization", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "false", + AspectRatio: "2", + SuggestedMin: "0", + SuggestedMax: "100", + } + return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) +} + +func utilizationCategoriesTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { + data := [][]float64{} + datasetNames := []string{} + for _, field := range tableValues.Fields[1:] { + points := []float64{} + for _, val := range field.Values { + if val == "" { + break + } + util, err := strconv.ParseFloat(val, 64) + if err != nil { + slog.Error("error parsing percentage", slog.String("error", err.Error())) + return "" + } + points = append(points, util) + } + if len(points) > 0 { + data = append(data, points) + datasetNames = append(datasetNames, field.Name) + } + } + chartConfig := report.ChartTemplateStruct{ + ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), + XaxisText: "Time", + YaxisText: "% Utilization", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "2", + SuggestedMin: "0", + SuggestedMax: "100", + } + return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) +} + +func irqRateTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { + data := [][]float64{} + datasetNames := []string{} + for _, field := range tableValues.Fields[2:] { // 1 data set per field, e.g., %usr, %nice, etc., skip Time and CPU fields + datasetNames = append(datasetNames, field.Name) + // sum the values in the field per timestamp, store the sum as a point + timeStamp := tableValues.Fields[0].Values[0] + points := []float64{} + total := 0.0 + for i := range field.Values { + if 
tableValues.Fields[0].Values[i] != timeStamp { // new timestamp?
+				points = append(points, total)
+				total = 0.0
+				timeStamp = tableValues.Fields[0].Values[i]
+			}
+			val, err := strconv.ParseFloat(field.Values[i], 64)
+			if err != nil {
+				slog.Error("error parsing value", slog.String("error", err.Error()))
+				return ""
+			}
+			total += val
+		}
+		points = append(points, total) // add the point for the last timestamp
+		// save the points in the data slice
+		data = append(data, points)
+	}
+	chartConfig := report.ChartTemplateStruct{
+		ID:            fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)),
+		XaxisText:     "Time",
+		YaxisText:     "IRQ/s",
+		TitleText:     "",
+		DisplayTitle:  "false",
+		DisplayLegend: "true",
+		AspectRatio:   "2",
+		SuggestedMin:  "0",
+		SuggestedMax:  "0",
+	}
+	return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil)
+}
+
+// driveTelemetryTableHTMLRenderer renders charts of drive statistics
+// - one scatter chart per drive, showing the drive's utilization over time
+// - each drive stat is a separate dataset within the chart
+func driveTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string {
+	var out string
+	driveStats := make(map[string][][]string)
+	for i := range tableValues.Fields[0].Values {
+		drive := tableValues.Fields[1].Values[i]
+		if _, ok := driveStats[drive]; !ok {
+			driveStats[drive] = make([][]string, len(tableValues.Fields)-2)
+		}
+		for j := range len(tableValues.Fields) - 2 {
+			driveStats[drive][j] = append(driveStats[drive][j], tableValues.Fields[j+2].Values[i])
+		}
+	}
+	var keys []string
+	for drive := range driveStats {
+		keys = append(keys, drive)
+	}
+	sort.Strings(keys)
+	for _, drive := range keys {
+		data := [][]float64{}
+		datasetNames := []string{}
+		for i, statVals := range driveStats[drive] {
+			points := []float64{}
+			for i, val := range statVals {
+				if val == "" {
+					slog.Error("empty stat value", slog.String("drive", drive), slog.Int("index", i))
+					return ""
+				}
+				util, err := strconv.ParseFloat(val, 64)
+				if err != nil {
+					slog.Error("error parsing stat", slog.String("error", err.Error()))
+					return ""
+				}
+				points = append(points, util)
+			}
+			if len(points) > 0 {
+				data = append(data, points)
+				datasetNames = append(datasetNames, tableValues.Fields[i+2].Name)
+			}
+		}
+		chartConfig := report.ChartTemplateStruct{
+			ID:            fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)),
+			XaxisText:     "Time",
+			YaxisText:     "",
+			TitleText:     drive,
+			DisplayTitle:  "true",
+			DisplayLegend: "true",
+			AspectRatio:   "2",
+			SuggestedMin:  "0",
+			SuggestedMax:  "0",
+		}
+		out += telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil)
+	}
+	return out
+}
+
+// networkTelemetryTableHTMLRenderer renders charts of network device statistics
+// - one scatter chart per network device, showing the device's utilization over time
+// - each network stat is a separate dataset within the chart
+func networkTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string {
+	var out string
+	nicStats := make(map[string][][]string)
+	for i := range tableValues.Fields[0].Values {
+		nic := tableValues.Fields[1].Values[i]
+		if _, ok := nicStats[nic]; !ok {
+			nicStats[nic] = make([][]string, len(tableValues.Fields)-2)
+		}
+		for j := range len(tableValues.Fields) - 2 {
+			nicStats[nic][j] = append(nicStats[nic][j], tableValues.Fields[j+2].Values[i])
+		}
+	}
+	var keys []string
+	for nic := range nicStats {
+		keys = append(keys, nic)
+	}
+	sort.Strings(keys)
+	for _, nic := range keys
{ + data := [][]float64{} + datasetNames := []string{} + for i, statVals := range nicStats[nic] { + points := []float64{} + for i, val := range statVals { + if val == "" { + slog.Error("empty stat value", slog.String("nic", nic), slog.Int("index", i)) + return "" + } + util, err := strconv.ParseFloat(val, 64) + if err != nil { + slog.Error("error parsing stat", slog.String("error", err.Error())) + return "" + } + points = append(points, util) + } + if len(points) > 0 { + data = append(data, points) + datasetNames = append(datasetNames, tableValues.Fields[i+2].Name) + } + } + chartConfig := report.ChartTemplateStruct{ + ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), + XaxisText: "Time", + YaxisText: "", + TitleText: nic, + DisplayTitle: "true", + DisplayLegend: "true", + AspectRatio: "2", + SuggestedMin: "0", + SuggestedMax: "0", + } + out += telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) + } + return out +} + +func memoryTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { + data := [][]float64{} + datasetNames := []string{} + for _, field := range tableValues.Fields[1:] { + points := []float64{} + for _, val := range field.Values { + if val == "" { + break + } + stat, err := strconv.ParseFloat(val, 64) + if err != nil { + slog.Error("error parsing stat", slog.String("error", err.Error())) + return "" + } + points = append(points, stat) + } + if len(points) > 0 { + data = append(data, points) + datasetNames = append(datasetNames, field.Name) + } + } + chartConfig := report.ChartTemplateStruct{ + ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), + XaxisText: "Time", + YaxisText: "kilobytes", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "2", + SuggestedMin: "0", + SuggestedMax: "0", + } + return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) +} + +func averageFrequencyTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { + data := [][]float64{} + datasetNames := []string{} + for _, field := range tableValues.Fields[1:] { + points := []float64{} + for _, val := range field.Values { + if val == "" { + break + } + stat, err := strconv.ParseFloat(val, 64) + if err != nil { + slog.Error("error parsing stat", slog.String("error", err.Error())) + return "" + } + points = append(points, stat) + } + if len(points) > 0 { + data = append(data, points) + datasetNames = append(datasetNames, field.Name) + } + } + chartConfig := report.ChartTemplateStruct{ + ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), + XaxisText: "Time", + YaxisText: "MHz", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "2", + SuggestedMin: "0", + SuggestedMax: "0", + } + return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) +} + +func powerTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { + data := [][]float64{} + datasetNames := []string{} + for _, field := range tableValues.Fields[1:] { + points := []float64{} + for _, val := range field.Values { + if val == "" { + break + } + stat, err := strconv.ParseFloat(val, 64) + if err != nil { + slog.Error("error parsing stat", slog.String("error", err.Error())) + return "" + } + points = append(points, stat) + } + if len(points) > 0 { + data = append(data, points) + datasetNames = append(datasetNames, field.Name) + } + } + chartConfig := report.ChartTemplateStruct{ + ID: 
fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), + XaxisText: "Time", + YaxisText: "Watts", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "2", + SuggestedMin: "0", + SuggestedMax: "0", + } + return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) +} + +func temperatureTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { + data := [][]float64{} + datasetNames := []string{} + for _, field := range tableValues.Fields[1:] { + points := []float64{} + for _, val := range field.Values { + if val == "" { + break + } + stat, err := strconv.ParseFloat(val, 64) + if err != nil { + slog.Error("error parsing stat", slog.String("error", err.Error())) + return "" + } + points = append(points, stat) + } + if len(points) > 0 { + data = append(data, points) + datasetNames = append(datasetNames, field.Name) + } + } + chartConfig := report.ChartTemplateStruct{ + ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), + XaxisText: "Time", + YaxisText: "Celsius", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "2", + SuggestedMin: "0", + SuggestedMax: "0", + } + return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) +} + +func ipcTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { + data := [][]float64{} + datasetNames := []string{} + for _, field := range tableValues.Fields[1:] { + points := []float64{} + for _, val := range field.Values { + if val == "" { + break + } + stat, err := strconv.ParseFloat(val, 64) + if err != nil { + slog.Error("error parsing stat", slog.String("error", err.Error())) + return "" + } + points = append(points, stat) + } + if len(points) > 0 { + data = append(data, points) + datasetNames = append(datasetNames, field.Name) + } + } + chartConfig := report.ChartTemplateStruct{ + ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), + XaxisText: "Time", + YaxisText: "IPC", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "2", + SuggestedMin: "0", + SuggestedMax: "0", + } + return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) +} + +func c6TelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { + data := [][]float64{} + datasetNames := []string{} + for _, field := range tableValues.Fields[1:] { + points := []float64{} + for _, val := range field.Values { + if val == "" { + break + } + stat, err := strconv.ParseFloat(val, 64) + if err != nil { + slog.Error("error parsing stat", slog.String("error", err.Error())) + return "" + } + points = append(points, stat) + } + if len(points) > 0 { + data = append(data, points) + datasetNames = append(datasetNames, field.Name) + } + } + chartConfig := report.ChartTemplateStruct{ + ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), + XaxisText: "Time", + YaxisText: "% C6 Residency", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "2", + SuggestedMin: "0", + SuggestedMax: "0", + } + return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) +} + +// instructionTelemetryTableHTMLRenderer renders instruction set usage statistics. +// Each category is a separate dataset within the chart. +// Categories with zero total usage are hidden by default. 
+// Categories are sorted in two tiers: first, all non-zero categories are sorted alphabetically; +// then, all zero-sum categories are sorted alphabetically and placed after the non-zero categories. +func instructionTelemetryTableHTMLRenderer(tableValues table.TableValues, targetname string) string { + // Collect entries with their sums so we can sort per requirements + type instrEntry struct { + name string + points []float64 + sum float64 + } + entries := []instrEntry{} + for _, field := range tableValues.Fields[1:] { // skip timestamp field + points := []float64{} + sum := 0.0 + for _, val := range field.Values { + if val == "" { // end of data for this category + break + } + stat, err := strconv.ParseFloat(val, 64) + if err != nil { + slog.Error("error parsing stat", slog.String("error", err.Error())) + return "" + } + points = append(points, stat) + sum += stat + } + if len(points) > 0 { // only include categories with at least one point + entries = append(entries, instrEntry{name: field.Name, points: points, sum: sum}) + } + } + // Partition into non-zero and zero-sum groups + nonZero := []instrEntry{} + zero := []instrEntry{} + for _, e := range entries { + if e.sum > 0 { + nonZero = append(nonZero, e) + } else { + zero = append(zero, e) + } + } + sort.Slice(nonZero, func(i, j int) bool { return nonZero[i].name < nonZero[j].name }) + sort.Slice(zero, func(i, j int) bool { return zero[i].name < zero[j].name }) + ordered := append(nonZero, zero...) + data := make([][]float64, 0, len(ordered)) + datasetNames := make([]string, 0, len(ordered)) + hiddenFlags := make([]bool, 0, len(ordered)) + for _, e := range ordered { + data = append(data, e.points) + datasetNames = append(datasetNames, e.name) + // hide zero-sum categories by default + hiddenFlags = append(hiddenFlags, e.sum == 0) + } + chartConfig := report.ChartTemplateStruct{ + ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), + XaxisText: "Time", + YaxisText: "% Samples", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "1", // extra tall due to large number of data sets + SuggestedMin: "0", + SuggestedMax: "0", + } + return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, hiddenFlags) +} + +func renderGaudiStatsChart(tableValues table.TableValues, chartStatFieldName string, titleText string, yAxisText string, suggestedMax string) string { + data := [][]float64{} + datasetNames := []string{} + // timestamp is in the first field + // find the module_id field index + moduleIdFieldIdx, err := table.GetFieldIndex("module_id", tableValues) + if err != nil { + slog.Error("no gaudi module_id field found") + return "" + } + // find the chartStatFieldName field index + chartStatFieldIndex, err := table.GetFieldIndex(chartStatFieldName, tableValues) + if err != nil { + slog.Error("no gaudi chartStatFieldName field found") + return "" + } + // group the data points by module_id + moduleStat := make(map[string][]float64) + for i := range tableValues.Fields[0].Values { + moduleId := tableValues.Fields[moduleIdFieldIdx].Values[i] + val, err := strconv.ParseFloat(tableValues.Fields[chartStatFieldIndex].Values[i], 64) + if err != nil { + slog.Error("error parsing utilization", slog.String("error", err.Error())) + return "" + } + if _, ok := moduleStat[moduleId]; !ok { + moduleStat[moduleId] = []float64{} + } + moduleStat[moduleId] = append(moduleStat[moduleId], val) + } + // sort the module ids + var moduleIds []string + for moduleId := range moduleStat { + moduleIds = 
append(moduleIds, moduleId)
+	}
+	sort.Strings(moduleIds)
+	// build the data
+	for _, moduleId := range moduleIds {
+		if len(moduleStat[moduleId]) > 0 {
+			data = append(data, moduleStat[moduleId])
+			datasetNames = append(datasetNames, "module "+moduleId)
+		}
+	}
+	chartConfig := report.ChartTemplateStruct{
+		ID:            fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)),
+		XaxisText:     "Time",
+		YaxisText:     yAxisText,
+		TitleText:     titleText,
+		DisplayTitle:  "true",
+		DisplayLegend: "true",
+		AspectRatio:   "2",
+		SuggestedMin:  "0",
+		SuggestedMax:  suggestedMax,
+	}
+	return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil)
+}
+
+func gaudiTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string {
+	out := ""
+	out += renderGaudiStatsChart(tableValues, "utilization.aip [%]", "Utilization", "% Utilization", "100")
+	out += renderGaudiStatsChart(tableValues, "memory.free [MiB]", "Memory Free", "Memory (MiB)", "0")
+	out += renderGaudiStatsChart(tableValues, "memory.used [MiB]", "Memory Used", "Memory (MiB)", "0")
+	out += renderGaudiStatsChart(tableValues, "power.draw [W]", "Power", "Watts", "0")
+	out += renderGaudiStatsChart(tableValues, "temperature.aip [C]", "Temperature", "Temperature (C)", "0")
+	return out
+}
+
+func pduTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string {
+	data := [][]float64{}
+	datasetNames := []string{}
+	for _, field := range tableValues.Fields[1:] {
+		points := []float64{}
+		for _, val := range field.Values {
+			if val == "" {
+				break
+			}
+			stat, err := strconv.ParseFloat(val, 64)
+			if err != nil {
+				slog.Error("error parsing stat", slog.String("error", err.Error()))
+				return ""
+			}
+			points = append(points, stat)
+		}
+		if len(points) > 0 {
+			data = append(data, points)
+			datasetNames = append(datasetNames, field.Name)
+		}
+	}
+	chartConfig := report.ChartTemplateStruct{
+		ID:            fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)),
+		XaxisText:     "Time",
+		YaxisText:     "Watts",
+		TitleText:     "",
+		DisplayTitle:  "false",
+		DisplayLegend: "true",
+		AspectRatio:   "2",
+		SuggestedMin:  "0",
+		SuggestedMax:  "0",
+	}
+	return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil)
+}
diff --git a/internal/table/cache.go b/internal/common/cache.go
similarity index 85%
rename from internal/table/cache.go
rename to internal/common/cache.go
index 13671e68..adf5a63f 100644
--- a/internal/table/cache.go
+++ b/internal/common/cache.go
@@ -1,4 +1,4 @@
-package table
+package common
 
 // Copyright (C) 2021-2025 Intel Corporation
 // SPDX-License-Identifier: BSD-3-Clause
@@ -27,7 +27,7 @@ func GetL3MSRMB(outputs map[string]script.ScriptOutput) (instance float64, total
 		err = fmt.Errorf("L3 cache way count is zero")
 		return 0, 0, err
 	}
-	sockets := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)
+	sockets := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)
 	if sockets == "" {
 		return 0, 0, fmt.Errorf("failed to parse sockets from lscpu output")
 	}
@@ -69,7 +69,7 @@ func GetL3MSRMB(outputs map[string]script.ScriptOutput) (instance float64, total
 // GetL3LscpuMB returns the L3 cache size in MB as reported by lscpu.
func GetL3LscpuMB(outputs map[string]script.ScriptOutput) (instance float64, total float64, err error) { - lscpuCache, err := parseLscpuCacheOutput(outputs[script.LscpuCacheScriptName].Stdout) + lscpuCache, err := ParseLscpuCacheOutput(outputs[script.LscpuCacheScriptName].Stdout) if err != nil { return 0, 0, err } @@ -88,11 +88,11 @@ func GetL3LscpuMB(outputs map[string]script.ScriptOutput) (instance float64, tot return instance, total, nil } -// l3FromOutput attempts to retrieve the L3 cache size in megabytes from the provided +// L3FromOutput attempts to retrieve the L3 cache size in megabytes from the provided // script outputs. It first tries to obtain the value using GetL3MSRMB. If that fails, // it falls back to using lscpu cache output. If both methods fail, it logs the errors and // returns an empty string. On success, it returns the formatted cache size as a string. -func l3FromOutput(outputs map[string]script.ScriptOutput) string { +func L3FromOutput(outputs map[string]script.ScriptOutput) string { l3InstanceMB, l3TotalMB, err := GetL3MSRMB(outputs) if err != nil { slog.Info("Could not get L3 size from MSR, falling back to lscpu", slog.String("error", err.Error())) @@ -102,24 +102,10 @@ func l3FromOutput(outputs map[string]script.ScriptOutput) string { return "" } } - return fmt.Sprintf("%s/%s", formatCacheSizeMB(l3InstanceMB), formatCacheSizeMB(l3TotalMB)) + return fmt.Sprintf("%s/%s", FormatCacheSizeMB(l3InstanceMB), FormatCacheSizeMB(l3TotalMB)) } -// l3InstanceFromOutput retrieves the L3 cache size per instance (per socket on Intel) in megabytes -func l3InstanceFromOutput(outputs map[string]script.ScriptOutput) string { - l3InstanceMB, _, err := GetL3MSRMB(outputs) - if err != nil { - slog.Info("Could not get L3 size from MSR, falling back to lscpu", slog.String("error", err.Error())) - l3InstanceMB, _, err = GetL3LscpuMB(outputs) - if err != nil { - slog.Error("Could not get L3 size from lscpu", slog.String("error", err.Error())) - return "" - } - } - return formatCacheSizeMB(l3InstanceMB) -} - -// l3PerCoreFromOutput calculates the amount of L3 cache (in MiB) available per core +// L3PerCoreFromOutput calculates the amount of L3 cache (in MiB) available per core // based on the provided script outputs. It first checks if the host is virtualized, // in which case it returns an empty string since the calculation is not applicable. // It parses the number of cores per socket and the number of sockets from the lscpu @@ -127,13 +113,13 @@ func l3InstanceFromOutput(outputs map[string]script.ScriptOutput) string { // back to parsing lscpu output if necessary. The result is formatted as a string // with up to three decimal places, followed by " MiB". If any required data cannot // be parsed, it logs an error and returns an empty string. 
-func l3PerCoreFromOutput(outputs map[string]script.ScriptOutput) string { - virtualization := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Virtualization.*:\s*(.+?)$`) +func L3PerCoreFromOutput(outputs map[string]script.ScriptOutput) string { + virtualization := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Virtualization.*:\s*(.+?)$`) if virtualization == "full" { slog.Info("Can't calculate L3 per Core on virtualized host.") return "" } - coresPerSocket, err := strconv.Atoi(valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket.*:\s*(.+?)$`)) + coresPerSocket, err := strconv.Atoi(ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket.*:\s*(.+?)$`)) if err != nil { slog.Error("failed to parse cores per socket", slog.String("error", err.Error())) return "" @@ -142,7 +128,7 @@ func l3PerCoreFromOutput(outputs map[string]script.ScriptOutput) string { slog.Error("cores per socket is zero") return "" } - sockets, err := strconv.Atoi(valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+?)$`)) + sockets, err := strconv.Atoi(ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+?)$`)) if err != nil { slog.Error("failed to parse sockets from lscpu output", slog.String("error", err.Error())) return "" @@ -161,12 +147,12 @@ func l3PerCoreFromOutput(outputs map[string]script.ScriptOutput) string { return "" } } - return formatCacheSizeMB(l3TotalMB / (float64(coresPerSocket) * float64(sockets))) + return FormatCacheSizeMB(l3TotalMB / (float64(coresPerSocket) * float64(sockets))) } -// formatCacheSizeMB formats a floating-point cache size value (in MB) as a string +// FormatCacheSizeMB formats a floating-point cache size value (in MB) as a string // with the "M" unit suffix. -func formatCacheSizeMB(size float64) string { +func FormatCacheSizeMB(size float64) string { val := strconv.FormatFloat(size, 'f', 3, 64) val = strings.TrimRight(val, "0") // trim trailing zeros val = strings.TrimRight(val, ".") // trim decimal point if trailing @@ -185,14 +171,14 @@ type lscpuCacheEntry struct { CoherencySize string } -// parseLscpuCacheOutput parses the output of `lscpu -C` (text/tabular) +// ParseLscpuCacheOutput parses the output of `lscpu -C` (text/tabular) // Example output: // NAME ONE-SIZE ALL-SIZE WAYS TYPE LEVEL SETS PHY-LINE COHERENCY-SIZE // L1d 48K 8.1M 12 Data 1 64 1 64 // L1i 64K 10.8M 16 Instruction 1 64 1 64 // L2 2M 344M 16 Unified 2 2048 1 64 // L3 336M 672M 16 Unified 3 344064 1 64 -func parseLscpuCacheOutput(LscpuCacheOutput string) (map[string]lscpuCacheEntry, error) { +func ParseLscpuCacheOutput(LscpuCacheOutput string) (map[string]lscpuCacheEntry, error) { trimmed := strings.TrimSpace(LscpuCacheOutput) if trimmed == "" { slog.Warn("lscpu cache output is empty") @@ -258,8 +244,8 @@ func parseLscpuCacheOutput(LscpuCacheOutput string) (map[string]lscpuCacheEntry, return out, nil } -// l1l2CacheSizeFromLscpuCache extracts the data cache size from the provided lscpuCacheEntry. -func l1l2CacheSizeFromLscpuCache(entry lscpuCacheEntry) string { +// L1l2CacheSizeFromLscpuCache extracts the data cache size from the provided lscpuCacheEntry. 
+func L1l2CacheSizeFromLscpuCache(entry lscpuCacheEntry) string { return entry.OneSize } diff --git a/internal/table/cache_test.go b/internal/common/cache_test.go similarity index 97% rename from internal/table/cache_test.go rename to internal/common/cache_test.go index d64a7268..a446c745 100644 --- a/internal/table/cache_test.go +++ b/internal/common/cache_test.go @@ -1,4 +1,4 @@ -package table +package common // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause @@ -98,7 +98,7 @@ func TestParseLscpuCacheOutput(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result, err := parseLscpuCacheOutput(tt.input) + result, err := ParseLscpuCacheOutput(tt.input) if tt.expectedError { require.Error(t, err) assert.Nil(t, result) diff --git a/internal/common/common.go b/internal/common/common.go index f446596b..e2b8cbbe 100644 --- a/internal/common/common.go +++ b/internal/common/common.go @@ -51,14 +51,14 @@ type FlagGroup struct { type TargetScriptOutputs struct { TargetName string ScriptOutputs map[string]script.ScriptOutput - TableNames []string + Tables []table.TableDefinition } func (tso *TargetScriptOutputs) GetScriptOutputs() map[string]script.ScriptOutput { return tso.ScriptOutputs } -func (tso *TargetScriptOutputs) GetTableNames() []string { - return tso.TableNames +func (tso *TargetScriptOutputs) GetTables() []table.TableDefinition { + return tso.Tables } const ( @@ -68,7 +68,7 @@ const ( type Category struct { FlagName string - TableNames []string + Tables []table.TableDefinition FlagVar *bool DefaultValue bool Help string @@ -91,7 +91,7 @@ type AdhocFunc func(AppContext, map[string]script.ScriptOutput, target.Target, p type ReportingCommand struct { Cmd *cobra.Command ReportNamePost string - TableNames []string + Tables []table.TableDefinition ScriptParams map[string]string SummaryFunc SummaryFunc SummaryTableName string @@ -151,7 +151,7 @@ func (rc *ReportingCommand) Run() error { // get the targets var targetErrs []error var err error - myTargets, targetErrs, err = GetTargets(rc.Cmd, elevatedPrivilegesRequired(rc.TableNames), false, localTempDir) + myTargets, targetErrs, err = GetTargets(rc.Cmd, elevatedPrivilegesRequired(rc.Tables), false, localTempDir) if err != nil { fmt.Fprintf(os.Stderr, "Error: %v\n", err) slog.Error(err.Error()) @@ -196,7 +196,7 @@ func (rc *ReportingCommand) Run() error { myTargets = slices.Delete(myTargets, indicesToRemove[i], indicesToRemove[i]+1) } // collect data from targets - orderedTargetScriptOutputs, err = outputsFromTargets(rc.Cmd, myTargets, rc.TableNames, rc.ScriptParams, multiSpinner.Status, localTempDir) + orderedTargetScriptOutputs, err = outputsFromTargets(rc.Cmd, myTargets, rc.Tables, rc.ScriptParams, multiSpinner.Status, localTempDir) if err != nil { fmt.Fprintf(os.Stderr, "Error: %v\n", err) slog.Error(err.Error()) @@ -327,7 +327,7 @@ func FlagValidationError(cmd *cobra.Command, msg string) error { func (rc *ReportingCommand) createRawReports(appContext AppContext, orderedTargetScriptOutputs []TargetScriptOutputs) ([]string, error) { var reports []string for _, targetScriptOutputs := range orderedTargetScriptOutputs { - reportBytes, err := report.CreateRawReport(rc.TableNames, targetScriptOutputs.ScriptOutputs, targetScriptOutputs.TargetName) + reportBytes, err := report.CreateRawReport(rc.Tables, targetScriptOutputs.ScriptOutputs, targetScriptOutputs.TargetName) if err != nil { err = fmt.Errorf("failed to create raw report: %w", err) return reports, err @@ -365,7 +365,7 @@ 
func (rc *ReportingCommand) createReports(appContext AppContext, orderedTargetSc allTargetsTableValues := make([][]table.TableValues, 0) for _, targetScriptOutputs := range orderedTargetScriptOutputs { // process the tables, i.e., get field values from script output - allTableValues, err := table.ProcessTables(targetScriptOutputs.TableNames, targetScriptOutputs.ScriptOutputs) + allTableValues, err := table.ProcessTables(targetScriptOutputs.Tables, targetScriptOutputs.ScriptOutputs) if err != nil { err = fmt.Errorf("failed to process collected data: %w", err) return nil, err @@ -408,7 +408,7 @@ func (rc *ReportingCommand) createReports(appContext AppContext, orderedTargetSc }) // create the report(s) for _, format := range formats { - reportBytes, err := report.Create(format, allTableValues, targetScriptOutputs.TargetName) + reportBytes, err := report.Create(format, allTableValues, targetScriptOutputs.TargetName, rc.SummaryTableName) if err != nil { err = fmt.Errorf("failed to create report: %w", err) return nil, err @@ -446,7 +446,7 @@ func (rc *ReportingCommand) createReports(appContext AppContext, orderedTargetSc if !slices.Contains(formats, format) { continue } - reportBytes, err := report.CreateMultiTarget(format, allTargetsTableValues, targetNames, mergedTableNames) + reportBytes, err := report.CreateMultiTarget(format, allTargetsTableValues, targetNames, mergedTableNames, rc.SummaryTableName) if err != nil { err = fmt.Errorf("failed to create multi-target %s report: %w", format, err) return nil, err @@ -478,49 +478,51 @@ func extractTableNamesFromValues(allTargetsTableValues [][]table.TableValues) [] } // outputsFromInput reads the raw file(s) and returns the data in the order of the raw files +// TODO: this won't work post re-factor func outputsFromInput(summaryTableName string) ([]TargetScriptOutputs, error) { - orderedTargetScriptOutputs := []TargetScriptOutputs{} - tableNames := []string{} // use the table names from the raw files - // read the raw file(s) as JSON - rawReports, err := report.ReadRawReports(FlagInput) - if err != nil { - err = fmt.Errorf("failed to read raw file(s): %w", err) - return nil, err - } - for _, rawReport := range rawReports { - for _, tableName := range rawReport.TableNames { // just in case someone tries to use the raw files that were collected with a different set of categories - // filter out tables that we add after processing - if tableName == TableNameInsights || tableName == TableNamePerfspect || tableName == summaryTableName { - continue - } - tableNames = util.UniqueAppend(tableNames, tableName) - } - orderedTargetScriptOutputs = append(orderedTargetScriptOutputs, TargetScriptOutputs{TargetName: rawReport.TargetName, ScriptOutputs: rawReport.ScriptOutputs, TableNames: tableNames}) - } - return orderedTargetScriptOutputs, nil + return nil, fmt.Errorf("outputsFromInput not implemented post refactor") + // orderedTargetScriptOutputs := []TargetScriptOutputs{} + // tables := []table.TableDefinition{} + // // read the raw file(s) as JSON + // rawReports, err := report.ReadRawReports(FlagInput) + // if err != nil { + // err = fmt.Errorf("failed to read raw file(s): %w", err) + // return nil, err + // } + // for _, rawReport := range rawReports { + // for _, tableName := range rawReport.TableNames { // just in case someone tries to use the raw files that were collected with a different set of categories + // // filter out tables that we add after processing + // if tableName == TableNameInsights || tableName == TableNamePerfspect || tableName == 
summaryTableName { + // continue + // } + // tables = append(tables, table.GetTableByName(tableName)) + // } + // orderedTargetScriptOutputs = append(orderedTargetScriptOutputs, TargetScriptOutputs{TargetName: rawReport.TargetName, ScriptOutputs: rawReport.ScriptOutputs, Tables: tables}) + // } + // return orderedTargetScriptOutputs, nil } // outputsFromTargets runs the scripts on the targets and returns the data in the order of the targets -func outputsFromTargets(cmd *cobra.Command, myTargets []target.Target, tableNames []string, scriptParams map[string]string, statusUpdate progress.MultiSpinnerUpdateFunc, localTempDir string) ([]TargetScriptOutputs, error) { +func outputsFromTargets(cmd *cobra.Command, myTargets []target.Target, tables []table.TableDefinition, scriptParams map[string]string, statusUpdate progress.MultiSpinnerUpdateFunc, localTempDir string) ([]TargetScriptOutputs, error) { orderedTargetScriptOutputs := []TargetScriptOutputs{} channelTargetScriptOutputs := make(chan TargetScriptOutputs) channelError := make(chan error) // create the list of tables and associated scripts for each target - targetTableNames := [][]string{} + targetTables := [][]table.TableDefinition{} targetScriptNames := [][]string{} for targetIdx, target := range myTargets { - targetTableNames = append(targetTableNames, []string{}) + targetTables = append(targetTables, []table.TableDefinition{}) targetScriptNames = append(targetScriptNames, []string{}) - for _, tableName := range tableNames { - if table.IsTableForTarget(tableName, target) { + for _, tbl := range tables { + if table.IsTableForTarget(tbl, target) { // add table to list of tables to collect - targetTableNames[targetIdx] = util.UniqueAppend(targetTableNames[targetIdx], tableName) + targetTables[targetIdx] = append(targetTables[targetIdx], tbl) // add scripts to list of scripts to run - for _, scriptName := range table.GetScriptNamesForTable(tableName) { + for _, scriptName := range tbl.ScriptNames { targetScriptNames[targetIdx] = util.UniqueAppend(targetScriptNames[targetIdx], scriptName) } } else { - slog.Info("table not supported for target", slog.String("table", tableName), slog.String("target", target.GetName())) + slog.Info("table not supported for target", slog.String("table", tbl.Name), slog.String("target", target.GetName())) } } } @@ -549,7 +551,7 @@ func outputsFromTargets(cmd *cobra.Command, myTargets []target.Target, tableName for targetIdx, target := range myTargets { for _, targetScriptOutputs := range allTargetScriptOutputs { if targetScriptOutputs.TargetName == target.GetName() { - targetScriptOutputs.TableNames = targetTableNames[targetIdx] + targetScriptOutputs.Tables = targetTables[targetIdx] orderedTargetScriptOutputs = append(orderedTargetScriptOutputs, targetScriptOutputs) break } @@ -559,10 +561,9 @@ func outputsFromTargets(cmd *cobra.Command, myTargets []target.Target, tableName } // elevatedPrivilegesRequired returns true if any of the scripts needed for the tables require elevated privileges -func elevatedPrivilegesRequired(tableNames []string) bool { - for _, tableName := range tableNames { - // add scripts to list of scripts to run - for _, scriptName := range table.GetScriptNamesForTable(tableName) { +func elevatedPrivilegesRequired(tables []table.TableDefinition) bool { + for _, tbl := range tables { + for _, scriptName := range tbl.ScriptNames { script := script.GetScriptByName(scriptName) if script.Superuser { return true diff --git a/internal/table/frequency.go b/internal/common/frequency.go similarity index 
82% rename from internal/table/frequency.go rename to internal/common/frequency.go index fb2b9ce0..5b968731 100644 --- a/internal/table/frequency.go +++ b/internal/common/frequency.go @@ -1,4 +1,4 @@ -package table +package common import ( "fmt" @@ -14,12 +14,12 @@ import ( // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -// baseFrequencyFromOutput gets base core frequency +// BaseFrequencyFromOutput gets base core frequency // // 1st option) /sys/devices/system/cpu/cpu0/cpufreq/base_frequency // 2nd option) from dmidecode "Current Speed" // 3nd option) parse it from the model name -func baseFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { +func BaseFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { cmdout := strings.TrimSpace(outputs[script.BaseFrequencyScriptName].Stdout) if cmdout != "" { freqf, err := strconv.ParseFloat(cmdout, 64) @@ -28,7 +28,7 @@ func baseFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { return fmt.Sprintf("%.1fGHz", freqf) } } - currentSpeedVal := valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "4", `Current Speed:\s(.*)$`) + currentSpeedVal := ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "4", `Current Speed:\s(.*)$`) tokens := strings.Split(currentSpeedVal, " ") if len(tokens) == 2 { num, err := strconv.ParseFloat(tokens[0], 64) @@ -42,7 +42,7 @@ func baseFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { } } // the frequency (if included) is at the end of the model name in lscpu's output - modelName := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name.*:\s*(.+?)$`) + modelName := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name.*:\s*(.+?)$`) tokens = strings.Split(modelName, " ") if len(tokens) > 0 { lastToken := tokens[len(tokens)-1] @@ -91,7 +91,7 @@ func padFrequencies(freqs []int, desiredLength int) ([]int, error) { return freqs, nil } -// getSpecFrequencyBuckets +// GetSpecFrequencyBuckets gets the core frequency buckets from the script output // returns slice of rows // first row is header // each row is a slice of strings @@ -101,7 +101,7 @@ func padFrequencies(freqs []int, desiredLength int) ([]int, error) { // "64-85", "32-43", "3.5", "3.5", "3.3", "3.2", "3.1" // ... // the "cores per die" column is only present for some architectures -func getSpecFrequencyBuckets(outputs map[string]script.ScriptOutput) ([][]string, error) { +func GetSpecFrequencyBuckets(outputs map[string]script.ScriptOutput) ([][]string, error) { arch := UarchFromOutput(outputs) if arch == "" { return nil, fmt.Errorf("uarch is required") @@ -230,14 +230,14 @@ func getSpecFrequencyBuckets(outputs map[string]script.ScriptOutput) ([][]string return specCoreFreqs, nil } -// expandTurboFrequencies expands the turbo frequencies to a list of frequencies +// ExpandTurboFrequencies expands the turbo frequencies to a list of frequencies // input is the output of getSpecFrequencyBuckets, e.g.: // "cores", "cores per die", "sse", "avx2", "avx512", "avx512h", "amx" // "0-41", "0-20", "3.5", "3.5", "3.3", "3.2", "3.1" // "42-63", "21-31", "3.5", "3.5", "3.3", "3.2", "3.1" // ... 
// output is the expanded list of the frequencies for the requested ISA -func expandTurboFrequencies(specFrequencyBuckets [][]string, isa string) ([]string, error) { +func ExpandTurboFrequencies(specFrequencyBuckets [][]string, isa string) ([]string, error) { if len(specFrequencyBuckets) < 2 || len(specFrequencyBuckets[0]) < 2 { return nil, fmt.Errorf("unable to parse core frequency buckets") } @@ -270,12 +270,12 @@ func expandTurboFrequencies(specFrequencyBuckets [][]string, isa string) ([]stri return freqs, nil } -// maxFrequencyFromOutputs gets max core frequency +// MaxFrequencyFromOutput gets max core frequency // // 1st option) /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq // 2nd option) from MSR/tpmi // 3rd option) from dmidecode "Max Speed" -func maxFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { +func MaxFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { cmdout := strings.TrimSpace(outputs[script.MaximumFrequencyScriptName].Stdout) if cmdout != "" { freqf, err := strconv.ParseFloat(cmdout, 64) @@ -285,18 +285,18 @@ func maxFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { } } // get the max frequency from the MSR/tpmi - specCoreFrequencies, err := getSpecFrequencyBuckets(outputs) + specCoreFrequencies, err := GetSpecFrequencyBuckets(outputs) if err == nil { - sseFreqs := getSSEFreqsFromBuckets(specCoreFrequencies) + sseFreqs := GetSSEFreqsFromBuckets(specCoreFrequencies) if len(sseFreqs) > 0 { // max (single-core) frequency is the first SSE frequency return sseFreqs[0] + "GHz" } } - return valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "4", `Max Speed:\s(.*)`) + return ValFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "4", `Max Speed:\s(.*)`) } -func getSSEFreqsFromBuckets(buckets [][]string) []string { +func GetSSEFreqsFromBuckets(buckets [][]string) []string { if len(buckets) < 2 { return nil } @@ -321,12 +321,12 @@ func getSSEFreqsFromBuckets(buckets [][]string) []string { return sse } -func allCoreMaxFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { - specCoreFrequencies, err := getSpecFrequencyBuckets(outputs) +func AllCoreMaxFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { + specCoreFrequencies, err := GetSpecFrequencyBuckets(outputs) if err != nil { return "" } - sseFreqs := getSSEFreqsFromBuckets(specCoreFrequencies) + sseFreqs := GetSSEFreqsFromBuckets(specCoreFrequencies) if len(sseFreqs) < 1 { return "" } @@ -334,47 +334,7 @@ func allCoreMaxFrequencyFromOutput(outputs map[string]script.ScriptOutput) strin return sseFreqs[len(sseFreqs)-1] + "GHz" } -// sseFrequenciesFromOutput gets the bucketed SSE frequencies from the output -// and returns a compact string representation with consolidated ranges, e.g.: -// "1-40/3.5, 41-60/3.4, 61-86/3.2" -func sseFrequenciesFromOutput(outputs map[string]script.ScriptOutput) string { - specCoreFrequencies, err := getSpecFrequencyBuckets(outputs) - if err != nil { - return "" - } - sseFreqs := getSSEFreqsFromBuckets(specCoreFrequencies) - if len(sseFreqs) < 1 { - return "" - } - - var result []string - i := 1 - for i < len(specCoreFrequencies) { - startIdx := i - currentFreq := sseFreqs[i-1] - - // Find consecutive buckets with the same frequency - for i < len(specCoreFrequencies) && sseFreqs[i-1] == currentFreq { - i++ - } - endIdx := i - 1 - - // Extract start and end core numbers from the ranges - startRange := strings.Split(specCoreFrequencies[startIdx][0], "-")[0] - endRange := 
strings.Split(specCoreFrequencies[endIdx][0], "-")[1] - - // Format the consolidated range - if startRange == endRange { - result = append(result, fmt.Sprintf("%s/%s", startRange, currentFreq)) - } else { - result = append(result, fmt.Sprintf("%s-%s/%s", startRange, endRange, currentFreq)) - } - } - - return strings.Join(result, ", ") -} - -func uncoreMinMaxDieFrequencyFromOutput(maxFreq bool, computeDie bool, outputs map[string]script.ScriptOutput) string { +func UncoreMinMaxDieFrequencyFromOutput(maxFreq bool, computeDie bool, outputs map[string]script.ScriptOutput) string { // find the first die that matches requrested die type (compute or I/O) re := regexp.MustCompile(`Read bits \d+:\d+ value (\d+) from TPMI ID .* for entry (\d+) in instance (\d+)`) var instance, entry string @@ -431,7 +391,7 @@ func uncoreMinMaxDieFrequencyFromOutput(maxFreq bool, computeDie bool, outputs m return fmt.Sprintf("%.1fGHz", float64(parsed)/10) } -func uncoreMinMaxFrequencyFromOutput(maxFreq bool, outputs map[string]script.ScriptOutput) string { +func UncoreMinMaxFrequencyFromOutput(maxFreq bool, outputs map[string]script.ScriptOutput) string { var parsed int64 var err error var scriptName string @@ -454,10 +414,10 @@ func uncoreMinMaxFrequencyFromOutput(maxFreq bool, outputs map[string]script.Scr return fmt.Sprintf("%.1fGHz", float64(parsed)/10) } -func uncoreMinFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { - return uncoreMinMaxFrequencyFromOutput(false, outputs) +func UncoreMinFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { + return UncoreMinMaxFrequencyFromOutput(false, outputs) } -func uncoreMaxFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { - return uncoreMinMaxFrequencyFromOutput(true, outputs) +func UncoreMaxFrequencyFromOutput(outputs map[string]script.ScriptOutput) string { + return UncoreMinMaxFrequencyFromOutput(true, outputs) } diff --git a/internal/table/frequency_test.go b/internal/common/frequency_test.go similarity index 98% rename from internal/table/frequency_test.go rename to internal/common/frequency_test.go index 275d104b..1483a513 100644 --- a/internal/table/frequency_test.go +++ b/internal/common/frequency_test.go @@ -1,4 +1,4 @@ -package table +package common // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause @@ -196,7 +196,7 @@ func TestExpandTurboFrequencies(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := expandTurboFrequencies(tt.buckets, tt.isa) + got, err := ExpandTurboFrequencies(tt.buckets, tt.isa) if (err != nil) != tt.expectErr { t.Errorf("expandTurboFrequencies() error = %v, expectErr %v", err, tt.expectErr) return diff --git a/internal/table/nic.go b/internal/common/nic.go similarity index 95% rename from internal/table/nic.go rename to internal/common/nic.go index 8e6fa47d..765f3bcc 100644 --- a/internal/table/nic.go +++ b/internal/common/nic.go @@ -1,7 +1,7 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package table +package common import ( "fmt" @@ -42,7 +42,7 @@ type nicInfo struct { RPSCPUs map[string]string } -func parseNicInfo(scriptOutput string) []nicInfo { +func ParseNicInfo(scriptOutput string) []nicInfo { var nics []nicInfo for nicOutput := range strings.SplitSeq(scriptOutput, "----------------------------------------") { if strings.TrimSpace(nicOutput) == "" { @@ -222,8 +222,8 @@ func extractFunction(busAddr string) int { return funcNum } -func 
nicIRQMappingsFromOutput(outputs map[string]script.ScriptOutput) [][]string { - nics := parseNicInfo(outputs[script.NicInfoScriptName].Stdout) +func NICIrqMappingsFromOutput(outputs map[string]script.ScriptOutput) [][]string { + nics := ParseNicInfo(outputs[script.NicInfoScriptName].Stdout) if len(nics) == 0 { return nil } @@ -238,8 +238,8 @@ func nicIRQMappingsFromOutput(outputs map[string]script.ScriptOutput) [][]string return nicIRQMappings } -func nicSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - nics := parseNicInfo(outputs[script.NicInfoScriptName].Stdout) +func NICSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + nics := ParseNicInfo(outputs[script.NicInfoScriptName].Stdout) if len(nics) == 0 { return "N/A" } diff --git a/internal/table/nic_test.go b/internal/common/nic_test.go similarity index 88% rename from internal/table/nic_test.go rename to internal/common/nic_test.go index c5e4ea67..2e35a191 100644 --- a/internal/table/nic_test.go +++ b/internal/common/nic_test.go @@ -1,10 +1,9 @@ -package table +package common // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause import ( - "perfspect/internal/script" "testing" ) @@ -183,7 +182,7 @@ tx-usecs: 1 Adaptive RX: off TX: off ----------------------------------------` - nics := parseNicInfo(sampleOutput) + nics := ParseNicInfo(sampleOutput) if len(nics) != 4 { t.Fatalf("Expected 4 NICs, got %d", len(nics)) @@ -215,91 +214,8 @@ Adaptive RX: off TX: off } } -func TestNicTableValuesWithCardPort(t *testing.T) { - // Sample output simulating the scenario from the issue - sampleOutput := `Interface: eth2 -bus-info: 0000:32:00.0 -Vendor: Intel Corporation -Model: Ethernet Controller 10G X550T -Speed: 1000Mb/s -Link detected: yes ----------------------------------------- -Interface: eth3 -bus-info: 0000:32:00.1 -Vendor: Intel Corporation -Model: Ethernet Controller 10G X550T -Speed: Unknown! 
-Link detected: no ----------------------------------------- -Interface: eth0 -bus-info: 0000:c0:00.0 -Vendor: Intel Corporation -Model: Ethernet Controller E810-C for QSFP -Speed: 100000Mb/s -Link detected: yes ----------------------------------------- -Interface: eth1 -bus-info: 0000:c0:00.1 -Vendor: Intel Corporation -Model: Ethernet Controller E810-C for QSFP -Speed: 100000Mb/s -Link detected: yes -----------------------------------------` - - outputs := map[string]script.ScriptOutput{ - script.NicInfoScriptName: {Stdout: sampleOutput}, - } - - fields := nicTableValues(outputs) - - // Find the "Card / Port" field - var cardPortField Field - found := false - for _, field := range fields { - if field.Name == "Card / Port" { - cardPortField = field - found = true - break - } - } - - if !found { - t.Fatal("Card / Port field not found in NIC table") - } - - // Verify we have 4 entries - if len(cardPortField.Values) != 4 { - t.Fatalf("Expected 4 Card / Port values, got %d", len(cardPortField.Values)) - } - - // Find the Name field to match values - var nameField Field - for _, field := range fields { - if field.Name == "Name" { - nameField = field - break - } - } - - // Verify card/port assignments - expectedCardPort := map[string]string{ - "eth2": "1 / 1", - "eth3": "1 / 2", - "eth0": "2 / 1", - "eth1": "2 / 2", - } - - for i, name := range nameField.Values { - expected := expectedCardPort[name] - actual := cardPortField.Values[i] - if actual != expected { - t.Errorf("NIC %s: expected Card / Port %q, got %q", name, expected, actual) - } - } -} - func TestParseNicInfo(t *testing.T) { - nics := parseNicInfo(nicinfo) + nics := ParseNicInfo(nicinfo) if len(nics) != 3 { t.Errorf("expected 3 NICs, got %d", len(nics)) } @@ -435,7 +351,7 @@ rx-usecs: 50 tx-usecs: 50 ---------------------------------------- ` - nics := parseNicInfo(nicinfoWithVF) + nics := ParseNicInfo(nicinfoWithVF) if len(nics) != 2 { t.Fatalf("expected 2 NICs, got %d", len(nics)) } diff --git a/internal/table/power.go b/internal/common/power.go similarity index 85% rename from internal/table/power.go rename to internal/common/power.go index c1a32071..90da61a1 100644 --- a/internal/table/power.go +++ b/internal/common/power.go @@ -1,7 +1,7 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package table +package common import ( "encoding/csv" @@ -11,125 +11,10 @@ import ( "strings" "perfspect/internal/script" + "perfspect/internal/table" ) -func elcFieldValuesFromOutput(outputs map[string]script.ScriptOutput) (fieldValues []Field) { - if outputs[script.ElcScriptName].Stdout == "" { - return - } - r := csv.NewReader(strings.NewReader(outputs[script.ElcScriptName].Stdout)) - rows, err := r.ReadAll() - if err != nil { - return - } - if len(rows) < 2 { - return - } - // first row is headers - for fieldNamesIndex, fieldName := range rows[0] { - values := []string{} - // value rows - for _, row := range rows[1:] { - values = append(values, row[fieldNamesIndex]) - } - fieldValues = append(fieldValues, Field{Name: fieldName, Values: values}) - } - - // let's add an interpretation of the values in an additional column - values := []string{} - // value rows - for _, row := range rows[1:] { - var mode string - if row[2] == "IO" { - if row[5] == "0" && row[6] == "0" && row[7] == "0" { - mode = "Latency Optimized" - } else if row[5] == "800" && row[6] == "10" && row[7] == "94" { - mode = "Default" - } else { - mode = "Custom" - } - } else { // COMPUTE - switch row[5] { - case "0": - mode = "Latency 
Optimized" - case "1200": - mode = "Default" - default: - mode = "Custom" - } - } - values = append(values, mode) - } - fieldValues = append(fieldValues, Field{Name: "Mode", Values: values}) - return -} - -func elcSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - fieldValues := elcFieldValuesFromOutput(outputs) - if len(fieldValues) == 0 { - return "" - } - if len(fieldValues) < 10 { - return "" - } - if len(fieldValues[9].Values) == 0 { - return "" - } - summary := fieldValues[9].Values[0] - for _, value := range fieldValues[9].Values[1:] { - if value != summary { - return "mixed" - } - } - return summary -} - -// epbFromOutput gets EPB value from script outputs -func epbFromOutput(outputs map[string]script.ScriptOutput) string { - if outputs[script.EpbScriptName].Exitcode != 0 || len(outputs[script.EpbScriptName].Stdout) == 0 { - slog.Warn("EPB scripts failed or produced no output") - return "" - } - epb := strings.TrimSpace(outputs[script.EpbScriptName].Stdout) - msr, err := strconv.ParseInt(epb, 16, 0) - if err != nil { - slog.Error("failed to parse EPB value", slog.String("error", err.Error()), slog.String("epb", epb)) - return "" - } - return epbValToLabel(int(msr)) -} - -func epbValToLabel(msr int) string { - var val string - if msr >= 0 && msr <= 3 { - val = "Performance" - } else if msr >= 4 && msr <= 7 { - val = "Balanced Performance" - } else if msr >= 8 && msr <= 11 { - val = "Balanced Energy" - } else if msr >= 12 { - val = "Energy Efficient" - } - return fmt.Sprintf("%s (%d)", val, msr) -} - -func eppValToLabel(msr int) string { - var val string - if msr == 128 { - val = "Normal" - } else if msr < 128 && msr > 64 { - val = "Balanced Performance" - } else if msr <= 64 { - val = "Performance" - } else if msr > 128 && msr < 192 { - val = "Balanced Powersave" - } else { - val = "Powersave" - } - return fmt.Sprintf("%s (%d)", val, msr) -} - -// eppFromOutput gets EPP value from script outputs +// EPPFromOutput gets EPP value from script outputs // IF 0x774[42] is '1' AND 0x774[60] is '0' // THEN // @@ -138,7 +23,7 @@ func eppValToLabel(msr int) string { // ELSE // // get EPP from 0x774 (per core) -func eppFromOutput(outputs map[string]script.ScriptOutput) string { +func EPPFromOutput(outputs map[string]script.ScriptOutput) string { // if we couldn't get the EPP values, return empty string if outputs[script.EppValidScriptName].Exitcode != 0 || len(outputs[script.EppValidScriptName].Stdout) == 0 || outputs[script.EppPackageControlScriptName].Exitcode != 0 || len(outputs[script.EppPackageControlScriptName].Stdout) == 0 || @@ -211,13 +96,43 @@ func eppFromOutput(outputs map[string]script.ScriptOutput) string { } } -type cstateInfo struct { - Name string - Status string +// EPBFromOutput gets EPB value from script outputs +func EPBFromOutput(outputs map[string]script.ScriptOutput) string { + if outputs[script.EpbScriptName].Exitcode != 0 || len(outputs[script.EpbScriptName].Stdout) == 0 { + slog.Warn("EPB scripts failed or produced no output") + return "" + } + epb := strings.TrimSpace(outputs[script.EpbScriptName].Stdout) + msr, err := strconv.ParseInt(epb, 16, 0) + if err != nil { + slog.Error("failed to parse EPB value", slog.String("error", err.Error()), slog.String("epb", epb)) + return "" + } + return epbValToLabel(int(msr)) +} + +func ELCSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + fieldValues := ELCFieldValuesFromOutput(outputs) + if len(fieldValues) == 0 { + return "" + } + if len(fieldValues) < 10 { + return "" + } + if 
len(fieldValues[9].Values) == 0 { + return "" + } + summary := fieldValues[9].Values[0] + for _, value := range fieldValues[9].Values[1:] { + if value != summary { + return "mixed" + } + } + return summary } -func c6FromOutput(outputs map[string]script.ScriptOutput) string { - cstatesInfo := cstatesFromOutput(outputs) +func C6FromOutput(outputs map[string]script.ScriptOutput) string { + cstatesInfo := CstatesFromOutput(outputs) if cstatesInfo == nil { return "" } @@ -229,8 +144,25 @@ func c6FromOutput(outputs map[string]script.ScriptOutput) string { return "" } -func cstatesFromOutput(outputs map[string]script.ScriptOutput) []cstateInfo { - var cstatesInfo []cstateInfo +func CstatesSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + cstatesInfo := CstatesFromOutput(outputs) + if cstatesInfo == nil { + return "" + } + summaryParts := []string{} + for _, cstateInfo := range cstatesInfo { + summaryParts = append(summaryParts, fmt.Sprintf("%s: %s", cstateInfo.Name, cstateInfo.Status)) + } + return strings.Join(summaryParts, ", ") +} + +type CstateInfo struct { + Name string + Status string +} + +func CstatesFromOutput(outputs map[string]script.ScriptOutput) []CstateInfo { + var cstatesInfo []CstateInfo output := outputs[script.CstatesScriptName].Stdout for line := range strings.SplitSeq(output, "\n") { if line == "" { @@ -240,19 +172,88 @@ func cstatesFromOutput(outputs map[string]script.ScriptOutput) []cstateInfo { if len(parts) != 2 { return nil } - cstatesInfo = append(cstatesInfo, cstateInfo{Name: parts[0], Status: parts[1]}) + cstatesInfo = append(cstatesInfo, CstateInfo{Name: parts[0], Status: parts[1]}) } return cstatesInfo } -func cstatesSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - cstatesInfo := cstatesFromOutput(outputs) - if cstatesInfo == nil { - return "" +func ELCFieldValuesFromOutput(outputs map[string]script.ScriptOutput) (fieldValues []table.Field) { + if outputs[script.ElcScriptName].Stdout == "" { + return } - summaryParts := []string{} - for _, cstateInfo := range cstatesInfo { - summaryParts = append(summaryParts, fmt.Sprintf("%s: %s", cstateInfo.Name, cstateInfo.Status)) + r := csv.NewReader(strings.NewReader(outputs[script.ElcScriptName].Stdout)) + rows, err := r.ReadAll() + if err != nil { + return } - return strings.Join(summaryParts, ", ") + if len(rows) < 2 { + return + } + // first row is headers + for fieldNamesIndex, fieldName := range rows[0] { + values := []string{} + // value rows + for _, row := range rows[1:] { + values = append(values, row[fieldNamesIndex]) + } + fieldValues = append(fieldValues, table.Field{Name: fieldName, Values: values}) + } + + // let's add an interpretation of the values in an additional column + values := []string{} + // value rows + for _, row := range rows[1:] { + var mode string + if row[2] == "IO" { + if row[5] == "0" && row[6] == "0" && row[7] == "0" { + mode = "Latency Optimized" + } else if row[5] == "800" && row[6] == "10" && row[7] == "94" { + mode = "Default" + } else { + mode = "Custom" + } + } else { // COMPUTE + switch row[5] { + case "0": + mode = "Latency Optimized" + case "1200": + mode = "Default" + default: + mode = "Custom" + } + } + values = append(values, mode) + } + fieldValues = append(fieldValues, table.Field{Name: "Mode", Values: values}) + return +} + +func epbValToLabel(msr int) string { + var val string + if msr >= 0 && msr <= 3 { + val = "Performance" + } else if msr >= 4 && msr <= 7 { + val = "Balanced Performance" + } else if msr >= 8 && msr <= 11 { + val = 
"Balanced Energy" + } else if msr >= 12 { + val = "Energy Efficient" + } + return fmt.Sprintf("%s (%d)", val, msr) +} + +func eppValToLabel(msr int) string { + var val string + if msr == 128 { + val = "Normal" + } else if msr < 128 && msr > 64 { + val = "Balanced Performance" + } else if msr <= 64 { + val = "Performance" + } else if msr > 128 && msr < 192 { + val = "Balanced Powersave" + } else { + val = "Powersave" + } + return fmt.Sprintf("%s (%d)", val, msr) } diff --git a/internal/table/prefetcher.go b/internal/common/prefetcher.go similarity index 90% rename from internal/table/prefetcher.go rename to internal/common/prefetcher.go index c4084cbc..4a633ae4 100644 --- a/internal/table/prefetcher.go +++ b/internal/common/prefetcher.go @@ -1,7 +1,7 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package table +package common import ( "fmt" @@ -42,7 +42,7 @@ const ( PrefetcherLLCStreamName = "LLC Stream" ) -var prefetcherDefinitions = []PrefetcherDefinition{ +var PrefetcherDefinitions = []PrefetcherDefinition{ { ShortName: PrefetcherL2HWName, Description: "L2 Hardware (MLC Streamer) fetches additional lines of code or data into the L2 cache.", @@ -125,7 +125,7 @@ var prefetcherDefinitions = []PrefetcherDefinition{ // GetPrefetcherDefByName returns the Prefetcher definition by its short name. // It returns error if the Prefetcher is not found. func GetPrefetcherDefByName(name string) (PrefetcherDefinition, error) { - for _, p := range prefetcherDefinitions { + for _, p := range PrefetcherDefinitions { if p.ShortName == name { return p, nil } @@ -135,10 +135,10 @@ func GetPrefetcherDefByName(name string) (PrefetcherDefinition, error) { // GetPrefetcherDefinitions returns all Prefetcher definitions. func GetPrefetcherDefinitions() []PrefetcherDefinition { - return prefetcherDefinitions + return PrefetcherDefinitions } -func isPrefetcherEnabled(msrValue string, bit int) (bool, error) { +func IsPrefetcherEnabled(msrValue string, bit int) (bool, error) { if msrValue == "" { return false, fmt.Errorf("msrValue is empty") } @@ -151,14 +151,14 @@ func isPrefetcherEnabled(msrValue string, bit int) (bool, error) { return bitMask&msrInt == 0, nil } -func prefetchersFromOutput(outputs map[string]script.ScriptOutput) [][]string { +func PrefetchersFromOutput(outputs map[string]script.ScriptOutput) [][]string { out := make([][]string, 0) uarch := UarchFromOutput(outputs) if uarch == "" { // uarch is required return [][]string{} } - for _, pf := range prefetcherDefinitions { + for _, pf := range PrefetcherDefinitions { if slices.Contains(pf.Uarchs, "all") || slices.Contains(pf.Uarchs, uarch[:3]) { var scriptName string switch pf.Msr { @@ -172,12 +172,12 @@ func prefetchersFromOutput(outputs map[string]script.ScriptOutput) [][]string { slog.Error("unknown msr for prefetcher", slog.String("msr", fmt.Sprintf("0x%x", pf.Msr))) continue } - msrVal := valFromRegexSubmatch(outputs[scriptName].Stdout, `^([0-9a-fA-F]+)`) + msrVal := ValFromRegexSubmatch(outputs[scriptName].Stdout, `^([0-9a-fA-F]+)`) if msrVal == "" { continue } var enabledDisabled string - enabled, err := isPrefetcherEnabled(msrVal, pf.Bit) + enabled, err := IsPrefetcherEnabled(msrVal, pf.Bit) if err != nil { slog.Warn("error checking prefetcher enabled status", slog.String("error", err.Error())) continue @@ -193,14 +193,14 @@ func prefetchersFromOutput(outputs map[string]script.ScriptOutput) [][]string { return out } -func prefetchersSummaryFromOutput(outputs map[string]script.ScriptOutput) string { +func 
PrefetchersSummaryFromOutput(outputs map[string]script.ScriptOutput) string { uarch := UarchFromOutput(outputs) if uarch == "" { // uarch is required return "" } var prefList []string - for _, pf := range prefetcherDefinitions { + for _, pf := range PrefetcherDefinitions { if slices.Contains(pf.Uarchs, "all") || slices.Contains(pf.Uarchs, uarch[:3]) { var scriptName string switch pf.Msr { @@ -214,12 +214,12 @@ func prefetchersSummaryFromOutput(outputs map[string]script.ScriptOutput) string slog.Error("unknown msr for prefetcher", slog.String("msr", fmt.Sprintf("0x%x", pf.Msr))) continue } - msrVal := valFromRegexSubmatch(outputs[scriptName].Stdout, `^([0-9a-fA-F]+)`) + msrVal := ValFromRegexSubmatch(outputs[scriptName].Stdout, `^([0-9a-fA-F]+)`) if msrVal == "" { continue } var enabledDisabled string - enabled, err := isPrefetcherEnabled(msrVal, pf.Bit) + enabled, err := IsPrefetcherEnabled(msrVal, pf.Bit) if err != nil { slog.Warn("error checking prefetcher enabled status", slog.String("error", err.Error())) continue diff --git a/internal/table/storage.go b/internal/common/storage.go similarity index 57% rename from internal/table/storage.go rename to internal/common/storage.go index f006a257..e445a3be 100644 --- a/internal/table/storage.go +++ b/internal/common/storage.go @@ -1,7 +1,7 @@ // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause -package table +package common import ( "fmt" @@ -29,7 +29,7 @@ type diskInfo struct { MaxLinkWidth string } -func diskInfoFromOutput(outputs map[string]script.ScriptOutput) []diskInfo { +func DiskInfoFromOutput(outputs map[string]script.ScriptOutput) []diskInfo { diskInfos := []diskInfo{} for i, line := range strings.Split(outputs[script.DiskInfoScriptName].Stdout, "\n") { // first line is the header @@ -71,55 +71,8 @@ func diskInfoFromOutput(outputs map[string]script.ScriptOutput) []diskInfo { return diskInfos } -func filesystemFieldValuesFromOutput(outputs map[string]script.ScriptOutput) []Field { - fieldValues := []Field{} - reFindmnt := regexp.MustCompile(`(.*)\s(.*)\s(.*)\s(.*)`) - for i, line := range strings.Split(outputs[script.DfScriptName].Stdout, "\n") { - if line == "" { - continue - } - fields := strings.Fields(line) - // "Mounted On" gets split into two fields, rejoin - if i == 0 && fields[len(fields)-2] == "Mounted" && fields[len(fields)-1] == "on" { - fields[len(fields)-2] = "Mounted on" - fields = fields[:len(fields)-1] - for _, field := range fields { - fieldValues = append(fieldValues, Field{Name: field, Values: []string{}}) - } - // add an additional field - fieldValues = append(fieldValues, Field{Name: "Mount Options", Values: []string{}}) - continue - } - if len(fields) != len(fieldValues)-1 { - slog.Error("unexpected number of fields in df output", slog.String("line", line)) - return nil - } - for i, field := range fields { - fieldValues[i].Values = append(fieldValues[i].Values, field) - } - // get mount options for the current file system - var options string - for i, line := range strings.Split(outputs[script.FindMntScriptName].Stdout, "\n") { - if i == 0 { - continue - } - match := reFindmnt.FindStringSubmatch(line) - if match != nil { - target := match[1] - source := match[2] - if fields[0] == source && fields[5] == target { - options = match[4] - break - } - } - } - fieldValues[len(fieldValues)-1].Values = append(fieldValues[len(fieldValues)-1].Values, options) - } - return fieldValues -} - -func diskSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - disks := 
diskInfoFromOutput(outputs) +func DiskSummaryFromOutput(outputs map[string]script.ScriptOutput) string { + disks := DiskInfoFromOutput(outputs) if len(disks) == 0 { return "N/A" } diff --git a/internal/common/table_defs.go b/internal/common/table_defs.go new file mode 100644 index 00000000..705a2c47 --- /dev/null +++ b/internal/common/table_defs.go @@ -0,0 +1,68 @@ +package common + +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +import ( + "perfspect/internal/script" + "perfspect/internal/table" + "strings" +) + +const BriefSysSummaryTableName = "Brief System Summary" + +var TableDefinitions = map[string]table.TableDefinition{ + BriefSysSummaryTableName: { + Name: BriefSysSummaryTableName, + MenuLabel: BriefSysSummaryTableName, + HasRows: false, + ScriptNames: []string{ + script.HostnameScriptName, + script.DateScriptName, + script.LscpuScriptName, + script.LscpuCacheScriptName, + script.LspciBitsScriptName, + script.LspciDevicesScriptName, + script.MaximumFrequencyScriptName, + script.SpecCoreFrequenciesScriptName, + script.MeminfoScriptName, + script.NicInfoScriptName, + script.DiskInfoScriptName, + script.UnameScriptName, + script.EtcReleaseScriptName, + script.PackagePowerLimitName, + script.EpbScriptName, + script.ScalingDriverScriptName, + script.ScalingGovernorScriptName, + script.CstatesScriptName, + script.ElcScriptName, + }, + FieldsFunc: briefSummaryTableValues}, +} + +func briefSummaryTableValues(outputs map[string]script.ScriptOutput) []table.Field { + return []table.Field{ + {Name: "Host Name", Values: []string{strings.TrimSpace(outputs[script.HostnameScriptName].Stdout)}}, // Hostname + {Name: "Time", Values: []string{strings.TrimSpace(outputs[script.DateScriptName].Stdout)}}, // Date + {Name: "CPU Model", Values: []string{ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^[Mm]odel name:\s*(.+)$`)}}, // Lscpu + {Name: "Microarchitecture", Values: []string{UarchFromOutput(outputs)}}, // Lscpu, LspciBits, LspciDevices + {Name: "TDP", Values: []string{TDPFromOutput(outputs)}}, // PackagePowerLimit + {Name: "Sockets", Values: []string{ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`)}}, // Lscpu + {Name: "Cores per Socket", Values: []string{ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`)}}, // Lscpu + {Name: "Hyperthreading", Values: []string{HyperthreadingFromOutput(outputs)}}, // Lscpu, LspciBits, LspciDevices + {Name: "CPUs", Values: []string{ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(s\):\s*(.+)$`)}}, // Lscpu + {Name: "NUMA Nodes", Values: []string{ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`)}}, // Lscpu + {Name: "Scaling Driver", Values: []string{strings.TrimSpace(outputs[script.ScalingDriverScriptName].Stdout)}}, // ScalingDriver + {Name: "Scaling Governor", Values: []string{strings.TrimSpace(outputs[script.ScalingGovernorScriptName].Stdout)}}, // ScalingGovernor + {Name: "C-states", Values: []string{CstatesSummaryFromOutput(outputs)}}, // Cstates + {Name: "Maximum Frequency", Values: []string{MaxFrequencyFromOutput(outputs)}, Description: "The highest speed a single core can reach with Turbo Boost."}, // MaximumFrequency, SpecCoreFrequencies, + {Name: "All-core Maximum Frequency", Values: []string{AllCoreMaxFrequencyFromOutput(outputs)}, Description: "The highest speed all cores can reach simultaneously with Turbo Boost."}, // Lscpu, LspciBits, LspciDevices, 
SpecCoreFrequencies + {Name: "Energy Performance Bias", Values: []string{EPBFromOutput(outputs)}}, // EpbSource, EpbBIOS, EpbOS + {Name: "Efficiency Latency Control", Values: []string{ELCSummaryFromOutput(outputs)}}, // Elc + {Name: "MemTotal", Values: []string{ValFromRegexSubmatch(outputs[script.MeminfoScriptName].Stdout, `^MemTotal:\s*(.+?)$`)}}, // Meminfo + {Name: "NIC", Values: []string{NICSummaryFromOutput(outputs)}}, // Lshw, NicInfo + {Name: "Disk", Values: []string{DiskSummaryFromOutput(outputs)}}, // DiskInfo, Hdparm + {Name: "OS", Values: []string{OperatingSystemFromOutput(outputs)}}, // EtcRelease + {Name: "Kernel", Values: []string{ValFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`)}}, // Uname + } +} diff --git a/internal/common/table_helpers.go b/internal/common/table_helpers.go new file mode 100644 index 00000000..b3b36e30 --- /dev/null +++ b/internal/common/table_helpers.go @@ -0,0 +1,284 @@ +// Copyright (C) 2021-2025 Intel Corporation +// SPDX-License-Identifier: BSD-3-Clause + +// table_helpers.go contains base helper functions that are used to extract values from the output of the scripts. + +package common + +import ( + "fmt" + "log/slog" + "perfspect/internal/cpus" + "perfspect/internal/script" + "perfspect/internal/util" + "regexp" + "strconv" + "strings" +) + +// ValFromRegexSubmatch searches for a regex pattern in the given output string and returns the first captured group. +// If no match is found, an empty string is returned. +func ValFromRegexSubmatch(output string, regex string) string { + re := regexp.MustCompile(regex) + for line := range strings.SplitSeq(output, "\n") { + match := re.FindStringSubmatch(strings.TrimSpace(line)) + if len(match) > 1 { + return match[1] + } + } + return "" +} + +// ValsFromRegexSubmatch extracts the captured groups from each line in the output +// that matches the given regular expression. +// It returns a slice of strings containing the captured values. +func ValsFromRegexSubmatch(output string, regex string) []string { + var vals []string + re := regexp.MustCompile(regex) + for line := range strings.SplitSeq(output, "\n") { + match := re.FindStringSubmatch(strings.TrimSpace(line)) + if len(match) > 1 { + vals = append(vals, match[1]) + } + } + return vals +} + +// ValsArrayFromRegexSubmatch returns all matches for all capture groups in regex +func ValsArrayFromRegexSubmatch(output string, regex string) (vals [][]string) { + re := regexp.MustCompile(regex) + for line := range strings.SplitSeq(output, "\n") { + match := re.FindStringSubmatch(line) + if len(match) > 1 { + vals = append(vals, match[1:]) + } + } + return +} + +// ValFromDmiDecodeRegexSubmatch extracts a value from the DMI decode output using a regular expression. +// It takes the DMI decode output, the DMI type, and the regular expression as input parameters. +// It returns the extracted value as a string. 
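As a quick orientation for the regex helpers introduced above, here is a minimal, hypothetical usage sketch. It is not part of the patch; the sample lscpu text is invented, and it assumes the helpers are imported from perfspect/internal/common as defined by this refactor.

package main

import (
	"fmt"

	"perfspect/internal/common"
)

func main() {
	// Invented lscpu-style output, for illustration only.
	lscpu := "CPU family:          6\nModel:               143\nStepping:            8\n"
	// Returns the first capture group from the first matching (trimmed) line.
	fmt.Println(common.ValFromRegexSubmatch(lscpu, `^Model:\s*(.+)$`)) // 143
	// A pattern that matches no line yields an empty string.
	fmt.Println(common.ValFromRegexSubmatch(lscpu, `^Vendor ID:\s*(.+)$`) == "") // true
}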
+func ValFromDmiDecodeRegexSubmatch(dmiDecodeOutput string, dmiType string, regex string) string { + return ValFromRegexSubmatch(GetDmiDecodeType(dmiDecodeOutput, dmiType), regex) +} + +func ValsArrayFromDmiDecodeRegexSubmatch(dmiDecodeOutput string, dmiType string, regexes ...string) (vals [][]string) { + var res []*regexp.Regexp + for _, r := range regexes { + re := regexp.MustCompile(r) + res = append(res, re) + } + for _, entry := range GetDmiDecodeEntries(dmiDecodeOutput, dmiType) { + row := make([]string, len(res)) + for _, line := range entry { + for i, re := range res { + match := re.FindStringSubmatch(strings.TrimSpace(line)) + if len(match) > 1 { + row[i] = match[1] + } + } + } + vals = append(vals, row) + } + return +} + +// GetDmiDecodeType extracts the lines from the given `dmiDecodeOutput` that belong to the specified `dmiType`. +func GetDmiDecodeType(dmiDecodeOutput string, dmiType string) string { + var lines []string + start := false + for line := range strings.SplitSeq(dmiDecodeOutput, "\n") { + if start && strings.HasPrefix(line, "Handle ") { + start = false + } + if strings.Contains(line, "DMI type "+dmiType+",") { + start = true + } + if start { + lines = append(lines, line) + } + } + return strings.Join(lines, "\n") +} + +// GetDmiDecodeEntries extracts the entries from the given `dmiDecodeOutput` that belong to the specified `dmiType`. +func GetDmiDecodeEntries(dmiDecodeOutput string, dmiType string) (entries [][]string) { + lines := strings.Split(dmiDecodeOutput, "\n") + var entry []string + typeMatch := false + for _, line := range lines { + if strings.HasPrefix(line, "Handle ") { + if strings.Contains(line, "DMI type "+dmiType+",") { + // type match + typeMatch = true + entry = []string{} + } else { + // not a type match + typeMatch = false + } + } + if !typeMatch { + continue + } + if line == "" { + // end of type match entry + entries = append(entries, entry) + } else { + // a line in the entry + entry = append(entry, line) + } + } + return +} + +// GetSectionsFromOutput parses output into sections, where the section name +// is the key in a map and the section content is the value +// sections are delimited by lines of the form ##########
<section name> ########## +// example: +// ########## <section name> ########## +// <section content> +// <section content> +// ########## <section name> ########## +// <section content>
+// +// returns a map of section name to section content +// if the output is empty or contains no section headers, returns an empty map +// if a section contains no content, the value for that section is an empty string +func GetSectionsFromOutput(output string) map[string]string { + sections := make(map[string]string) + re := regexp.MustCompile(`^########## (.+?) ##########$`) + var sectionName string + for line := range strings.SplitSeq(output, "\n") { + // check if the line is a section header + match := re.FindStringSubmatch(line) + if match != nil { + // if the section name isn't in the map yet, add it + if _, ok := sections[match[1]]; !ok { + sections[match[1]] = "" + } + // save the section name + sectionName = match[1] + continue + } + if sectionName != "" { + sections[sectionName] += line + "\n" + } + } + return sections +} + +// SectionValueFromOutput returns the content of a section from the output +// if the section doesn't exist, returns an empty string +// if the section exists but has no content, returns an empty string +func SectionValueFromOutput(output string, sectionName string) string { + sections := GetSectionsFromOutput(output) + if len(sections) == 0 { + slog.Warn("no sections in output") + return "" + } + if _, ok := sections[sectionName]; !ok { + slog.Warn("section not found in output", slog.String("section", sectionName)) + return "" + } + if sections[sectionName] == "" { + slog.Warn("No content for section:", slog.String("section", sectionName)) + return "" + } + return sections[sectionName] +} + +// UarchFromOutput returns the architecture of the CPU that matches family, model, stepping, +// capid4, and devices information from the output or an empty string, if no match is found. +func UarchFromOutput(outputs map[string]script.ScriptOutput) string { + family := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) + model := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) + stepping := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) + capid4 := ValFromRegexSubmatch(outputs[script.LspciBitsScriptName].Stdout, `^([0-9a-fA-F]+)`) + devices := ValFromRegexSubmatch(outputs[script.LspciDevicesScriptName].Stdout, `^([0-9]+)`) + cpu, err := cpus.GetCPUExtended(family, model, stepping, capid4, devices) + if err == nil { + return cpu.MicroArchitecture + } + return "" +} + +func HyperthreadingFromOutput(outputs map[string]script.ScriptOutput) string { + family := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) + model := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) + stepping := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) + sockets := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) + coresPerSocket := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`) + cpuCount := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(.*:\s*(.+?)$`) + onlineCpus := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^On-line CPU\(s\) list:\s*(.+)$`) + threadsPerCore := ValFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Thread\(s\) per core:\s*(.+)$`) + + numCPUs, err := strconv.Atoi(cpuCount) // logical CPUs + if err != nil { + slog.Error("error parsing cpus from lscpu") + return "" + } + onlineCpusList, err := util.SelectiveIntRangeToIntList(onlineCpus) // logical 
online CPUs + numOnlineCpus := len(onlineCpusList) + if err != nil { + slog.Error("error parsing online cpus from lscpu") + numOnlineCpus = 0 // set to 0 to indicate parsing failed, will use numCPUs instead + } + numThreadsPerCore, err := strconv.Atoi(threadsPerCore) // logical threads per core + if err != nil { + slog.Error("error parsing threads per core from lscpu") + numThreadsPerCore = 0 + } + numSockets, err := strconv.Atoi(sockets) + if err != nil { + slog.Error("error parsing sockets from lscpu") + return "" + } + numCoresPerSocket, err := strconv.Atoi(coresPerSocket) // physical cores + if err != nil { + slog.Error("error parsing cores per sockets from lscpu") + return "" + } + cpu, err := cpus.GetCPUExtended(family, model, stepping, "", "") + if err != nil { + return "" + } + if numOnlineCpus > 0 && numOnlineCpus < numCPUs { + // if online CPUs list is available, use it to determine the number of CPUs + // supersedes lscpu output of numCPUs which counts CPUs on the system, not online CPUs + numCPUs = numOnlineCpus + } + if cpu.LogicalThreadCount < 2 { + return "N/A" + } else if numThreadsPerCore == 1 { + // if threads per core is 1, hyperthreading is disabled + return "Disabled" + } else if numThreadsPerCore >= 2 { + // if threads per core is greater than or equal to 2, hyperthreading is enabled + return "Enabled" + } else if numCPUs > numCoresPerSocket*numSockets { + // if the threads per core attribute is not available, we can still check if hyperthreading is enabled + // by checking if the number of logical CPUs is greater than the number of physical cores + return "Enabled" + } else { + return "Disabled" + } +} + +func OperatingSystemFromOutput(outputs map[string]script.ScriptOutput) string { + os := ValFromRegexSubmatch(outputs[script.EtcReleaseScriptName].Stdout, `^PRETTY_NAME=\"(.+?)\"`) + centos := ValFromRegexSubmatch(outputs[script.EtcReleaseScriptName].Stdout, `^(CentOS Linux release .*)`) + if centos != "" { + os = centos + } + return os +} + +func TDPFromOutput(outputs map[string]script.ScriptOutput) string { + msrHex := strings.TrimSpace(outputs[script.PackagePowerLimitName].Stdout) + msr, err := strconv.ParseInt(msrHex, 16, 0) + if err != nil || msr == 0 { + return "" + } + return fmt.Sprint(msr/8) + "W" +} diff --git a/internal/table/cpu_test.go b/internal/common/table_helpers_test.go similarity index 64% rename from internal/table/cpu_test.go rename to internal/common/table_helpers_test.go index f14b7885..bd7237a9 100644 --- a/internal/table/cpu_test.go +++ b/internal/common/table_helpers_test.go @@ -1,13 +1,162 @@ -package table +package common // Copyright (C) 2021-2025 Intel Corporation // SPDX-License-Identifier: BSD-3-Clause import ( "perfspect/internal/script" + "reflect" "testing" ) +func TestGetSectionsFromOutput(t *testing.T) { + tests := []struct { + name string + output string + want map[string]string + }{ + { + name: "Valid sections with content", + output: `########## Section A ########## +Content A1 +Content A2 +########## Section B ########## +Content B1 +Content B2 +########## Section C ########## +Content C1`, + want: map[string]string{ + "Section A": "Content A1\nContent A2\n", + "Section B": "Content B1\nContent B2\n", + "Section C": "Content C1\n", + }, + }, + { + name: "Valid sections with empty content", + output: `########## Section A ########## +########## Section B ########## +########## Section C ##########`, + want: map[string]string{ + "Section A": "", + "Section B": "", + "Section C": "", + }, + }, + { + name: "No sections", + output: 
"No section headers here", + want: map[string]string{}, + }, + { + name: "Empty output", + output: ``, + want: map[string]string{}, + }, + { + name: "Empty lines in output", + output: "\n\n\n", + want: map[string]string{}, + }, + { + name: "Section with trailing newlines", + output: `########## Section A ########## + +Content A1 + +########## Section B ########## +Content B1`, + want: map[string]string{ + "Section A": "\nContent A1\n\n", + "Section B": "Content B1\n", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := GetSectionsFromOutput(tt.output) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("getSectionsFromOutput() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestSectionValueFromOutput(t *testing.T) { + tests := []struct { + name string + output string + sectionName string + want string + }{ + { + name: "Section A exists with content", + output: `########## Section A ########## +Content A1 +Content A2 +########## Section B ########## +Content B1 +Content B2`, + sectionName: "Section A", + want: "Content A1\nContent A2\n", + }, + { + name: "Section B exists with content", + output: `########## Section A ########## +Content A1 +Content A2 +########## Section B ########## +Content B1 +Content B2`, + sectionName: "Section B", + want: "Content B1\nContent B2\n", + }, + { + name: "Section exists with no content", + output: `########## Section A ########## +########## Section B ########## +Content B1`, + sectionName: "Section A", + want: "", + }, + { + name: "Section does not exist", + output: `########## Section A ########## +Content A1 +########## Section B ########## +Content B1`, + sectionName: "Section C", + want: "", + }, + { + name: "Empty output", + output: "", + sectionName: "Section A", + want: "", + }, + { + name: "Section with trailing newlines", + output: `########## Section A ########## + +Content A1 + +########## Section B ########## +Content B1`, + sectionName: "Section A", + want: "\nContent A1\n\n", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := SectionValueFromOutput(tt.output, tt.sectionName) + if got != tt.want { + t.Errorf("sectionValueFromOutput() = %v, want %v", got, tt.want) + } + }) + } +} + func TestHyperthreadingFromOutput(t *testing.T) { tests := []struct { name string @@ -258,7 +407,7 @@ On-line CPU(s) list: 0-7,32-39 }, } - result := hyperthreadingFromOutput(outputs) + result := HyperthreadingFromOutput(outputs) if result != tt.wantResult { t.Errorf("hyperthreadingFromOutput() = %q, want %q", result, tt.wantResult) } diff --git a/internal/table/turbostat.go b/internal/common/turbostat.go similarity index 93% rename from internal/table/turbostat.go rename to internal/common/turbostat.go index 810b659c..679aebfa 100644 --- a/internal/table/turbostat.go +++ b/internal/common/turbostat.go @@ -1,4 +1,4 @@ -package table +package common import ( "fmt" @@ -86,11 +86,11 @@ func parseTurbostatOutput(output string) ([]map[string]string, error) { return rows, nil } -// turbostatPlatformRows parses the output of the turbostat script and returns the rows +// TurbostatPlatformRows parses the output of the turbostat script and returns the rows // for the platform (summary) only, for the specified field names. // The "platform" rows are those where Package, Die, Core, and CPU are all "-". // The first column is the sample time, and the rest are the values for the specified fields. 
-func turbostatPlatformRows(turboStatScriptOutput string, fieldNames []string) ([][]string, error) { +func TurbostatPlatformRows(turboStatScriptOutput string, fieldNames []string) ([][]string, error) { if len(fieldNames) == 0 { err := fmt.Errorf("no field names provided") slog.Error(err.Error()) @@ -141,11 +141,10 @@ func isPlatformRow(row map[string]string) bool { return true } -// turbostatPackageRows -// parses the output of the turbostat script and returns the rows +// TurbostatPackageRows parses the output of the turbostat script and returns the rows // for each package, for the specified field names. // The first column is the sample time, and the rest are the values for the specified fields. -func turbostatPackageRows(turboStatScriptOutput string, fieldNames []string) ([][][]string, error) { +func TurbostatPackageRows(turboStatScriptOutput string, fieldNames []string) ([][][]string, error) { if len(fieldNames) == 0 { err := fmt.Errorf("no field names provided") return nil, err @@ -211,8 +210,8 @@ func isPackageRow(row map[string]string) bool { return false } -// maxTotalPackagePowerFromOutput calculates the maximum total package power from the turbostat output. -func maxTotalPackagePowerFromOutput(turbostatOutput string) string { +// MaxTotalPackagePowerFromOutput calculates the maximum total package power from the turbostat output. +func MaxTotalPackagePowerFromOutput(turbostatOutput string) string { rows, err := parseTurbostatOutput(turbostatOutput) if err != nil { slog.Error("unable to parse turbostat output", slog.String("error", err.Error())) @@ -252,8 +251,8 @@ func maxTotalPackagePowerFromOutput(turbostatOutput string) string { return fmt.Sprintf("%.2f Watts", maxPower) } -// minTotalPackagePowerFromOutput calculates the minimum total package power from the turbostat output. -func minTotalPackagePowerFromOutput(turbostatOutput string) string { +// MinTotalPackagePowerFromOutput calculates the minimum total package power from the turbostat output. +func MinTotalPackagePowerFromOutput(turbostatOutput string) string { rows, err := parseTurbostatOutput(turbostatOutput) if err != nil { slog.Error("unable to parse turbostat output", slog.String("error", err.Error())) @@ -287,8 +286,8 @@ func minTotalPackagePowerFromOutput(turbostatOutput string) string { return fmt.Sprintf("%.2f Watts", minPower) } -// maxPackageTemperatureFromOutput calculates the maximum package temperature from the turbostat output. -func maxPackageTemperatureFromOutput(turbostatOutput string) string { +// MaxPackageTemperatureFromOutput calculates the maximum package temperature from the turbostat output. 
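For readers unfamiliar with the turbostat helpers being exported here, a hedged usage sketch follows. It is not part of the patch: powerSummaryFields is an invented name, the import paths assume this refactor, and the column names (Busy%, PkgWatt) are only typical turbostat headers that must match what the collection script actually produced.

import (
	"fmt"

	"perfspect/internal/common"
	"perfspect/internal/table"
)

// powerSummaryFields is a hypothetical helper, not part of this patch.
// It pulls the per-sample platform rows and the pre-formatted min/max
// total package power strings out of raw turbostat output.
func powerSummaryFields(turbostatOutput string) ([]table.Field, error) {
	// One row per sample: [sampleTime, Busy%, PkgWatt] taken from the platform summary line.
	rows, err := common.TurbostatPlatformRows(turbostatOutput, []string{"Busy%", "PkgWatt"})
	if err != nil {
		return nil, err
	}
	return []table.Field{
		{Name: "Samples", Values: []string{fmt.Sprintf("%d", len(rows))}},
		{Name: "Minimum Total Package Power", Values: []string{common.MinTotalPackagePowerFromOutput(turbostatOutput)}},
		{Name: "Maximum Total Package Power", Values: []string{common.MaxTotalPackagePowerFromOutput(turbostatOutput)}},
	}, nil
}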
+func MaxPackageTemperatureFromOutput(turbostatOutput string) string { rows, err := parseTurbostatOutput(turbostatOutput) if err != nil { slog.Error("unable to parse turbostat output", slog.String("error", err.Error())) diff --git a/internal/table/turbostat_test.go b/internal/common/turbostat_test.go similarity index 98% rename from internal/table/turbostat_test.go rename to internal/common/turbostat_test.go index eb633e82..540097f1 100644 --- a/internal/table/turbostat_test.go +++ b/internal/common/turbostat_test.go @@ -1,4 +1,4 @@ -package table +package common import ( "reflect" @@ -120,7 +120,7 @@ func TestTurbostatPlatformRows(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := turbostatPlatformRows(tt.turbostatOutput, tt.fieldNames) + got, err := TurbostatPlatformRows(tt.turbostatOutput, tt.fieldNames) if (err != nil) != tt.expectErr { t.Errorf("turbostatSummaryRows() error = %v, expectErr %v", err, tt.expectErr) return @@ -243,7 +243,7 @@ Package Core CPU PkgWatt for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := maxTotalPackagePowerFromOutput(tt.turbostatOutput) + got := MaxTotalPackagePowerFromOutput(tt.turbostatOutput) if got != tt.want { t.Errorf("maxTotalPackagePowerFromOutput() = %q, want %q", got, tt.want) } @@ -335,7 +335,7 @@ Package Core CPU PkgWatt for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := minTotalPackagePowerFromOutput(tt.turbostatOutput) + got := MinTotalPackagePowerFromOutput(tt.turbostatOutput) if got != tt.want { t.Errorf("minTotalPackagePowerFromOutput() = %q, want %q", got, tt.want) } @@ -426,7 +426,7 @@ Package Core CPU PkgTmp for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := maxPackageTemperatureFromOutput(tt.turbostatOutput) + got := MaxPackageTemperatureFromOutput(tt.turbostatOutput) if got != tt.want { t.Errorf("maxPackageTemperatureFromOutput() = %q, want %q", got, tt.want) } @@ -564,7 +564,7 @@ Package Core CPU Avg_MHz Busy% for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := turbostatPackageRows(tt.turbostatOutput, tt.fieldNames) + got, err := TurbostatPackageRows(tt.turbostatOutput, tt.fieldNames) if (err != nil) != tt.wantErr { t.Errorf("turbostatPackageRows() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/internal/report/render_excel.go b/internal/report/render_excel.go index c3edf09e..1d72665f 100644 --- a/internal/report/render_excel.go +++ b/internal/report/render_excel.go @@ -52,7 +52,7 @@ func renderXlsxTable(tableValues table.TableValues, f *excelize.File, sheetName _ = f.SetCellStyle(sheetName, cellName(col, *row), cellName(col, *row), tableNameStyle) *row++ if len(tableValues.Fields) == 0 || len(tableValues.Fields[0].Values) == 0 { - msg := noDataFound + msg := NoDataFound if tableValues.NoDataFound != "" { msg = tableValues.NoDataFound } @@ -128,7 +128,7 @@ func renderXlsxTableMultiTarget(targetTableValues []table.TableValues, targetNam // if no data found, print a message and skip to the next target if len(targetTableValues[targetIdx].Fields) == 0 || len(targetTableValues[targetIdx].Fields[0].Values) == 0 { - msg := noDataFound + msg := NoDataFound if targetTableValues[targetIdx].NoDataFound != "" { msg = targetTableValues[targetIdx].NoDataFound } @@ -223,7 +223,7 @@ const ( XlsxBriefSheetName = "Brief" ) -func createXlsxReport(allTableValues []table.TableValues) (out []byte, err error) { +func createXlsxReport(allTableValues []table.TableValues, systemSummaryTableName string) (out []byte, err 
error) { f := excelize.NewFile() sheetName := XlsxPrimarySheetName _ = f.SetSheetName("Sheet1", sheetName) @@ -231,7 +231,7 @@ func createXlsxReport(allTableValues []table.TableValues) (out []byte, err error _ = f.SetColWidth(sheetName, "B", "L", 25) row := 1 for _, tableValues := range allTableValues { - if tableValues.Name == table.SystemSummaryTableName { + if tableValues.Name == systemSummaryTableName { row := 1 sheetName := XlsxBriefSheetName _, _ = f.NewSheet(sheetName) @@ -252,7 +252,7 @@ func createXlsxReport(allTableValues []table.TableValues) (out []byte, err error return } -func createXlsxReportMultiTarget(allTargetsTableValues [][]table.TableValues, targetNames []string, allTableNames []string) (out []byte, err error) { +func createXlsxReportMultiTarget(allTargetsTableValues [][]table.TableValues, targetNames []string, allTableNames []string, systemSummaryTableName string) (out []byte, err error) { f := excelize.NewFile() sheetName := XlsxPrimarySheetName _ = f.SetSheetName("Sheet1", sheetName) @@ -274,7 +274,7 @@ func createXlsxReportMultiTarget(allTargetsTableValues [][]table.TableValues, ta tableValues = append(tableValues, targetTableValues[tableIndex]) } // render the table, if system summary table put it in a separate sheet - if tableName == table.SystemSummaryTableName { + if tableName == systemSummaryTableName { summaryRow := 1 sheetName := XlsxBriefSheetName _, _ = f.NewSheet(sheetName) diff --git a/internal/report/render_html.go b/internal/report/render_html.go index 32da19fc..27bd5971 100644 --- a/internal/report/render_html.go +++ b/internal/report/render_html.go @@ -9,42 +9,15 @@ import ( "html" htmltemplate "html/template" "log/slog" - "math" "perfspect/internal/table" - "perfspect/internal/util" - "slices" - "sort" - "strconv" "strings" texttemplate "text/template" // nosemgrep ) // Package-level maps for custom HTML renderers -var customHTMLRenderers = map[string]table.HTMLTableRenderer{ - table.DIMMTableName: dimmTableHTMLRenderer, - table.FrequencyBenchmarkTableName: frequencyBenchmarkTableHtmlRenderer, - table.MemoryBenchmarkTableName: memoryBenchmarkTableHtmlRenderer, - table.CPUUtilizationTelemetryTableName: cpuUtilizationTelemetryTableHTMLRenderer, - table.UtilizationCategoriesTelemetryTableName: utilizationCategoriesTelemetryTableHTMLRenderer, - table.IPCTelemetryTableName: ipcTelemetryTableHTMLRenderer, - table.C6TelemetryTableName: c6TelemetryTableHTMLRenderer, - table.FrequencyTelemetryTableName: averageFrequencyTelemetryTableHTMLRenderer, - table.IRQRateTelemetryTableName: irqRateTelemetryTableHTMLRenderer, - table.DriveTelemetryTableName: driveTelemetryTableHTMLRenderer, - table.NetworkTelemetryTableName: networkTelemetryTableHTMLRenderer, - table.MemoryTelemetryTableName: memoryTelemetryTableHTMLRenderer, - table.PowerTelemetryTableName: powerTelemetryTableHTMLRenderer, - table.TemperatureTelemetryTableName: temperatureTelemetryTableHTMLRenderer, - table.InstructionTelemetryTableName: instructionTelemetryTableHTMLRenderer, - table.GaudiTelemetryTableName: gaudiTelemetryTableHTMLRenderer, - table.PDUTelemetryTableName: pduTelemetryTableHTMLRenderer, - table.CallStackFrequencyTableName: callStackFrequencyTableHTMLRenderer, - table.KernelLockAnalysisTableName: kernelLockAnalysisHTMLRenderer, -} +var customHTMLRenderers = map[string]table.HTMLTableRenderer{} -var customHTMLMultiTargetRenderers = map[string]table.HTMLMultiTargetTableRenderer{ - table.MemoryBenchmarkTableName: memoryBenchmarkTableMultiTargetHtmlRenderer, -} +var 
customHTMLMultiTargetRenderers = map[string]table.HTMLMultiTargetTableRenderer{} // getCustomHTMLRenderer returns the custom renderer for a table, or nil if no custom renderer exists func getCustomHTMLRenderer(tableName string) table.HTMLTableRenderer { @@ -303,7 +276,7 @@ func createHtmlReport(allTableValues []table.TableValues, targetName string) (ou sb.WriteString(fmt.Sprintf("

%[1]s

\n", html.EscapeString(tableValues.Name))) // if there's no data in the table, print a message and continue if len(tableValues.Fields) == 0 || len(tableValues.Fields[0].Values) == 0 { - msg := noDataFound + msg := NoDataFound if tableValues.NoDataFound != "" { msg = tableValues.NoDataFound } @@ -373,7 +346,7 @@ func createHtmlReportMultiTarget(allTargetsTableValues [][]table.TableValues, ta sb.WriteString(fmt.Sprintf("

%s

\n", targetName)) // if there's no data in the table, print a message and continue if len(targetTableValues.Fields) == 0 || len(targetTableValues.Fields[0].Values) == 0 { - sb.WriteString("

" + noDataFound + "

\n") + sb.WriteString("

" + NoDataFound + "

\n") continue } if renderer := getCustomHTMLRenderer(targetTableValues.Name); renderer != nil { // custom table renderer @@ -537,7 +510,7 @@ new Chart(document.getElementById('{{.ID}}'), { ` -type chartTemplateStruct struct { +type ChartTemplateStruct struct { ID string Labels string // only for line charts Datasets string @@ -559,7 +532,7 @@ func CreateFieldNameWithDescription(fieldName, description string) string { return htmltemplate.HTMLEscapeString(fieldName) + `?` + htmltemplate.HTMLEscapeString(description) + `` } -func renderHTMLTable(tableHeaders []string, tableValues [][]string, class string, valuesStyle [][]string) string { +func RenderHTMLTable(tableHeaders []string, tableValues [][]string, class string, valuesStyle [][]string) string { return renderHTMLTableWithDescriptions(tableHeaders, nil, tableValues, class, valuesStyle) } @@ -624,7 +597,7 @@ func DefaultHTMLTableRendererFunc(tableValues table.TableValues) string { values = append(values, rowValues) tableValueStyles = append(tableValueStyles, []string{"font-weight:bold"}) } - return renderHTMLTable([]string{}, values, "pure-table pure-table-striped", tableValueStyles) + return RenderHTMLTable([]string{}, values, "pure-table pure-table-striped", tableValueStyles) } } @@ -648,123 +621,10 @@ func RenderMultiTargetTableValuesAsHTML(tableValues []table.TableValues, targetN } headers := []string{""} headers = append(headers, targetNames...) - return renderHTMLTable(headers, values, "pure-table pure-table-striped", tableValueStyles) -} - -func dimmDetails(dimm []string) (details string) { - if strings.Contains(dimm[table.SizeIdx], "No") { - details = "No Module Installed" - } else { - // Intel PMEM modules may have serial number appended to end of part number... - // strip that off so it doesn't mess with color selection later - partNumber := dimm[table.PartIdx] - if strings.Contains(dimm[table.DetailIdx], "Synchronous Non-Volatile") && - dimm[table.ManufacturerIdx] == "Intel" && - strings.HasSuffix(dimm[table.PartIdx], dimm[table.SerialIdx]) { - partNumber = dimm[table.PartIdx][:len(dimm[table.PartIdx])-len(dimm[table.SerialIdx])] - } - // example: "64GB DDR5 R2 Synchronous Registered (Buffered) Micron Technology MTC78ASF4G72PZ-2G6E1 6400 MT/s [6000 MT/s]" - details = fmt.Sprintf("%s %s %s R%s %s %s %s [%s]", - strings.ReplaceAll(dimm[table.SizeIdx], " ", ""), - dimm[table.TypeIdx], - dimm[table.DetailIdx], - dimm[table.RankIdx], - dimm[table.ManufacturerIdx], - partNumber, - strings.ReplaceAll(dimm[table.SpeedIdx], " ", ""), - strings.ReplaceAll(dimm[table.ConfiguredSpeedIdx], " ", "")) - } - return + return RenderHTMLTable(headers, values, "pure-table pure-table-striped", tableValueStyles) } -func dimmTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - if tableValues.Fields[table.DerivedSocketIdx].Values[0] == "" || tableValues.Fields[table.DerivedChannelIdx].Values[0] == "" || tableValues.Fields[table.DerivedSlotIdx].Values[0] == "" { - return DefaultHTMLTableRendererFunc(tableValues) - } - htmlColors := []string{"lightgreen", "orange", "aqua", "lime", "yellow", "beige", "magenta", "violet", "salmon", "pink"} - var slotColorIndices = make(map[string]int) - // socket -> channel -> slot -> dimm details - var dimms = map[string]map[string]map[string]string{} - for dimmIdx := range tableValues.Fields[table.DerivedSocketIdx].Values { - if _, ok := dimms[tableValues.Fields[table.DerivedSocketIdx].Values[dimmIdx]]; !ok { - dimms[tableValues.Fields[table.DerivedSocketIdx].Values[dimmIdx]] = 
make(map[string]map[string]string) - } - if _, ok := dimms[tableValues.Fields[table.DerivedSocketIdx].Values[dimmIdx]][tableValues.Fields[table.DerivedChannelIdx].Values[dimmIdx]]; !ok { - dimms[tableValues.Fields[table.DerivedSocketIdx].Values[dimmIdx]][tableValues.Fields[table.DerivedChannelIdx].Values[dimmIdx]] = make(map[string]string) - } - dimmValues := []string{} - for _, field := range tableValues.Fields { - dimmValues = append(dimmValues, field.Values[dimmIdx]) - } - dimms[tableValues.Fields[table.DerivedSocketIdx].Values[dimmIdx]][tableValues.Fields[table.DerivedChannelIdx].Values[dimmIdx]][tableValues.Fields[table.DerivedSlotIdx].Values[dimmIdx]] = dimmDetails(dimmValues) - } - - var socketTableHeaders = []string{"Socket", ""} - var socketTableValues [][]string - var socketKeys []string - for k := range dimms { - socketKeys = append(socketKeys, k) - } - sort.Strings(socketKeys) - for _, socket := range socketKeys { - socketMap := dimms[socket] - socketTableValues = append(socketTableValues, []string{}) - var channelTableHeaders = []string{"Channel", "Slots"} - var channelTableValues [][]string - var channelKeys []int - for k := range socketMap { - channel, err := strconv.Atoi(k) - if err != nil { - slog.Error("failed to convert channel to int", slog.String("error", err.Error())) - return "" - } - channelKeys = append(channelKeys, channel) - } - sort.Ints(channelKeys) - for _, channel := range channelKeys { - channelMap := socketMap[strconv.Itoa(channel)] - channelTableValues = append(channelTableValues, []string{}) - var slotTableHeaders []string - var slotTableValues [][]string - var slotTableValuesStyles [][]string - var slotKeys []string - for k := range channelMap { - slotKeys = append(slotKeys, k) - } - sort.Strings(slotKeys) - slotTableValues = append(slotTableValues, []string{}) - slotTableValuesStyles = append(slotTableValuesStyles, []string{}) - for _, slot := range slotKeys { - dimmDetails := channelMap[slot] - slotTableValues[0] = append(slotTableValues[0], htmltemplate.HTMLEscapeString(dimmDetails)) - var slotColor string - if dimmDetails == "No Module Installed" { - slotColor = "background-color:silver" - } else { - if _, ok := slotColorIndices[dimmDetails]; !ok { - slotColorIndices[dimmDetails] = int(math.Min(float64(len(slotColorIndices)), float64(len(htmlColors)-1))) - } - slotColor = "background-color:" + htmlColors[slotColorIndices[dimmDetails]] - } - slotTableValuesStyles[0] = append(slotTableValuesStyles[0], slotColor) - } - slotTable := renderHTMLTable(slotTableHeaders, slotTableValues, "pure-table pure-table-bordered", slotTableValuesStyles) - // channel number - channelTableValues[len(channelTableValues)-1] = append(channelTableValues[len(channelTableValues)-1], strconv.Itoa(channel)) - // slot table - channelTableValues[len(channelTableValues)-1] = append(channelTableValues[len(channelTableValues)-1], slotTable) - // style - } - channelTable := renderHTMLTable(channelTableHeaders, channelTableValues, "pure-table pure-table-bordered", [][]string{}) - // socket number - socketTableValues[len(socketTableValues)-1] = append(socketTableValues[len(socketTableValues)-1], socket) - // channel table - socketTableValues[len(socketTableValues)-1] = append(socketTableValues[len(socketTableValues)-1], channelTable) - } - return renderHTMLTable(socketTableHeaders, socketTableValues, "pure-table pure-table-bordered", [][]string{}) -} - -// renderChart generates an HTML/JavaScript representation of a chart using the provided data and configuration. 
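Since RenderHTMLTable is now exported, a brief hypothetical call site may help. It is not part of the patch and the header/value strings are invented, but the CSS class and the empty styles slice mirror how the existing renderers call it.

import "perfspect/internal/report"

// exampleTable is illustrative only, not part of this patch.
func exampleTable() string {
	headers := []string{"Field", "Value"}
	values := [][]string{
		{"Sockets", "2"},
		{"Cores per Socket", "64"},
	}
	// An empty styles slice means no per-cell styling is applied.
	return report.RenderHTMLTable(headers, values, "pure-table pure-table-striped", [][]string{})
}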
+// RenderChart generates an HTML/JavaScript representation of a chart using the provided data and configuration. // It supports different chart types (e.g., "line", "scatter") and uses Go templates to format the datasets and chart. // Parameters: // - chartType: the type of chart to render ("line", "scatter"). @@ -776,7 +636,7 @@ func dimmTableHTMLRenderer(tableValues table.TableValues, targetName string) str // // Returns: // - A string containing the rendered chart HTML/JavaScript, or an error message if rendering fails. -func renderChart(chartType string, allFormattedPoints []string, datasetNames []string, xAxisLabels []string, config chartTemplateStruct, datasetHiddenFlags []bool) string { +func RenderChart(chartType string, allFormattedPoints []string, datasetNames []string, xAxisLabels []string, config ChartTemplateStruct, datasetHiddenFlags []bool) string { datasets := []string{} for dataIdx, formattedPoints := range allFormattedPoints { specValues := formattedPoints @@ -835,12 +695,12 @@ func renderChart(chartType string, allFormattedPoints []string, datasetNames []s return out } -type scatterPoint struct { - x float64 - y float64 +type ScatterPoint struct { + X float64 + Y float64 } -// renderScatterChart generates an HTML string for a scatter chart using the provided data and configuration. +// RenderScatterChart generates an HTML string for a scatter chart using the provided data and configuration. // // Parameters: // @@ -851,138 +711,16 @@ type scatterPoint struct { // Returns: // // A string containing the rendered HTML for the scatter chart. -func renderScatterChart(data [][]scatterPoint, datasetNames []string, config chartTemplateStruct) string { - allFormattedPoints := []string{} - for dataIdx := range data { - formattedPoints := []string{} - for _, point := range data[dataIdx] { - formattedPoints = append(formattedPoints, fmt.Sprintf("{x: %f, y: %f}", point.x, point.y)) - } - allFormattedPoints = append(allFormattedPoints, strings.Join(formattedPoints, ",")) - } - return renderChart("scatter", allFormattedPoints, datasetNames, nil, config, nil) -} - -// renderLineChart generates an HTML string for a line chart using the provided data and configuration. -// -// Parameters: -// -// xAxisLabels - Slice of strings representing the labels for the X axis. -// data - 2D slice of float64 values, where each inner slice represents a dataset's data points. -// datasetNames - Slice of strings representing the names of each dataset. -// config - chartTemplateStruct containing chart configuration options. -// datasetHiddenFlags - Slice of booleans indicating whether each dataset should be hidden initially. -// -// Returns: -// -// A string containing the rendered HTML for the line chart. 
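Below is a hedged sketch of how a caller outside this file might use the newly exported scatter-chart helpers. It is not part of the patch: latencyBandwidthChart is an invented name, and the axis labels and config values are copied from the memory-benchmark renderer that this patch removes.

import "perfspect/internal/report"

// latencyBandwidthChart is a hypothetical example, not part of this patch.
// bandwidth and latency are assumed to be the same length.
func latencyBandwidthChart(bandwidth, latency []float64, targetName string) string {
	points := make([]report.ScatterPoint, 0, len(bandwidth))
	for i := range bandwidth {
		points = append(points, report.ScatterPoint{X: bandwidth[i], Y: latency[i]})
	}
	cfg := report.ChartTemplateStruct{
		ID:            "latencyBandwidthExample",
		XaxisText:     "Bandwidth (GB/s)",
		YaxisText:     "Latency (ns)",
		DisplayTitle:  "false",
		DisplayLegend: "true",
		AspectRatio:   "4",
		SuggestedMin:  "0",
		SuggestedMax:  "0",
	}
	return report.RenderScatterChart([][]report.ScatterPoint{points}, []string{targetName}, cfg)
}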
-func renderLineChart(xAxisLabels []string, data [][]float64, datasetNames []string, config chartTemplateStruct, datasetHiddenFlags []bool) string { +func RenderScatterChart(data [][]ScatterPoint, datasetNames []string, config ChartTemplateStruct) string { allFormattedPoints := []string{} for dataIdx := range data { formattedPoints := []string{} for _, point := range data[dataIdx] { - formattedPoints = append(formattedPoints, fmt.Sprintf("%f", point)) + formattedPoints = append(formattedPoints, fmt.Sprintf("{x: %f, y: %f}", point.X, point.Y)) } allFormattedPoints = append(allFormattedPoints, strings.Join(formattedPoints, ",")) } - return renderChart("line", allFormattedPoints, datasetNames, xAxisLabels, config, datasetHiddenFlags) -} - -func renderFrequencyTable(tableValues table.TableValues) (out string) { - var rows [][]string - headers := []string{""} - valuesStyles := [][]string{} - for i := range tableValues.Fields[0].Values { - headers = append(headers, fmt.Sprintf("%d", i+1)) - } - for _, field := range tableValues.Fields[1:] { - row := append([]string{CreateFieldNameWithDescription(field.Name, field.Description)}, field.Values...) - rows = append(rows, row) - valuesStyles = append(valuesStyles, []string{"font-weight:bold"}) - } - out = renderHTMLTable(headers, rows, "pure-table pure-table-striped", valuesStyles) - return -} - -func coreTurboFrequencyTableHTMLRenderer(tableValues table.TableValues) string { - data := [][]scatterPoint{} - datasetNames := []string{} - for _, field := range tableValues.Fields[1:] { - points := []scatterPoint{} - for i, val := range field.Values { - if val == "" { - break - } - freq, err := strconv.ParseFloat(val, 64) - if err != nil { - slog.Error("error parsing frequency", slog.String("error", err.Error())) - return "" - } - points = append(points, scatterPoint{float64(i + 1), freq}) - } - if len(points) > 0 { - data = append(data, points) - datasetNames = append(datasetNames, field.Name) - } - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("turboFrequency%d", util.RandUint(10000)), - XaxisText: "Core Count", - YaxisText: "Frequency (GHz)", - TitleText: "", - DisplayTitle: "false", - DisplayLegend: "true", - AspectRatio: "4", - SuggestedMin: "2", - SuggestedMax: "4", - } - out := renderScatterChart(data, datasetNames, chartConfig) - out += "\n" - out += renderFrequencyTable(tableValues) - return out -} - -func frequencyBenchmarkTableHtmlRenderer(tableValues table.TableValues, targetName string) string { - return coreTurboFrequencyTableHTMLRenderer(tableValues) -} - -func memoryBenchmarkTableHtmlRenderer(tableValues table.TableValues, targetName string) string { - return memoryBenchmarkTableMultiTargetHtmlRenderer([]table.TableValues{tableValues}, []string{targetName}) -} - -func memoryBenchmarkTableMultiTargetHtmlRenderer(allTableValues []table.TableValues, targetNames []string) string { - data := [][]scatterPoint{} - datasetNames := []string{} - for targetIdx, tableValues := range allTableValues { - points := []scatterPoint{} - for valIdx := range tableValues.Fields[0].Values { - latency, err := strconv.ParseFloat(tableValues.Fields[0].Values[valIdx], 64) - if err != nil { - slog.Error("error parsing latency", slog.String("error", err.Error())) - return "" - } - bandwidth, err := strconv.ParseFloat(tableValues.Fields[1].Values[valIdx], 64) - if err != nil { - slog.Error("error parsing bandwidth", slog.String("error", err.Error())) - return "" - } - points = append(points, scatterPoint{bandwidth, latency}) - } - data = append(data, points) 
- datasetNames = append(datasetNames, targetNames[targetIdx]) - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("latencyBandwidth%d", util.RandUint(10000)), - XaxisText: "Bandwidth (GB/s)", - YaxisText: "Latency (ns)", - TitleText: "", - DisplayTitle: "false", - DisplayLegend: "true", - AspectRatio: "4", - SuggestedMin: "0", - SuggestedMax: "0", - } - return renderScatterChart(data, datasetNames, chartConfig) + return RenderChart("scatter", allFormattedPoints, datasetNames, nil, config, nil) } func getColor(idx int) string { @@ -990,682 +728,3 @@ func getColor(idx int) string { colors := []string{"#9F0162", "#009F81", "#FF5AAF", "#00FCCF", "#8400CD", "#008DF9", "#00C2F9", "#FFB2FD", "#A40122", "#E20134", "#FF6E3A", "#FFC33B"} return colors[idx%len(colors)] } - -func telemetryTableHTMLRenderer(tableValues table.TableValues, data [][]float64, datasetNames []string, chartConfig chartTemplateStruct, datasetHiddenFlags []bool) string { - tsFieldIdx := 0 - var timestamps []string - for i := range tableValues.Fields[0].Values { - timestamp := tableValues.Fields[tsFieldIdx].Values[i] - if !slices.Contains(timestamps, timestamp) { // could be slow if list is long - timestamps = append(timestamps, timestamp) - } - } - return renderLineChart(timestamps, data, datasetNames, chartConfig, datasetHiddenFlags) -} - -func cpuUtilizationTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - data := [][]float64{} - datasetNames := []string{} - // collect the busy (100 - idle) values for each CPU - cpuBusyStats := make(map[int][]float64) - idleFieldIdx := len(tableValues.Fields) - 1 - cpuFieldIdx := 1 - for i := range tableValues.Fields[0].Values { - idle, err := strconv.ParseFloat(tableValues.Fields[idleFieldIdx].Values[i], 64) - if err != nil { - continue - } - busy := 100 - idle - cpu, err := strconv.Atoi(tableValues.Fields[cpuFieldIdx].Values[i]) - if err != nil { - continue - } - if _, ok := cpuBusyStats[cpu]; !ok { - cpuBusyStats[cpu] = []float64{} - } - cpuBusyStats[cpu] = append(cpuBusyStats[cpu], busy) - } - // sort map keys by cpu number - var keys []int - for cpu := range cpuBusyStats { - keys = append(keys, cpu) - } - sort.Ints(keys) - // build the data - for _, cpu := range keys { - if len(cpuBusyStats[cpu]) > 0 { - data = append(data, cpuBusyStats[cpu]) - datasetNames = append(datasetNames, fmt.Sprintf("CPU %d", cpu)) - } - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: "% Utilization", - TitleText: "", - DisplayTitle: "false", - DisplayLegend: "false", - AspectRatio: "2", - SuggestedMin: "0", - SuggestedMax: "100", - } - return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) -} - -func utilizationCategoriesTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - data := [][]float64{} - datasetNames := []string{} - for _, field := range tableValues.Fields[1:] { - points := []float64{} - for _, val := range field.Values { - if val == "" { - break - } - util, err := strconv.ParseFloat(val, 64) - if err != nil { - slog.Error("error parsing percentage", slog.String("error", err.Error())) - return "" - } - points = append(points, util) - } - if len(points) > 0 { - data = append(data, points) - datasetNames = append(datasetNames, field.Name) - } - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: "% Utilization", - 
TitleText: "", - DisplayTitle: "false", - DisplayLegend: "true", - AspectRatio: "2", - SuggestedMin: "0", - SuggestedMax: "100", - } - return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) -} - -func irqRateTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - data := [][]float64{} - datasetNames := []string{} - for _, field := range tableValues.Fields[2:] { // 1 data set per field, e.g., %usr, %nice, etc., skip Time and CPU fields - datasetNames = append(datasetNames, field.Name) - // sum the values in the field per timestamp, store the sum as a point - timeStamp := tableValues.Fields[0].Values[0] - points := []float64{} - total := 0.0 - for i := range field.Values { - if tableValues.Fields[0].Values[i] != timeStamp { // new timestamp? - points = append(points, total) - total = 0.0 - timeStamp = tableValues.Fields[0].Values[i] - } - val, err := strconv.ParseFloat(field.Values[i], 64) - if err != nil { - slog.Error("error parsing value", slog.String("error", err.Error())) - return "" - } - total += val - } - points = append(points, total) // add the point for the last timestamp - // save the points in the data slice - data = append(data, points) - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: "IRQ/s", - TitleText: "", - DisplayTitle: "false", - DisplayLegend: "true", - AspectRatio: "2", - SuggestedMin: "0", - SuggestedMax: "0", - } - return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) -} - -// driveTelemetryTableHTMLRenderer renders charts of drive statistics -// - one scatter chart per drive, showing the drive's utilization over time -// - each drive stat is a separate dataset within the chart -func driveTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - var out string - driveStats := make(map[string][][]string) - for i := range tableValues.Fields[0].Values { - drive := tableValues.Fields[1].Values[i] - if _, ok := driveStats[drive]; !ok { - driveStats[drive] = make([][]string, len(tableValues.Fields)-2) - } - for j := range len(tableValues.Fields) - 2 { - driveStats[drive][j] = append(driveStats[drive][j], tableValues.Fields[j+2].Values[i]) - } - } - var keys []string - for drive := range driveStats { - keys = append(keys, drive) - } - sort.Strings(keys) - for _, drive := range keys { - data := [][]float64{} - datasetNames := []string{} - for i, statVals := range driveStats[drive] { - points := []float64{} - for i, val := range statVals { - if val == "" { - slog.Error("empty stat value", slog.String("drive", drive), slog.Int("index", i)) - return "" - } - util, err := strconv.ParseFloat(val, 64) - if err != nil { - slog.Error("error parsing stat", slog.String("error", err.Error())) - return "" - } - points = append(points, util) - } - if len(points) > 0 { - data = append(data, points) - datasetNames = append(datasetNames, tableValues.Fields[i+2].Name) - } - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: "", - TitleText: drive, - DisplayTitle: "true", - DisplayLegend: "true", - AspectRatio: "2", - SuggestedMin: "0", - SuggestedMax: "0", - } - out += telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) - } - return out -} - -// networkTelemetryTableHTMLRenderer renders charts of network device statistics -// - one scatter chart per network device, 
showing the device's utilization over time -// - each network stat is a separate dataset within the chart -func networkTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - var out string - nicStats := make(map[string][][]string) - for i := range tableValues.Fields[0].Values { - drive := tableValues.Fields[1].Values[i] - if _, ok := nicStats[drive]; !ok { - nicStats[drive] = make([][]string, len(tableValues.Fields)-2) - } - for j := range len(tableValues.Fields) - 2 { - nicStats[drive][j] = append(nicStats[drive][j], tableValues.Fields[j+2].Values[i]) - } - } - var keys []string - for drive := range nicStats { - keys = append(keys, drive) - } - sort.Strings(keys) - for _, nic := range keys { - data := [][]float64{} - datasetNames := []string{} - for i, statVals := range nicStats[nic] { - points := []float64{} - for i, val := range statVals { - if val == "" { - slog.Error("empty stat value", slog.String("nic", nic), slog.Int("index", i)) - return "" - } - util, err := strconv.ParseFloat(val, 64) - if err != nil { - slog.Error("error parsing stat", slog.String("error", err.Error())) - return "" - } - points = append(points, util) - } - if len(points) > 0 { - data = append(data, points) - datasetNames = append(datasetNames, tableValues.Fields[i+2].Name) - } - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: "", - TitleText: nic, - DisplayTitle: "true", - DisplayLegend: "true", - AspectRatio: "2", - SuggestedMin: "0", - SuggestedMax: "0", - } - out += telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) - } - return out -} - -func memoryTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - data := [][]float64{} - datasetNames := []string{} - for _, field := range tableValues.Fields[1:] { - points := []float64{} - for _, val := range field.Values { - if val == "" { - break - } - stat, err := strconv.ParseFloat(val, 64) - if err != nil { - slog.Error("error parsing stat", slog.String("error", err.Error())) - return "" - } - points = append(points, stat) - } - if len(points) > 0 { - data = append(data, points) - datasetNames = append(datasetNames, field.Name) - } - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: "kilobytes", - TitleText: "", - DisplayTitle: "false", - DisplayLegend: "true", - AspectRatio: "2", - SuggestedMin: "0", - SuggestedMax: "0", - } - return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) -} - -func averageFrequencyTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - data := [][]float64{} - datasetNames := []string{} - for _, field := range tableValues.Fields[1:] { - points := []float64{} - for _, val := range field.Values { - if val == "" { - break - } - stat, err := strconv.ParseFloat(val, 64) - if err != nil { - slog.Error("error parsing stat", slog.String("error", err.Error())) - return "" - } - points = append(points, stat) - } - if len(points) > 0 { - data = append(data, points) - datasetNames = append(datasetNames, field.Name) - } - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: "MHz", - TitleText: "", - DisplayTitle: "false", - DisplayLegend: "true", - AspectRatio: "2", - SuggestedMin: "0", - SuggestedMax: "0", - } - return 
telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) -} - -func powerTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - data := [][]float64{} - datasetNames := []string{} - for _, field := range tableValues.Fields[1:] { - points := []float64{} - for _, val := range field.Values { - if val == "" { - break - } - stat, err := strconv.ParseFloat(val, 64) - if err != nil { - slog.Error("error parsing stat", slog.String("error", err.Error())) - return "" - } - points = append(points, stat) - } - if len(points) > 0 { - data = append(data, points) - datasetNames = append(datasetNames, field.Name) - } - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: "Watts", - TitleText: "", - DisplayTitle: "false", - DisplayLegend: "true", - AspectRatio: "2", - SuggestedMin: "0", - SuggestedMax: "0", - } - return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) -} - -func temperatureTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - data := [][]float64{} - datasetNames := []string{} - for _, field := range tableValues.Fields[1:] { - points := []float64{} - for _, val := range field.Values { - if val == "" { - break - } - stat, err := strconv.ParseFloat(val, 64) - if err != nil { - slog.Error("error parsing stat", slog.String("error", err.Error())) - return "" - } - points = append(points, stat) - } - if len(points) > 0 { - data = append(data, points) - datasetNames = append(datasetNames, field.Name) - } - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: "Celsius", - TitleText: "", - DisplayTitle: "false", - DisplayLegend: "true", - AspectRatio: "2", - SuggestedMin: "0", - SuggestedMax: "0", - } - return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) -} - -func ipcTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - data := [][]float64{} - datasetNames := []string{} - for _, field := range tableValues.Fields[1:] { - points := []float64{} - for _, val := range field.Values { - if val == "" { - break - } - stat, err := strconv.ParseFloat(val, 64) - if err != nil { - slog.Error("error parsing stat", slog.String("error", err.Error())) - return "" - } - points = append(points, stat) - } - if len(points) > 0 { - data = append(data, points) - datasetNames = append(datasetNames, field.Name) - } - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: "IPC", - TitleText: "", - DisplayTitle: "false", - DisplayLegend: "true", - AspectRatio: "2", - SuggestedMin: "0", - SuggestedMax: "0", - } - return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) -} - -func c6TelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - data := [][]float64{} - datasetNames := []string{} - for _, field := range tableValues.Fields[1:] { - points := []float64{} - for _, val := range field.Values { - if val == "" { - break - } - stat, err := strconv.ParseFloat(val, 64) - if err != nil { - slog.Error("error parsing stat", slog.String("error", err.Error())) - return "" - } - points = append(points, stat) - } - if len(points) > 0 { - data = append(data, points) - datasetNames = append(datasetNames, field.Name) - } - } - chartConfig := 
chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: "% C6 Residency", - TitleText: "", - DisplayTitle: "false", - DisplayLegend: "true", - AspectRatio: "2", - SuggestedMin: "0", - SuggestedMax: "0", - } - return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) -} - -// instructionTelemetryTableHTMLRenderer renders instruction set usage statistics. -// Each category is a separate dataset within the chart. -// Categories with zero total usage are hidden by default. -// Categories are sorted in two tiers: first, all non-zero categories are sorted alphabetically; -// then, all zero-sum categories are sorted alphabetically and placed after the non-zero categories. -func instructionTelemetryTableHTMLRenderer(tableValues table.TableValues, targetname string) string { - // Collect entries with their sums so we can sort per requirements - type instrEntry struct { - name string - points []float64 - sum float64 - } - entries := []instrEntry{} - for _, field := range tableValues.Fields[1:] { // skip timestamp field - points := []float64{} - sum := 0.0 - for _, val := range field.Values { - if val == "" { // end of data for this category - break - } - stat, err := strconv.ParseFloat(val, 64) - if err != nil { - slog.Error("error parsing stat", slog.String("error", err.Error())) - return "" - } - points = append(points, stat) - sum += stat - } - if len(points) > 0 { // only include categories with at least one point - entries = append(entries, instrEntry{name: field.Name, points: points, sum: sum}) - } - } - // Partition into non-zero and zero-sum groups - nonZero := []instrEntry{} - zero := []instrEntry{} - for _, e := range entries { - if e.sum > 0 { - nonZero = append(nonZero, e) - } else { - zero = append(zero, e) - } - } - sort.Slice(nonZero, func(i, j int) bool { return nonZero[i].name < nonZero[j].name }) - sort.Slice(zero, func(i, j int) bool { return zero[i].name < zero[j].name }) - ordered := append(nonZero, zero...) 
- data := make([][]float64, 0, len(ordered)) - datasetNames := make([]string, 0, len(ordered)) - hiddenFlags := make([]bool, 0, len(ordered)) - for _, e := range ordered { - data = append(data, e.points) - datasetNames = append(datasetNames, e.name) - // hide zero-sum categories by default - hiddenFlags = append(hiddenFlags, e.sum == 0) - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: "% Samples", - TitleText: "", - DisplayTitle: "false", - DisplayLegend: "true", - AspectRatio: "1", // extra tall due to large number of data sets - SuggestedMin: "0", - SuggestedMax: "0", - } - return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, hiddenFlags) -} - -func renderGaudiStatsChart(tableValues table.TableValues, chartStatFieldName string, titleText string, yAxisText string, suggestedMax string) string { - data := [][]float64{} - datasetNames := []string{} - // timestamp is in the first field - // find the module_id field index - moduleIdFieldIdx, err := table.GetFieldIndex("module_id", tableValues) - if err != nil { - slog.Error("no gaudi module_id field found") - return "" - } - // find the chartStatFieldName field index - chartStatFieldIndex, err := table.GetFieldIndex(chartStatFieldName, tableValues) - if err != nil { - slog.Error("no gaudi chartStatFieldName field found") - return "" - } - // group the data points by module_id - moduleStat := make(map[string][]float64) - for i := range tableValues.Fields[0].Values { - moduleId := tableValues.Fields[moduleIdFieldIdx].Values[i] - val, err := strconv.ParseFloat(tableValues.Fields[chartStatFieldIndex].Values[i], 64) - if err != nil { - slog.Error("error parsing utilization", slog.String("error", err.Error())) - return "" - } - if _, ok := moduleStat[moduleId]; !ok { - moduleStat[moduleId] = []float64{} - } - moduleStat[moduleId] = append(moduleStat[moduleId], val) - } - // sort the module ids - var moduleIds []string - for moduleId := range moduleStat { - moduleIds = append(moduleIds, moduleId) - } - sort.Strings(moduleIds) - // build the data - for _, moduleId := range moduleIds { - if len(moduleStat[moduleId]) > 0 { - data = append(data, moduleStat[moduleId]) - datasetNames = append(datasetNames, "module "+moduleId) - } - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: yAxisText, - TitleText: titleText, - DisplayTitle: "true", - DisplayLegend: "true", - AspectRatio: "2", - SuggestedMin: "0", - SuggestedMax: suggestedMax, - } - return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) -} - -func gaudiTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - out := "" - out += renderGaudiStatsChart(tableValues, "utilization.aip [%]", "Utilization", "% Utilization", "100") - out += renderGaudiStatsChart(tableValues, "memory.free [MiB]", "Memory Free", "Memory (MiB)", "0") - out += renderGaudiStatsChart(tableValues, "memory.used [MiB]", "Memory Used", "Memory (MiB)", "0") - out += renderGaudiStatsChart(tableValues, "power.draw [W]", "Power", "Watts", "0") - out += renderGaudiStatsChart(tableValues, "temperature.aip [C]", "Temperature", "Temperature (C)", "0") - return out -} - -func pduTelemetryTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - data := [][]float64{} - for _, field := range tableValues.Fields[1:] { - points := []float64{} - for _, val := 
range field.Values { - if val == "" { - break - } - stat, err := strconv.ParseFloat(val, 64) - if err != nil { - slog.Error("error parsing stat", slog.String("error", err.Error())) - return "" - } - points = append(points, stat) - } - if len(points) > 0 { - data = append(data, points) - } - } - datasetNames := []string{} - for _, field := range tableValues.Fields[1:] { - datasetNames = append(datasetNames, field.Name) - } - chartConfig := chartTemplateStruct{ - ID: fmt.Sprintf("%s%d", tableValues.Name, util.RandUint(10000)), - XaxisText: "Time", - YaxisText: "Watts", - TitleText: "", - DisplayTitle: "false", - DisplayLegend: "true", - AspectRatio: "2", - SuggestedMin: "0", - SuggestedMax: "0", - } - return telemetryTableHTMLRenderer(tableValues, data, datasetNames, chartConfig, nil) -} - -func callStackFrequencyTableHTMLRenderer(tableValues table.TableValues, targetName string) string { - out := ` -` - out += renderFlameGraph("Native", tableValues, "Native Stacks") - out += renderFlameGraph("Java", tableValues, "Java Stacks") - return out -} - -func kernelLockAnalysisHTMLRenderer(tableValues table.TableValues, targetName string) string { - values := [][]string{} - var tableValueStyles [][]string - for _, field := range tableValues.Fields { - rowValues := []string{} - rowValues = append(rowValues, field.Name) - rowValues = append(rowValues, htmltemplate.HTMLEscapeString(field.Values[0])) - values = append(values, rowValues) - rowStyles := []string{} - rowStyles = append(rowStyles, "font-weight:bold") - rowStyles = append(rowStyles, "white-space: pre-wrap") - tableValueStyles = append(tableValueStyles, rowStyles) - } - return renderHTMLTable([]string{}, values, "pure-table pure-table-striped", tableValueStyles) -} diff --git a/internal/report/render_raw.go b/internal/report/render_raw.go index 8988068a..73dbc51d 100644 --- a/internal/report/render_raw.go +++ b/internal/report/render_raw.go @@ -8,6 +8,7 @@ import ( "fmt" "os" "perfspect/internal/script" + "perfspect/internal/table" "strings" ) @@ -21,7 +22,11 @@ type RawReport struct { // CreateRawReport creates a raw report with the specified table names, script outputs, and target name. // It marshals the report into a JSON format with indentation for readability. // The function returns the JSON byte slice and any error encountered during the process. 
-func CreateRawReport(tableNames []string, scriptOutputs map[string]script.ScriptOutput, targetName string) (out []byte, err error) { +func CreateRawReport(tables []table.TableDefinition, scriptOutputs map[string]script.ScriptOutput, targetName string) (out []byte, err error) { + tableNames := []string{} + for _, tbl := range tables { + tableNames = append(tableNames, tbl.Name) + } report := RawReport{ TargetName: targetName, TableNames: tableNames, diff --git a/internal/report/render_text.go b/internal/report/render_text.go index e7cbef4c..07199446 100644 --- a/internal/report/render_text.go +++ b/internal/report/render_text.go @@ -10,9 +10,7 @@ import ( ) // Package-level map for custom text renderers -var customTextRenderers = map[string]table.TextTableRenderer{ - table.ConfigurationTableName: configurationTableTextRenderer, -} +var customTextRenderers = map[string]table.TextTableRenderer{} // getCustomTextRenderer returns the custom text renderer for a table, or nil if no custom renderer exists func getCustomTextRenderer(tableName string) table.TextTableRenderer { @@ -33,7 +31,7 @@ func createTextReport(allTableValues []table.TableValues) (out []byte, err error } sb.WriteString("\n") if len(tableValues.Fields) == 0 || len(tableValues.Fields[0].Values) == 0 { - msg := noDataFound + msg := NoDataFound if tableValues.NoDataFound != "" { msg = tableValues.NoDataFound } @@ -113,44 +111,3 @@ func DefaultTextTableRendererFunc(tableValues table.TableValues) string { } return sb.String() } - -// configurationTableTextRenderer renders the configuration table for text reports. -// It's similar to the default text table renderer, but uses the Description field -// to show the command line argument for each config item. -// Example output: -// Configuration -// ============= -// Cores per Socket: 86 --cores -// L3 Cache: 336M --llc -// Package Power / TDP: 350W --tdp -// All-Core Max Frequency: 3.2GHz --core-max -func configurationTableTextRenderer(tableValues table.TableValues) string { - var sb strings.Builder - - // Find the longest field name and value for formatting - maxFieldNameLen := 0 - maxValueLen := 0 - for _, field := range tableValues.Fields { - if len(field.Name) > maxFieldNameLen { - maxFieldNameLen = len(field.Name) - } - if len(field.Values) > 0 && len(field.Values[0]) > maxValueLen { - maxValueLen = len(field.Values[0]) - } - } - - // Print each field with name, value, and description (command-line arg) - for _, field := range tableValues.Fields { - var value string - if len(field.Values) > 0 { - value = field.Values[0] - } - // Format: "Field Name: Value Description" - sb.WriteString(fmt.Sprintf("%-*s %-*s %s\n", - maxFieldNameLen+1, field.Name+":", - maxValueLen, value, - field.Description)) - } - - return sb.String() -} diff --git a/internal/report/report.go b/internal/report/report.go index cdb9624e..2fdfc03c 100644 --- a/internal/report/report.go +++ b/internal/report/report.go @@ -19,7 +19,7 @@ const ( FormatAll = "all" ) -const noDataFound = "No data found." +const NoDataFound = "No data found." var FormatOptions = []string{FormatHtml, FormatXlsx, FormatJson, FormatTxt} @@ -36,7 +36,7 @@ var FormatOptions = []string{FormatHtml, FormatXlsx, FormatJson, FormatTxt} // Returns: // - out: The generated report as a byte slice. // - err: An error, if any occurred during report generation. 
-func Create(format string, allTableValues []table.TableValues, targetName string) (out []byte, err error) { +func Create(format string, allTableValues []table.TableValues, targetName string, systemSummaryTableName string) (out []byte, err error) { // make sure that all fields have the same number of values for _, tableValue := range allTableValues { numRows := -1 @@ -59,7 +59,7 @@ func Create(format string, allTableValues []table.TableValues, targetName string case FormatHtml: return createHtmlReport(allTableValues, targetName) case FormatXlsx: - return createXlsxReport(allTableValues) + return createXlsxReport(allTableValues, systemSummaryTableName) } panic(fmt.Sprintf("expected one of %s, got %s", strings.Join(FormatOptions, ", "), format)) } @@ -77,12 +77,12 @@ func Create(format string, allTableValues []table.TableValues, targetName string // - err: An error if the report generation fails. // // Note: If an unsupported format is provided, the function will panic. -func CreateMultiTarget(format string, allTargetsTableValues [][]table.TableValues, targetNames []string, allTableNames []string) (out []byte, err error) { +func CreateMultiTarget(format string, allTargetsTableValues [][]table.TableValues, targetNames []string, allTableNames []string, systemSummaryTableName string) (out []byte, err error) { switch format { case "html": return createHtmlReportMultiTarget(allTargetsTableValues, targetNames, allTableNames) case "xlsx": - return createXlsxReportMultiTarget(allTargetsTableValues, targetNames, allTableNames) + return createXlsxReportMultiTarget(allTargetsTableValues, targetNames, allTableNames, systemSummaryTableName) } panic("only HTML and XLSX multi-target report supported currently") } diff --git a/internal/table/cpu.go b/internal/table/cpu.go deleted file mode 100644 index c5715532..00000000 --- a/internal/table/cpu.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -package table - -import ( - "fmt" - "log/slog" - "strconv" - "strings" - - "perfspect/internal/cpus" - "perfspect/internal/script" - "perfspect/internal/util" -) - -// UarchFromOutput returns the architecture of the CPU that matches family, model, stepping, -// capid4, and devices information from the output or an empty string, if no match is found. 
-func UarchFromOutput(outputs map[string]script.ScriptOutput) string { - family := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) - model := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) - stepping := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) - capid4 := valFromRegexSubmatch(outputs[script.LspciBitsScriptName].Stdout, `^([0-9a-fA-F]+)`) - devices := valFromRegexSubmatch(outputs[script.LspciDevicesScriptName].Stdout, `^([0-9]+)`) - cpu, err := cpus.GetCPUExtended(family, model, stepping, capid4, devices) - if err == nil { - return cpu.MicroArchitecture - } - return "" -} - -func hyperthreadingFromOutput(outputs map[string]script.ScriptOutput) string { - family := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) - model := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) - stepping := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) - sockets := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) - coresPerSocket := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(.+)$`) - cpuCount := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU\(.*:\s*(.+?)$`) - onlineCpus := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^On-line CPU\(s\) list:\s*(.+)$`) - threadsPerCore := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Thread\(s\) per core:\s*(.+)$`) - - numCPUs, err := strconv.Atoi(cpuCount) // logical CPUs - if err != nil { - slog.Error("error parsing cpus from lscpu") - return "" - } - onlineCpusList, err := util.SelectiveIntRangeToIntList(onlineCpus) // logical online CPUs - numOnlineCpus := len(onlineCpusList) - if err != nil { - slog.Error("error parsing online cpus from lscpu") - numOnlineCpus = 0 // set to 0 to indicate parsing failed, will use numCPUs instead - } - numThreadsPerCore, err := strconv.Atoi(threadsPerCore) // logical threads per core - if err != nil { - slog.Error("error parsing threads per core from lscpu") - numThreadsPerCore = 0 - } - numSockets, err := strconv.Atoi(sockets) - if err != nil { - slog.Error("error parsing sockets from lscpu") - return "" - } - numCoresPerSocket, err := strconv.Atoi(coresPerSocket) // physical cores - if err != nil { - slog.Error("error parsing cores per sockets from lscpu") - return "" - } - cpu, err := cpus.GetCPUExtended(family, model, stepping, "", "") - if err != nil { - return "" - } - if numOnlineCpus > 0 && numOnlineCpus < numCPUs { - // if online CPUs list is available, use it to determine the number of CPUs - // supersedes lscpu output of numCPUs which counts CPUs on the system, not online CPUs - numCPUs = numOnlineCpus - } - if cpu.LogicalThreadCount < 2 { - return "N/A" - } else if numThreadsPerCore == 1 { - // if threads per core is 1, hyperthreading is disabled - return "Disabled" - } else if numThreadsPerCore >= 2 { - // if threads per core is greater than or equal to 2, hyperthreading is enabled - return "Enabled" - } else if numCPUs > numCoresPerSocket*numSockets { - // if the threads per core attribute is not available, we can still check if hyperthreading is enabled - // by checking if the number of logical CPUs is greater than the number of physical cores - return "Enabled" - } else { - return "Disabled" - } -} - -func numaCPUListFromOutput(outputs map[string]script.ScriptOutput) string { - 
nodeCPUs := valsFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node[0-9] CPU\(.*:\s*(.+?)$`) - return strings.Join(nodeCPUs, " :: ") -} - -func ppinsFromOutput(outputs map[string]script.ScriptOutput) string { - uniquePpins := []string{} - for line := range strings.SplitSeq(outputs[script.PPINName].Stdout, "\n") { - parts := strings.Split(line, ":") - if len(parts) < 2 { - continue - } - ppin := strings.TrimSpace(parts[1]) - found := false - for _, p := range uniquePpins { - if string(p) == ppin { - found = true - break - } - } - if !found && ppin != "" { - uniquePpins = append(uniquePpins, ppin) - } - } - return strings.Join(uniquePpins, ", ") -} - -func channelsFromOutput(outputs map[string]script.ScriptOutput) string { - family := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^CPU family:\s*(.+)$`) - model := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model:\s*(.+)$`) - stepping := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Stepping:\s*(.+)$`) - capid4 := valFromRegexSubmatch(outputs[script.LspciBitsScriptName].Stdout, `^([0-9a-fA-F]+)`) - devices := valFromRegexSubmatch(outputs[script.LspciDevicesScriptName].Stdout, `^([0-9]+)`) - cpu, err := cpus.GetCPUExtended(family, model, stepping, capid4, devices) - if err != nil { - slog.Error("error getting CPU from CPUdb", slog.String("error", err.Error())) - return "" - } - return fmt.Sprintf("%d", cpu.MemoryChannelCount) -} - -func turboEnabledFromOutput(outputs map[string]script.ScriptOutput) string { - vendor := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Vendor ID:\s*(.+)$`) - switch vendor { - case cpus.IntelVendor: - val := valFromRegexSubmatch(outputs[script.CpuidScriptName].Stdout, `^Intel Turbo Boost Technology\s*= (.+?)$`) - if val == "true" { - return "Enabled" - } - if val == "false" { - return "Disabled" - } - return "" // unknown value - case cpus.AMDVendor: - val := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Frequency boost.*:\s*(.+?)$`) - if val != "" { - return val + " (AMD Frequency Boost)" - } - } - return "" -} - -func tdpFromOutput(outputs map[string]script.ScriptOutput) string { - msrHex := strings.TrimSpace(outputs[script.PackagePowerLimitName].Stdout) - msr, err := strconv.ParseInt(msrHex, 16, 0) - if err != nil || msr == 0 { - return "" - } - return fmt.Sprint(msr/8) + "W" -} - -func chaCountFromOutput(outputs map[string]script.ScriptOutput) string { - // output is the result of three rdmsr calls - // - client cha count - // - cha count - // - spr cha count - // stop when we find a non-zero value - // note: rdmsr writes to stderr on error so we will likely have fewer than 3 lines in stdout - for hexCount := range strings.SplitSeq(outputs[script.ChaCountScriptName].Stdout, "\n") { - if hexCount != "" && hexCount != "0" { - count, err := strconv.ParseInt(hexCount, 16, 64) - if err == nil { - return fmt.Sprintf("%d", count) - } - } - } - return "" -} - -func numaBalancingFromOutput(outputs map[string]script.ScriptOutput) string { - if strings.Contains(outputs[script.NumaBalancingScriptName].Stdout, "1") { - return "Enabled" - } else if strings.Contains(outputs[script.NumaBalancingScriptName].Stdout, "0") { - return "Disabled" - } - return "" -} - -func clusteringModeFromOutput(outputs map[string]script.ScriptOutput) string { - uarch := UarchFromOutput(outputs) - sockets := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(.+)$`) - nodes := 
valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^NUMA node\(s\):\s*(.+)$`) - if uarch == "" || sockets == "" || nodes == "" { - return "" - } - socketCount, err := strconv.Atoi(sockets) - if err != nil { - slog.Error("failed to parse socket count", slog.String("error", err.Error())) - return "" - } - nodeCount, err := strconv.Atoi(nodes) - if err != nil { - slog.Error("failed to parse node count", slog.String("error", err.Error())) - return "" - } - if nodeCount == 0 || socketCount == 0 { - slog.Error("node count or socket count is zero") - return "" - } - nodesPerSocket := nodeCount / socketCount - switch uarch { - case "GNR_X1": - return "All2All" - case "GNR_X2": - switch nodesPerSocket { - case 1: - return "UMA 4 (Quad)" - case 2: - return "SNC 2" - } - case "GNR_X3": - switch nodesPerSocket { - case 1: - return "UMA 6 (Hex)" - case 3: - return "SNC 3" - } - case "SRF_SP": - return "UMA 2 (Hemi)" - case "SRF_AP": - switch nodesPerSocket { - case 1: - return "UMA 4 (Quad)" - case 2: - return "SNC 2" - } - case "CWF": - switch nodesPerSocket { - case 1: - return "UMA 6 (Hex)" - case 3: - return "SNC 3" - } - } - return "" -} diff --git a/internal/table/stacks_test.go b/internal/table/stacks_test.go deleted file mode 100644 index 70472ec0..00000000 --- a/internal/table/stacks_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package table - -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -import ( - "reflect" - "testing" -) - -func TestGetSectionsFromOutput(t *testing.T) { - tests := []struct { - name string - output string - want map[string]string - }{ - { - name: "Valid sections with content", - output: `########## Section A ########## -Content A1 -Content A2 -########## Section B ########## -Content B1 -Content B2 -########## Section C ########## -Content C1`, - want: map[string]string{ - "Section A": "Content A1\nContent A2\n", - "Section B": "Content B1\nContent B2\n", - "Section C": "Content C1\n", - }, - }, - { - name: "Valid sections with empty content", - output: `########## Section A ########## -########## Section B ########## -########## Section C ##########`, - want: map[string]string{ - "Section A": "", - "Section B": "", - "Section C": "", - }, - }, - { - name: "No sections", - output: "No section headers here", - want: map[string]string{}, - }, - { - name: "Empty output", - output: ``, - want: map[string]string{}, - }, - { - name: "Empty lines in output", - output: "\n\n\n", - want: map[string]string{}, - }, - { - name: "Section with trailing newlines", - output: `########## Section A ########## - -Content A1 - -########## Section B ########## -Content B1`, - want: map[string]string{ - "Section A": "\nContent A1\n\n", - "Section B": "Content B1\n", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := getSectionsFromOutput(tt.output) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("getSectionsFromOutput() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestSectionValueFromOutput(t *testing.T) { - tests := []struct { - name string - output string - sectionName string - want string - }{ - { - name: "Section A exists with content", - output: `########## Section A ########## -Content A1 -Content A2 -########## Section B ########## -Content B1 -Content B2`, - sectionName: "Section A", - want: "Content A1\nContent A2\n", - }, - { - name: "Section B exists with content", - output: `########## Section A ########## -Content A1 -Content A2 -########## Section B ########## -Content B1 
-Content B2`, - sectionName: "Section B", - want: "Content B1\nContent B2\n", - }, - { - name: "Section exists with no content", - output: `########## Section A ########## -########## Section B ########## -Content B1`, - sectionName: "Section A", - want: "", - }, - { - name: "Section does not exist", - output: `########## Section A ########## -Content A1 -########## Section B ########## -Content B1`, - sectionName: "Section C", - want: "", - }, - { - name: "Empty output", - output: "", - sectionName: "Section A", - want: "", - }, - { - name: "Section with trailing newlines", - output: `########## Section A ########## - -Content A1 - -########## Section B ########## -Content B1`, - sectionName: "Section A", - want: "\nContent A1\n\n", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := sectionValueFromOutput(tt.output, tt.sectionName) - if got != tt.want { - t.Errorf("sectionValueFromOutput() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/internal/table/system.go b/internal/table/system.go deleted file mode 100644 index cc0eac12..00000000 --- a/internal/table/system.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -package table - -import ( - "fmt" - "strings" - "time" - - "perfspect/internal/cpus" - "perfspect/internal/script" -) - -func operatingSystemFromOutput(outputs map[string]script.ScriptOutput) string { - os := valFromRegexSubmatch(outputs[script.EtcReleaseScriptName].Stdout, `^PRETTY_NAME=\"(.+?)\"`) - centos := valFromRegexSubmatch(outputs[script.EtcReleaseScriptName].Stdout, `^(CentOS Linux release .*)`) - if centos != "" { - os = centos - } - return os -} - -func systemSummaryFromOutput(outputs map[string]script.ScriptOutput) string { - // BASELINE: 1-node, 2x Intel® Xeon® , xx cores, 100W TDP, HT On/Off?, Turbo On/Off?, Total Memory xxx GB (xx slots/ xx GB/ xxxx MHz [run @ xxxx MHz] ), , , , . Test by Intel as of . - template := "1-node, %s, %sx %s, %s cores, %s TDP, %s %s, %s %s, Total Memory %s, BIOS %s, microcode %s, %s, %s, %s, %s. Test by Intel as of %s." - var systemType, socketCount, cpuModel, coreCount, tdp, htLabel, htOnOff, turboLabel, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion, date string - - // system type - systemType = valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Manufacturer:\s*(.+?)$`) + " " + valFromDmiDecodeRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, "1", `^Product Name:\s*(.+?)$`) - // socket count - socketCount = valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Socket\(s\):\s*(\d+)$`) - // CPU model - cpuModel = valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Model name:\s*(.+?)$`) - // core count - coreCount = valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Core\(s\) per socket:\s*(\d+)$`) - // TDP - tdp = tdpFromOutput(outputs) - if tdp == "" { - tdp = "?" - } - vendor := valFromRegexSubmatch(outputs[script.LscpuScriptName].Stdout, `^Vendor ID:\s*(.+)$`) - // hyperthreading - htLabel = "HT" - if vendor == cpus.AMDVendor { - htLabel = "SMT" - } - htOnOff = hyperthreadingFromOutput(outputs) - switch htOnOff { - case "Enabled": - htOnOff = "On" - case "Disabled": - htOnOff = "Off" - case "N/A": - htOnOff = "N/A" - default: - htOnOff = "?" 
- } - // turbo - turboLabel = "Turbo" - if vendor == cpus.AMDVendor { - turboLabel = "Boost" - } - turboOnOff = turboEnabledFromOutput(outputs) - if strings.Contains(strings.ToLower(turboOnOff), "enabled") { - turboOnOff = "On" - } else if strings.Contains(strings.ToLower(turboOnOff), "disabled") { - turboOnOff = "Off" - } else { - turboOnOff = "?" - } - // memory - installedMem = installedMemoryFromOutput(outputs) - // BIOS - biosVersion = valFromRegexSubmatch(outputs[script.DmidecodeScriptName].Stdout, `^Version:\s*(.+?)$`) - // microcode - uCodeVersion = valFromRegexSubmatch(outputs[script.ProcCpuinfoScriptName].Stdout, `^microcode.*:\s*(.+?)$`) - // NICs - nics = nicSummaryFromOutput(outputs) - // disks - disks = diskSummaryFromOutput(outputs) - // OS - operatingSystem = operatingSystemFromOutput(outputs) - // kernel - kernelVersion = valFromRegexSubmatch(outputs[script.UnameScriptName].Stdout, `^Linux \S+ (\S+)`) - // date - date = strings.TrimSpace(outputs[script.DateScriptName].Stdout) - // parse date so that we can format it - parsedTime, err := time.Parse("Mon Jan 2 15:04:05 MST 2006", date) // without AM/PM - if err != nil { - parsedTime, err = time.Parse("Mon Jan 2 15:04:05 AM MST 2006", date) // with AM/PM - } - if err == nil { - date = parsedTime.Format("January 2 2006") - } - - // put it all together - return fmt.Sprintf(template, systemType, socketCount, cpuModel, coreCount, tdp, htLabel, htOnOff, turboLabel, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion, date) -} diff --git a/internal/table/table.go b/internal/table/table.go index 64b09763..ae52ae75 100644 --- a/internal/table/table.go +++ b/internal/table/table.go @@ -58,17 +58,8 @@ type TableDefinition struct { InsightsFunc InsightsRetriever } -// GetTableByName retrieves a table definition by its name. -func GetTableByName(name string) TableDefinition { - if table, ok := tableDefinitions[name]; ok { - return table - } - panic(fmt.Sprintf("table not found: %s", name)) -} - // IsTableForTarget checks if the given table is applicable for the specified target -func IsTableForTarget(tableName string, myTarget target.Target) bool { - table := GetTableByName(tableName) +func IsTableForTarget(table TableDefinition, myTarget target.Target) bool { if len(table.Architectures) > 0 { architecture, err := myTarget.GetArchitecture() if err != nil { @@ -120,21 +111,13 @@ func IsTableForTarget(tableName string, myTarget target.Target) bool { // ProcessTables processes the given tables and script outputs to generate table values. // It collects values for each field in the tables and returns a slice of TableValues. // If any error occurs during processing, it is returned along with the table values. 
-func ProcessTables(tableNames []string, scriptOutputs map[string]script.ScriptOutput) (allTableValues []TableValues, err error) { - for _, tableName := range tableNames { - allTableValues = append(allTableValues, GetValuesForTable(tableName, scriptOutputs)) +func ProcessTables(tables []TableDefinition, scriptOutputs map[string]script.ScriptOutput) (allTableValues []TableValues, err error) { + for _, table := range tables { + allTableValues = append(allTableValues, GetValuesForTable(table, scriptOutputs)) } return } -// GetScriptNamesForTable returns the script names required to generate the table with the given name -func GetScriptNamesForTable(name string) []string { - if _, ok := tableDefinitions[name]; !ok { - panic(fmt.Sprintf("table not found: %s", name)) - } - return tableDefinitions[name].ScriptNames -} - // GetFieldIndex returns the index of a field with the given name in the TableValues structure. // Returns: // - int: The index of the field if found and valid, -1 otherwise @@ -152,27 +135,22 @@ func GetFieldIndex(fieldName string, tableValues TableValues) (int, error) { } // GetValuesForTable returns the fields and their values for the table with the given name -func GetValuesForTable(name string, outputs map[string]script.ScriptOutput) TableValues { - // if table with given name doesn't exist, panic - if _, ok := tableDefinitions[name]; !ok { - panic(fmt.Sprintf("table not found: %s", name)) - } - table := tableDefinitions[name] +func GetValuesForTable(table TableDefinition, outputs map[string]script.ScriptOutput) TableValues { // ValuesFunc can't be nil if table.FieldsFunc == nil { - panic(fmt.Sprintf("table %s, ValuesFunc cannot be nil", name)) + panic(fmt.Sprintf("table %s, ValuesFunc cannot be nil", table.Name)) } // call the table's FieldsFunc to get the table's fields and values fields := table.FieldsFunc(outputs) tableValues := TableValues{ - TableDefinition: tableDefinitions[name], + TableDefinition: table, Fields: fields, } // sanity check if err := validateTableValues(tableValues); err != nil { - slog.Error("table validation failed", "table", name, "error", err) + slog.Error("table validation failed", "table", table.Name, "error", err) return TableValues{ - TableDefinition: tableDefinitions[name], + TableDefinition: table, Fields: []Field{}, } } diff --git a/internal/table/table_helpers.go b/internal/table/table_helpers.go deleted file mode 100644 index 5b99bd48..00000000 --- a/internal/table/table_helpers.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (C) 2021-2025 Intel Corporation -// SPDX-License-Identifier: BSD-3-Clause - -// table_helpers.go contains base helper functions that are used to extract values from the output of the scripts. - -package table - -import ( - "regexp" - "strings" -) - -// valFromRegexSubmatch searches for a regex pattern in the given output string and returns the first captured group. -// If no match is found, an empty string is returned. -func valFromRegexSubmatch(output string, regex string) string { - re := regexp.MustCompile(regex) - for line := range strings.SplitSeq(output, "\n") { - match := re.FindStringSubmatch(strings.TrimSpace(line)) - if len(match) > 1 { - return match[1] - } - } - return "" -} - -// valsFromRegexSubmatch extracts the captured groups from each line in the output -// that matches the given regular expression. -// It returns a slice of strings containing the captured values. 
-func valsFromRegexSubmatch(output string, regex string) []string { - var vals []string - re := regexp.MustCompile(regex) - for line := range strings.SplitSeq(output, "\n") { - match := re.FindStringSubmatch(strings.TrimSpace(line)) - if len(match) > 1 { - vals = append(vals, match[1]) - } - } - return vals -} - -// return all matches for all capture groups in regex -func valsArrayFromRegexSubmatch(output string, regex string) (vals [][]string) { - re := regexp.MustCompile(regex) - for line := range strings.SplitSeq(output, "\n") { - match := re.FindStringSubmatch(line) - if len(match) > 1 { - vals = append(vals, match[1:]) - } - } - return -} - -// valFromDmiDecodeRegexSubmatch extracts a value from the DMI decode output using a regular expression. -// It takes the DMI decode output, the DMI type, and the regular expression as input parameters. -// It returns the extracted value as a string. -func valFromDmiDecodeRegexSubmatch(dmiDecodeOutput string, dmiType string, regex string) string { - return valFromRegexSubmatch(getDmiDecodeType(dmiDecodeOutput, dmiType), regex) -} - -func valsArrayFromDmiDecodeRegexSubmatch(dmiDecodeOutput string, dmiType string, regexes ...string) (vals [][]string) { - var res []*regexp.Regexp - for _, r := range regexes { - re := regexp.MustCompile(r) - res = append(res, re) - } - for _, entry := range getDmiDecodeEntries(dmiDecodeOutput, dmiType) { - row := make([]string, len(res)) - for _, line := range entry { - for i, re := range res { - match := re.FindStringSubmatch(strings.TrimSpace(line)) - if len(match) > 1 { - row[i] = match[1] - } - } - } - vals = append(vals, row) - } - return -} - -// getDmiDecodeType extracts the lines from the given `dmiDecodeOutput` that belong to the specified `dmiType`. -func getDmiDecodeType(dmiDecodeOutput string, dmiType string) string { - var lines []string - start := false - for line := range strings.SplitSeq(dmiDecodeOutput, "\n") { - if start && strings.HasPrefix(line, "Handle ") { - start = false - } - if strings.Contains(line, "DMI type "+dmiType+",") { - start = true - } - if start { - lines = append(lines, line) - } - } - return strings.Join(lines, "\n") -} - -// getDmiDecodeEntries extracts the entries from the given `dmiDecodeOutput` that belong to the specified `dmiType`. 
-func getDmiDecodeEntries(dmiDecodeOutput string, dmiType string) (entries [][]string) { - lines := strings.Split(dmiDecodeOutput, "\n") - var entry []string - typeMatch := false - for _, line := range lines { - if strings.HasPrefix(line, "Handle ") { - if strings.Contains(line, "DMI type "+dmiType+",") { - // type match - typeMatch = true - entry = []string{} - } else { - // not a type match - typeMatch = false - } - } - if !typeMatch { - continue - } - if line == "" { - // end of type match entry - entries = append(entries, entry) - } else { - // a line in the entry - entry = append(entry, line) - } - } - return -} From 39e1b930c257508ea32541ecb6f6397941d01a6a Mon Sep 17 00:00:00 2001 From: "Harper, Jason M" Date: Fri, 5 Dec 2025 16:08:19 -0800 Subject: [PATCH 6/6] add back support for --input Signed-off-by: Harper, Jason M --- internal/common/common.go | 61 ++++++++++++++++++++--------------- internal/report/render_raw.go | 6 +++- 2 files changed, 40 insertions(+), 27 deletions(-) diff --git a/internal/common/common.go b/internal/common/common.go index e2b8cbbe..f44ea1e3 100644 --- a/internal/common/common.go +++ b/internal/common/common.go @@ -57,9 +57,6 @@ type TargetScriptOutputs struct { func (tso *TargetScriptOutputs) GetScriptOutputs() map[string]script.ScriptOutput { return tso.ScriptOutputs } -func (tso *TargetScriptOutputs) GetTables() []table.TableDefinition { - return tso.Tables -} const ( TableNameInsights = "Insights" @@ -140,7 +137,7 @@ func (rc *ReportingCommand) Run() error { var myTargets []target.Target if FlagInput != "" { var err error - orderedTargetScriptOutputs, err = outputsFromInput(rc.SummaryTableName) + orderedTargetScriptOutputs, err = outputsFromInput(rc.Tables, rc.SummaryTableName) if err != nil { fmt.Fprintf(os.Stderr, "Error: %v\n", err) slog.Error(err.Error()) @@ -477,29 +474,41 @@ func extractTableNamesFromValues(allTargetsTableValues [][]table.TableValues) [] return targetTableNames } +func findTableByName(tables []table.TableDefinition, name string) (*table.TableDefinition, error) { + for _, tbl := range tables { + if tbl.Name == name { + return &tbl, nil + } + } + return nil, fmt.Errorf("table [%s] not found", name) +} + // outputsFromInput reads the raw file(s) and returns the data in the order of the raw files -// TODO: this won't work post re-factor -func outputsFromInput(summaryTableName string) ([]TargetScriptOutputs, error) { - return nil, fmt.Errorf("outputsFromInput not implemented post refactor") - // orderedTargetScriptOutputs := []TargetScriptOutputs{} - // tables := []table.TableDefinition{} - // // read the raw file(s) as JSON - // rawReports, err := report.ReadRawReports(FlagInput) - // if err != nil { - // err = fmt.Errorf("failed to read raw file(s): %w", err) - // return nil, err - // } - // for _, rawReport := range rawReports { - // for _, tableName := range rawReport.TableNames { // just in case someone tries to use the raw files that were collected with a different set of categories - // // filter out tables that we add after processing - // if tableName == TableNameInsights || tableName == TableNamePerfspect || tableName == summaryTableName { - // continue - // } - // tables = append(tables, table.GetTableByName(tableName)) - // } - // orderedTargetScriptOutputs = append(orderedTargetScriptOutputs, TargetScriptOutputs{TargetName: rawReport.TargetName, ScriptOutputs: rawReport.ScriptOutputs, Tables: tables}) - // } - // return orderedTargetScriptOutputs, nil +func outputsFromInput(tables []table.TableDefinition, summaryTableName 
string) ([]TargetScriptOutputs, error) { + orderedTargetScriptOutputs := []TargetScriptOutputs{} + includedTables := []table.TableDefinition{} + // read the raw file(s) as JSON + rawReports, err := report.ReadRawReports(FlagInput) + if err != nil { + err = fmt.Errorf("failed to read raw file(s): %w", err) + return nil, err + } + for _, rawReport := range rawReports { + for _, tableName := range rawReport.TableNames { // just in case someone tries to use the raw files that were collected with a different set of categories + // filter out tables that we add after processing + if tableName == TableNameInsights || tableName == TableNamePerfspect || tableName == summaryTableName { + continue + } + includedTable, err := findTableByName(tables, tableName) + if err != nil { + slog.Warn("table from raw report not found in current tables", slog.String("table", tableName), slog.String("target", rawReport.TargetName)) + continue + } + includedTables = append(includedTables, *includedTable) + } + orderedTargetScriptOutputs = append(orderedTargetScriptOutputs, TargetScriptOutputs{TargetName: rawReport.TargetName, ScriptOutputs: rawReport.ScriptOutputs, Tables: includedTables}) + } + return orderedTargetScriptOutputs, nil } // outputsFromTargets runs the scripts on the targets and returns the data in the order of the targets diff --git a/internal/report/render_raw.go b/internal/report/render_raw.go index 73dbc51d..6451429a 100644 --- a/internal/report/render_raw.go +++ b/internal/report/render_raw.go @@ -79,9 +79,13 @@ func ReadRawReports(path string) (reports []RawReport, err error) { func readRawReport(rawReportPath string) (report RawReport, err error) { reportBytes, err := os.ReadFile(rawReportPath) // #nosec G304 if err != nil { - err = fmt.Errorf("failed to read raw report file: %v", err) + err = fmt.Errorf("failed to read raw report file (%s): %v", rawReportPath, err) return } err = json.Unmarshal(reportBytes, &report) + if err != nil { + err = fmt.Errorf("failed to unmarshal raw report JSON: %v", err) + return + } return }
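
For reference, with table definitions now passed around as []table.TableDefinition instead of being looked up by name, re-rendering a report from a raw file captured for --input reduces to roughly the sketch below. This is an illustrative sketch only, assuming the signatures shown in this series (report.ReadRawReports, table.ProcessTables, report.Create, report.FormatTxt); renderFromRaw, allTables, the input file name, and the "System Summary" table name are placeholders, not identifiers from the codebase.

// Sketch only: wiring the refactored pieces together for the --input path.
// Assumes the signatures introduced in this series; allTables and the summary
// table name are caller-supplied placeholders.
package main

import (
	"fmt"
	"log"

	"perfspect/internal/report"
	"perfspect/internal/table"
)

// renderFromRaw is a hypothetical helper, not part of the patch.
func renderFromRaw(rawPath string, allTables []table.TableDefinition, summaryTableName string) error {
	rawReports, err := report.ReadRawReports(rawPath)
	if err != nil {
		return fmt.Errorf("failed to read raw file(s): %w", err)
	}
	for _, raw := range rawReports {
		// keep only the definitions whose names were recorded in the raw report
		selected := []table.TableDefinition{}
		for _, name := range raw.TableNames {
			for _, def := range allTables {
				if def.Name == name {
					selected = append(selected, def)
					break
				}
			}
		}
		// table values are produced from the stored script outputs; no live target is needed
		values, err := table.ProcessTables(selected, raw.ScriptOutputs)
		if err != nil {
			return err
		}
		out, err := report.Create(report.FormatTxt, values, raw.TargetName, summaryTableName)
		if err != nil {
			return err
		}
		fmt.Println(string(out))
	}
	return nil
}

func main() {
	// allTables would come from the application's table registry (omitted here).
	if err := renderFromRaw("target_raw.json", nil, "System Summary"); err != nil {
		log.Fatal(err)
	}
}

This is roughly the flow that outputsFromInput and the ReportingCommand wiring implement internally; passing the definitions by value is what allows the package-level GetTableByName registry lookup to be deleted.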