diff --git a/.gitignore b/.gitignore index 1589650..5684d8f 100644 --- a/.gitignore +++ b/.gitignore @@ -17,4 +17,7 @@ # Dependency directories (remove the comment below to include it) # vendor/ bin -vendor \ No newline at end of file +vendor +cache +lib +*.json diff --git a/Makefile.hwloc b/Makefile.hwloc new file mode 100644 index 0000000..152f047 --- /dev/null +++ b/Makefile.hwloc @@ -0,0 +1,35 @@ +# This makefile will be used when we can add hwloc - there is currently a bug. +HERE ?= $(shell pwd) +LOCALBIN ?= $(shell pwd)/bin + +# Install hwloc here for use to compile, etc. +LOCALLIB ?= $(shell pwd)/lib +HWLOC_INCLUDE ?= $(LOCALLIB)/include/hwloc.h +BUILDENVVAR=CGO_CFLAGS="-I$(LOCALLIB)/include" CGO_LDFLAGS="-L$(LOCALLIB)/lib -lhwloc" + +.PHONY: all + +all: build + +.PHONY: $(LOCALBIN) +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +.PHONY: $(LOCALLIB) +$(LOCALLIB): + mkdir -p $(LOCALLIB) + +$(HWLOC_INCLUDE): + git clone --depth 1 https://github.com/open-mpi/hwloc /tmp/hwloc || true && \ + cd /tmp/hwloc && ./autogen.sh && \ + ./configure --enable-static --disable-shared LDFLAGS="-static" --prefix=$(LOCALLIB)/ && \ + make LDFLAGS=-all-static && make install + +build: $(LOCALBIN) $(HWLOC_INCLUDE) + GO111MODULE="on" $(BUILDENVVAR) go build -ldflags '-w' -o $(LOCALBIN)/compspec cmd/compspec/compspec.go + +build-arm: $(LOCALBIN) $(HWLOC_INCLUDE) + GO111MODULE="on" $(BUILDENVVAR) GOARCH=arm64 go build -ldflags '-w' -o $(LOCALBIN)/compspec-arm cmd/compspec/compspec.go + +build-ppc: $(LOCALBIN) $(HWLOC_INCLUDE) + GO111MODULE="on" $(BUILDENVVAR) GOARCH=ppc64le go build -ldflags '-w' -o $(LOCALBIN)/compspec-ppc cmd/compspec/compspec.go \ No newline at end of file diff --git a/README.md b/README.md index b06ad2b..863ec32 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,8 @@ This is a prototype compatibility checking tool. Right now our aim is to use in - I'm starting with just Linux. 
I know there are those "other" platforms, but if it doesn't run on HPC or Kubernetes easily I'm not super interested (ahem, Mac and Windows)! - not all extractors work in containers (e.g., kernel needs to be on the host) + - The node feature discovery source doesn't provide mapping of socket -> cores, nor does it give details about logical vs. physical CPU. + - We will likely want to add hwloc go bindings, but there is a bug currently. Note that for development we are using nfd-source that does not require kubernetes: diff --git a/cmd/compspec/compspec.go b/cmd/compspec/compspec.go index 7a3a308..626797e 100644 --- a/cmd/compspec/compspec.go +++ b/cmd/compspec/compspec.go @@ -54,12 +54,21 @@ func main() { cachePath := matchCmd.String("", "cache", &argparse.Options{Help: "A path to a cache for artifacts"}) saveGraph := matchCmd.String("", "cache-graph", &argparse.Options{Help: "Load or use a cached graph"}) - // Create arguments - options := createCmd.StringList("a", "append", &argparse.Options{Help: "Append one or more custom metadata fields to append"}) - specname := createCmd.String("i", "in", &argparse.Options{Required: true, Help: "Input yaml that contains spec for creation"}) - specfile := createCmd.String("o", "out", &argparse.Options{Help: "Save compatibility json artifact to this file"}) - mediaType := createCmd.String("m", "media-type", &argparse.Options{Help: "The expected media-type for the compatibility artifact"}) - allowFailCreate := createCmd.Flag("f", "allow-fail", &argparse.Options{Help: "Allow any specific extractor to fail (and continue extraction)"}) + // Create subcommands - note that "nodes" could be cluster, but could want to make a subset of one + artifactCmd := createCmd.NewCommand("artifact", "Create a new artifact") + nodesCmd := createCmd.NewCommand("nodes", "Create nodes in Json Graph format from extraction data") + + // Artifaction creation arguments + options := artifactCmd.StringList("a", "append", &argparse.Options{Help: "Append one 
or more custom metadata fields to append"}) + specname := artifactCmd.String("i", "in", &argparse.Options{Required: true, Help: "Input yaml that contains spec for creation"}) + specfile := artifactCmd.String("o", "out", &argparse.Options{Help: "Save compatibility json artifact to this file"}) + mediaType := artifactCmd.String("m", "media-type", &argparse.Options{Help: "The expected media-type for the compatibility artifact"}) + allowFailCreate := artifactCmd.Flag("f", "allow-fail", &argparse.Options{Help: "Allow any specific extractor to fail (and continue extraction)"}) + + // Nodes creation arguments + nodesOutFile := nodesCmd.String("", "nodes-output", &argparse.Options{Help: "Output json file for cluster nodes"}) + nodesDir := nodesCmd.String("", "node-dir", &argparse.Options{Required: true, Help: "Input directory with extraction data for nodes"}) + clusterName := nodesCmd.String("", "cluster-name", &argparse.Options{Required: true, Help: "Cluster name to describe in graph"}) // Now parse the arguments err := parser.Parse(os.Args) @@ -75,10 +84,21 @@ func main() { log.Fatalf("Issue with extraction: %s\n", err) } } else if createCmd.Happened() { - err := create.Run(*specname, *options, *specfile, *allowFailCreate) - if err != nil { - log.Fatal(err.Error()) + if artifactCmd.Happened() { + err := create.Artifact(*specname, *options, *specfile, *allowFailCreate) + if err != nil { + log.Fatal(err.Error()) + } + } else if nodesCmd.Happened() { + err := create.Nodes(*nodesDir, *clusterName, *nodesOutFile) + if err != nil { + log.Fatal(err.Error()) + } + } else { + fmt.Println(Header) + fmt.Println("Please provide a --node-dir and (optionally) --nodes-output (json file to write)") } + } else if matchCmd.Happened() { err := match.Run( *manifestFile, diff --git a/cmd/compspec/create/artifact.go b/cmd/compspec/create/artifact.go new file mode 100644 index 0000000..9867927 --- /dev/null +++ b/cmd/compspec/create/artifact.go @@ -0,0 +1,31 @@ +package create + +import ( + 
"strings" + + "github.com/compspec/compspec-go/plugins/creators/artifact" +) + +// Artifact will create a compatibility artifact based on a request in YAML +// TODO likely want to refactor this into a proper create plugin +func Artifact(specname string, fields []string, saveto string, allowFail bool) error { + + // This is janky, oh well + allowFailFlag := "false" + if allowFail { + allowFailFlag = "true" + } + + // assemble options for node creator + creator, err := artifact.NewPlugin() + if err != nil { + return err + } + options := map[string]string{ + "specname": specname, + "fields": strings.Join(fields, "||"), + "saveto": saveto, + "allowFail": allowFailFlag, + } + return creator.Create(options) +} diff --git a/cmd/compspec/create/nodes.go b/cmd/compspec/create/nodes.go new file mode 100644 index 0000000..b7a4f54 --- /dev/null +++ b/cmd/compspec/create/nodes.go @@ -0,0 +1,23 @@ +package create + +import ( + "github.com/compspec/compspec-go/plugins/creators/cluster" +) + +// Nodes will read in one or more node extraction metadata files and generate a single nodes JGF graph +// This is intentended for a registration command. +// TODO this should be converted to a creation (converter) plugin +func Nodes(nodesDir, clusterName, nodeOutFile string) error { + + // assemble options for node creator + creator, err := cluster.NewPlugin() + if err != nil { + return err + } + options := map[string]string{ + "nodes-dir": nodesDir, + "cluster-name": clusterName, + "node-outfile": nodeOutFile, + } + return creator.Create(options) +} diff --git a/cmd/compspec/extract/extract.go b/cmd/compspec/extract/extract.go index f06b2f4..56e7f3a 100644 --- a/cmd/compspec/extract/extract.go +++ b/cmd/compspec/extract/extract.go @@ -15,7 +15,7 @@ func Run(filename string, pluginNames []string, allowFail bool) error { // Womp womp, we only support linux! There is no other way. 
operatingSystem := runtime.GOOS if operatingSystem != "linux" { - return fmt.Errorf("🤓️ Sorry, we only support linux.") + return fmt.Errorf("🤓️ sorry, we only support linux") } // parse [section,...,section] into named plugins and sections @@ -37,7 +37,7 @@ func Run(filename string, pluginNames []string, allowFail bool) error { // This returns an array of bytes b, err := result.ToJson() if err != nil { - return fmt.Errorf("There was an issue marshalling to JSON: %s\n", err) + return fmt.Errorf("there was an issue marshalling to JSON: %s", err) } err = os.WriteFile(filename, b, 0644) if err != nil { diff --git a/docs/README.md b/docs/README.md index 1db043f..f175f68 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,9 +2,9 @@ This is early documentation that will be converted eventually to something prettier. Read more about: - - [Design](design.md) - - [Usage](usage.md) - + - [Design](design.md) of compspec + - [Usage](usage.md) generic use cases + - [Rainbow](rainbow.md) use cases and examples for the rainbow scheduler ## Thanks and Previous Art diff --git a/docs/design.md b/docs/design.md index 3d825bf..c171f37 100644 --- a/docs/design.md +++ b/docs/design.md @@ -7,18 +7,28 @@ The compatibility tool is responsible for extracting information about a system, ## Definitions -### Extractor +### Plugin -An **extractor** is a core plugin that knows how to retrieve metadata about a host. An extractor is usually going to be run for two cases: +A plugin can define one or more functionalities; + +- "Extract" is expected to know how to extract metadata about an application or environment +- "Create" is expected to create something from extracted data + +This means that an **extractor** is a core plugin that knows how to retrieve metadata about a host. An extractor is usually going to be run for two cases: 1. During CI to extract (and save) metadata about a particular build to put in a compatibility artifact. 2. 
During image selection to extract information about the host to compare to. -Examples extractors could be "library" or "system." +Examples extractors could be "library" or "system." You interact with extractor plugins via the "extract" command. + +A **creator** is a plugin that is responsible for creating an artifact that includes some extracted metadata. The creator is agnostic to what it is being asked to generate in the sense that it just needs a mapping. The mapping will be from the extractor namespace to the compatibility artifact namespace. For our first prototype, this just means asking for particular extractor attributes to map to a set of annotations that we want to dump into json. To start there should only be one creator plugin needed, however if there are different structures of artifacts needed, I could imagine more. An example creation specification for a prototype experiment where we care about architecture, MPI, and GPU is provided in [examples](examples). + +Plugins can be one or the other, or both. -### Section +#### Section -A **section** is a group of metadata within an extractor. For example, within "library" a section is for "mpi." This allows a user to specify running the `--name library[mpi]` extractor to ask for the mpi section of the library family. Another example is under kernel. +A **section** is a group of metadata typically within an extractor, and could also be defined for creators when we have more use cases. +For example, within "library" a section is for "mpi." This allows a user to specify running the `--name library[mpi]` extractor to ask for the mpi section of the library family. Another example is under kernel. The user might want to ask for more than one group to be extracted and might ask for `--name kernel[boot,config]`. Section basically provides more granularity to an extractor namespace.
For the above two examples, the metadata generated would be organized like: ``` @@ -31,12 +41,10 @@ kernel For the above, right now I am implementing extractors generally, or "wild-westy" in the sense that the namespace is oriented toward the extractor name and sections it owns (e.g., no community namespaces like archspec, spack, opencontainers, etc). This is subject to change depending on the design the working group decides on. -### Creator - -A creator is a plugin that is responsible for creating an artifact that includes some extracted metadata. The creator is agnostic to what it it being asked to generate in the sense that it just needs a mapping. The mapping will be from the extractor namespace to the compatibility artifact namespace. For our first prototype, this just means asking for particular extractor attributes to map to a set of annotations that we want to dump into json. To start there should only be one creator plugin needed, however if there are different structures of artifacts needed, I could imagine more. An example creation specification for a prototype experiment where we care about architecture, MPI, and GPU is provided in [examples](examples). - ## Overview +> This was the original proposal and may be out of date. + The design is based on the prototype from that pull request, shown below. ![img/proposal-c-plugin-design.png](img/proposal-c-plugin-design.png) diff --git a/docs/img/rainbow-scheduler-register.png b/docs/img/rainbow-scheduler-register.png new file mode 100644 index 0000000..b547e6b Binary files /dev/null and b/docs/img/rainbow-scheduler-register.png differ diff --git a/docs/rainbow/README.md b/docs/rainbow/README.md new file mode 100644 index 0000000..4204b10 --- /dev/null +++ b/docs/rainbow/README.md @@ -0,0 +1,48 @@ +# Rainbow Scheduler + +The [rainbow scheduler](https://github.com/converged-computing/rainbow) has a registration step that requires a cluster to send over node metadata. 
The reason is because when a user sends a request for work, the scheduler needs to understand +how to properly assign it. To do that, it needs to be able to see all the resources (clusters) available to it. + +![../img/rainbow-scheduler-register.png](../img/rainbow-scheduler-register.png) + +For the purposes of compspec here, we care about the registration step. This is what that includes: + +## Registration + +1. At registration, the cluster also sends over metadata about itself (and the nodes it has). This is going to allow for selection for those nodes. +1. When submitting a job, the user no longer is giving an exact command, but a command + an image with compatibility metadata. The compatibility metadata (somehow) needs to be used to inform the cluster selection. +1. At selection, the rainbow scheduler needs to filter down cluster options, and choose a subset. + - Level 1: Don't ask, just choose the top choice and submit + - Level 2: Ask the cluster for TBA time or cost, choose based on that. + - Job is added to that queue. + +Specifically, this means two steps for compspec go: + +1. A step to ask each node to extract its own metadata, saved to a directory. +2. A second step to combine those nodes into a graph. + +Likely we will take a simple approach to do an extract for one node that captures its metadata into Json Graph Format (JGF) and then dumps into a shared directory (we might imagine this being run with a flux job) +and then some combination step. + +## Example + +In the example below, we will extract node level metadata with `compspec extract` and then generate the cluster JGF to send for registration with `compspec create nodes`. + +### 1. Extract Metadata + +Let's first generate faux node metadata for a "cluster" - I will just run an extraction a few times and generate equivalent files :) This isn't such a crazy idea because it emulates nodes that are the same!
+ +```bash +mkdir -p ./docs/rainbow/cluster +compspec extract --name library --name nfd[cpu,memory,network,storage,system] --name system[cpu,processor,arch,memory] --out ./docs/rainbow/cluster/node-1.json +compspec extract --name library --name nfd[cpu,memory,network,storage,system] --name system[cpu,processor,arch,memory] --out ./docs/rainbow/cluster/node-2.json +compspec extract --name library --name nfd[cpu,memory,network,storage,system] --name system[cpu,processor,arch,memory] --out ./docs/rainbow/cluster/node-3.json +``` + +### 2. Create Nodes + +Now we are going to give compspec the directory, and ask it to create nodes. This will be in JSON graph format. This outputs to the terminal: + +```bash +compspec create nodes --cluster-name cluster-red --node-dir ./docs/rainbow/cluster/ +``` \ No newline at end of file diff --git a/docs/usage.md b/docs/usage.md index aaf134d..01d4567 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -14,6 +14,7 @@ This generates the `bin/compspec` that you can use: ./bin/compspec ``` ```console + ┏┏┓┏┳┓┏┓┏┏┓┏┓┏ ┗┗┛┛┗┗┣┛┛┣┛┗ ┗ ┛ ┛ @@ -28,11 +29,16 @@ Commands: version See the version of compspec extract Run one or more extractors + list List plugins and known sections + create Create a compatibility artifact for the current host according to a + definition + match Match a manifest of container images / artifact pairs against a set + of host fields Arguments: -h --help Print help information - -n --name One or more specific extractor plugin names + -n --name One or more specific plugins to target names ``` ## Version @@ -41,57 +47,70 @@ Arguments: $ ./bin/compspec version ``` ```console -⭐️ compspec version 0.1.0-draft +⭐️ compspec version 0.1.1-draft ``` I know, the star should not be there. Fight me. ## List -The list command lists each extractor, and sections available for it. +The list command lists plugins (extractors and creators), and sections available for extractors. 
```bash $ ./bin/compspec list ``` ```console - Compatibility Plugins - TYPE NAME SECTION - generic kernel extractor - extractor kernel boot - extractor kernel config - extractor kernel modules ----------------------------------------------------------- - generic system extractor - extractor system processor - extractor system os - extractor system arch - extractor system memory ----------------------------------------------------------- - generic library extractor - extractor library mpi ----------------------------------------------------------- - node feature discovery - extractor nfd cpu - extractor nfd kernel - extractor nfd local - extractor nfd memory - extractor nfd network - extractor nfd pci - extractor nfd storage - extractor nfd system - extractor nfd usb - TOTAL 4 17 + Compatibility Plugins + TYPE NAME SECTION + creation plugins + creator artifact + creator cluster +----------------------------------------------------------- + generic kernel extractor + extractor kernel boot + extractor kernel config + extractor kernel modules +----------------------------------------------------------- + generic system extractor + extractor system processor + extractor system os + extractor system arch + extractor system memory + extractor system cpu +----------------------------------------------------------- + generic library extractor + extractor library mpi +----------------------------------------------------------- + node feature discovery + extractor nfd cpu + extractor nfd kernel + extractor nfd local + extractor nfd memory + extractor nfd network + extractor nfd pci + extractor nfd storage + extractor nfd system + extractor nfd usb + TOTAL 6 20 ``` Note that we will eventually add a description column - it's not really warranted yet! ## Create -The create command is how you take a compatibility request, or a YAML file that has a mapping between the extractors defined by this tool and your compatibility metadata namespace, and generate an artifact. 
The artifact typically will be a JSON dump of key value pairs, scoped under different namespaces, that you might push to a registry to live alongside a container image, and with the intention to eventually use it to check compatiility against a new system. To run create -we can use the example in the top level repository: +The create command handles two kinds of creation (sub-commands). Each of these is currently linked to a creation plugin. + + - **artifact**: create a compatibility artifact to describe an environment or application + - **nodes**: create a json graph format summary of nodes (a directory with one or more extracted metadata JSON files with node metadata) + +The artifact case is described here. For the node case, you can read about it in the [rainbow scheduler](rainbow) documentation. + +### Artifact + +The create artifact command is how you take a compatibility request, or a YAML file that has a mapping between the extractors defined by this tool and your compatibility metadata namespace, and generate an artifact. The artifact typically will be a JSON dump of key value pairs, scoped under different namespaces, that you might push to a registry to live alongside a container image, and with the intention to eventually use it to check compatibility against a new system. To run create we can use the example in the top level repository: ```bash -./bin/compspec create --in ./examples/lammps-experiment.yaml +./bin/compspec create artifact --in ./examples/lammps-experiment.yaml ``` Note that you'll see some errors about fields not being found! This is because we've implemented this for the fields to be added custom, on the command line.
@@ -99,7 +118,7 @@ The idea here is that you can add custom metadata fields during your build, whic ```bash # a stands for "append" and it can write a new field or overwrite an existing one -./bin/compspec create --in ./examples/lammps-experiment.yaml -a custom.gpu.available=yes +./bin/compspec create artifact --in ./examples/lammps-experiment.yaml -a custom.gpu.available=yes ``` ```console { @@ -143,7 +162,7 @@ Awesome! That, as simple as it is, is our compatibility artifact. I ran the comm a build will generate it for that context. We would want to save this to file: ```bash -./bin/compspec create --in ./examples/lammps-experiment.yaml -a custom.gpu.available=yes -o ./examples/generated-compatibility-spec.json +./bin/compspec create artifact --in ./examples/lammps-experiment.yaml -a custom.gpu.available=yes -o ./examples/generated-compatibility-spec.json ``` And that's it! We would next (likely during CI) push this compatibility artifact to a URI that is likely (TBA) linked to the image. 
diff --git a/go.mod b/go.mod index b5d091c..fa2dfa5 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( github.com/akamensky/argparse v1.4.0 - github.com/converged-computing/jsongraph-go v0.0.0-20231221142916-249fef6889b3 + github.com/converged-computing/jsongraph-go v0.0.0-20240225004212-223ddffb7565 github.com/converged-computing/nfd-source v0.0.0-20240224025007-20d686e64926 github.com/jedib0t/go-pretty/v6 v6.5.4 github.com/moby/moby v25.0.3+incompatible diff --git a/go.sum b/go.sum index 44b9560..377d95e 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ github.com/akamensky/argparse v1.4.0 h1:YGzvsTqCvbEZhL8zZu2AiA5nq805NZh75JNj4ajn github.com/akamensky/argparse v1.4.0/go.mod h1:S5kwC7IuDcEr5VeXtGPRVZ5o/FdhcMlQz4IZQuw64xA= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/converged-computing/jsongraph-go v0.0.0-20231221142916-249fef6889b3 h1:frJJfyARuHmF2eohDCyltBLE6tRJKvA1shuS2aWQaf8= -github.com/converged-computing/jsongraph-go v0.0.0-20231221142916-249fef6889b3/go.mod h1:+DhVyLXGVfBsfta4185jd33jqa94inshCcdvsXK2Irk= +github.com/converged-computing/jsongraph-go v0.0.0-20240225004212-223ddffb7565 h1:ZwJngPrF1yvM4ZGEyoT1b8h5e0qUumOxeDZLN37pPTk= +github.com/converged-computing/jsongraph-go v0.0.0-20240225004212-223ddffb7565/go.mod h1:+DhVyLXGVfBsfta4185jd33jqa94inshCcdvsXK2Irk= github.com/converged-computing/nfd-source v0.0.0-20240224025007-20d686e64926 h1:VZmgK3t4564vdHNpE//q6kuPlugOrojkDHP4Gqd4A1g= github.com/converged-computing/nfd-source v0.0.0-20240224025007-20d686e64926/go.mod h1:I15nBsQqBTUsc3A4a6cuQmZjQ8lYUZSZ2a7UAE5SZ3g= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= diff --git a/pkg/graph/cluster.go b/pkg/graph/cluster.go new file mode 100644 index 0000000..1724fb9 --- /dev/null +++ b/pkg/graph/cluster.go @@ -0,0 +1,172 @@ +package graph + +import ( + "encoding/json" + 
"fmt" + "os" + + "github.com/compspec/compspec-go/pkg/utils" + "github.com/converged-computing/jsongraph-go/jsongraph/metadata" + "github.com/converged-computing/jsongraph-go/jsongraph/v2/graph" + jgf "github.com/converged-computing/jsongraph-go/jsongraph/v2/graph" +) + +// A ClusterGraph is meant to be a plain (flux oriented) JGF to describe a cluster (nodes) +type ClusterGraph struct { + *jgf.JsonGraph + + Name string + + // Top level counter for node labels (JGF v2) that maps to ids (JGF v1) + nodeCounter int32 + + // Counters for specific resource types (e.g., rack, node) + resourceCounters map[string]int32 +} + +// HasNode determines if the graph has a node, named by label +func (c *ClusterGraph) HasNode(name string) bool { + _, ok := c.Graph.Nodes[name] + return ok +} + +// Save graph to a cached file +func (c *ClusterGraph) SaveGraph(path string) error { + exists, err := utils.PathExists(path) + if err != nil { + return err + } + // Don't overwrite if exists + if exists { + fmt.Printf("Graph %s already exists, will not overwrite\n", path) + return nil + } + content, err := json.MarshalIndent(c.Graph, "", " ") + if err != nil { + return err + } + fmt.Printf("Saving graph to %s\n", path) + err = os.WriteFile(path, content, 0644) + if err != nil { + return err + } + return nil +} + +// Path gets a new path +func getNodePath(root, subpath string) string { + if subpath == "" { + return fmt.Sprintf("/%s", root) + } + return fmt.Sprintf("/%s/%s", root, subpath) +} + +// AddNode adds a node to the graph +// g.AddNode("rack", 1, false, "", root) +func (c *ClusterGraph) AddNode( + resource string, + name string, + size int32, + exclusive bool, + unit string, +) *graph.Node { + node := c.getNode(resource, name, size, exclusive, unit) + c.Graph.Nodes[*node.Label] = *node + return node +} + +// Add an edge from source to dest with some relationship +func (c *ClusterGraph) AddEdge(source, dest graph.Node, relation string) { + edge := getEdge(*source.Label, *dest.Label, 
relation) + c.Graph.Edges = append(c.Graph.Edges, edge) +} + +// getNode is a private shared function that can also be used to generate the root! +func (c *ClusterGraph) getNode( + resource string, + name string, + size int32, + exclusive bool, + unit string, +) *graph.Node { + + // Get the identifier for the resource type + counter, ok := c.resourceCounters[resource] + if !ok { + counter = 0 + } + + // The current count in the graph (global) + count := c.nodeCounter + + // The id in the metadata is the counter for that resource type + resourceCounter := fmt.Sprintf("%d", counter) + + // The resource name is the type + the resource counter + resourceName := fmt.Sprintf("%s%d", name, counter) + + // New Metadata with expected fluxion data + m := metadata.Metadata{} + m.AddElement("type", resource) + m.AddElement("basename", name) + m.AddElement("id", resourceCounter) + m.AddElement("name", resourceName) + + // uniq_id should be the same as the label, but as an integer + m.AddElement("uniq_id", count) + m.AddElement("rank", -1) + m.AddElement("exclusive", exclusive) + m.AddElement("unit", unit) + m.AddElement("size", size) + m.AddElement("paths", map[string]string{"containment": getNodePath(name, "")}) + + // Update the resource counter + counter += 1 + c.resourceCounters[resource] = counter + + // Update the global counter + c.nodeCounter += 1 + + // Assemble the node! 
+ // Label for v2 will be identifier "id" for JGF v1 + label := fmt.Sprintf("%d", count) + node := graph.Node{Label: &label, Metadata: m} + return &node +} + +// Init a new FlexGraph from a graphml filename +// The cluster root is slightly different so we don't use getNode here +func NewClusterGraph(name string) (ClusterGraph, error) { + + // prepare a graph to load targets into + g := jgf.NewGraph() + + clusterName := fmt.Sprintf("%s0", name) + + // New Metadata with expected fluxion data + m := metadata.Metadata{} + m.AddElement("type", "cluster") + m.AddElement("basename", name) + m.AddElement("name", clusterName) + m.AddElement("id", 0) + m.AddElement("uniq_id", 0) + m.AddElement("rank", -1) + m.AddElement("exclusive", false) + m.AddElement("unit", "") + m.AddElement("size", 1) + m.AddElement("paths", map[string]string{"containment": getNodePath(name, "")}) + + // Root cluster node + label := "0" + node := graph.Node{Label: &label, Metadata: m} + + // Set the root node + g.Graph.Nodes[label] = node + + // Create a new cluster! 
+ // Start counting at 1 - index 0 is the cluster root + resourceCounters := map[string]int32{"cluster": int32(1)} + cluster := ClusterGraph{g, name, 1, resourceCounters} + + return cluster, nil +} diff --git a/pkg/graph/graph.go b/pkg/graph/compatibility.go similarity index 100% rename from pkg/graph/graph.go rename to pkg/graph/compatibility.go diff --git a/pkg/graph/edges.go b/pkg/graph/edges.go index 6cb463f..056ba03 100644 --- a/pkg/graph/edges.go +++ b/pkg/graph/edges.go @@ -4,7 +4,7 @@ import ( "github.com/converged-computing/jsongraph-go/jsongraph/v2/graph" ) -// Get an edge with a specific containment (typically "contains" or "in") -func getEdge(source string, dest string, containment string) graph.Edge { - return graph.Edge{Source: source, Target: dest, Relation: containment} +// Get an edge with a specific relationship (typically "contains" or "in") +func getEdge(source string, dest string, relation string) graph.Edge { + return graph.Edge{Source: source, Target: dest, Relation: relation} } diff --git a/plugins/field.go b/pkg/plugin/field.go similarity index 97% rename from plugins/field.go rename to pkg/plugin/field.go index 6326ff2..005433e 100644 --- a/plugins/field.go +++ b/pkg/plugin/field.go @@ -1,4 +1,4 @@ -package plugins +package plugin import ( "fmt" diff --git a/pkg/extractor/extractor.go b/pkg/plugin/plugin.go similarity index 56% rename from pkg/extractor/extractor.go rename to pkg/plugin/plugin.go index 22f6686..c53833e 100644 --- a/pkg/extractor/extractor.go +++ b/pkg/plugin/plugin.go @@ -1,30 +1,40 @@ -package extractor +package plugin import ( "encoding/json" "fmt" ) -// An Extractor interface has: +// A Plugin interface can define any of the following: // // an Extract function to return extractor data across sections // a validate function to typically check that the plugin is valid -type Extractor interface { +// a Creation interface that can use extractor data to generate something new +type PluginInterface interface { Name() string 
Description() string - Extract(interface{}) (ExtractorData, error) + + // This is probably a dumb way to do it, but it works + IsExtractor() bool + IsCreator() bool + + // Extractors + Extract(interface{}) (PluginData, error) Validate() bool Sections() []string + + // Creators take a map of named options + Create(map[string]string) error } // ExtractorData is returned by an extractor -type ExtractorData struct { +type PluginData struct { Sections Sections `json:"sections,omitempty"` } -type Sections map[string]ExtractorSection +type Sections map[string]PluginSection // Print extractor data to the console -func (e *ExtractorData) Print() { +func (e *PluginData) Print() { for name, section := range e.Sections { fmt.Printf(" -- Section %s\n", name) for key, value := range section { @@ -35,7 +45,7 @@ func (e *ExtractorData) Print() { } // ToJson serializes to json -func (e *ExtractorData) ToJson() (string, error) { +func (e *PluginData) ToJson() (string, error) { b, err := json.MarshalIndent(e, "", " ") if err != nil { return "", err @@ -44,7 +54,7 @@ func (e *ExtractorData) ToJson() (string, error) { } // An extractor section corresponds to a named group of attributes -type ExtractorSection map[string]string +type PluginSection map[string]string // Extractors is a lookup of registered extractors by name -type Extractors map[string]Extractor +type Plugins map[string]PluginInterface diff --git a/pkg/plugin/result.go b/pkg/plugin/result.go new file mode 100644 index 0000000..0b84d51 --- /dev/null +++ b/pkg/plugin/result.go @@ -0,0 +1,92 @@ +package plugin + +import ( + "encoding/json" + "fmt" + "os" + "strings" +) + +// A Result wraps named extractor data, just for easy dumping to json +type Result struct { + Results map[string]PluginData `json:"results,omitempty"` +} + +// Load a filename into the result object! 
+func (r *Result) Load(filename string) error { + + file, err := os.ReadFile(filename) + if err != nil { + return err + } + err = json.Unmarshal(file, r) + if err != nil { + return err + } + return nil +} + +// ToJson serializes a result to json +func (r *Result) ToJson() ([]byte, error) { + b, err := json.MarshalIndent(r, "", " ") + if err != nil { + return []byte{}, err + } + return b, err +} + +// Print prints the result to the terminal +func (r *Result) Print() { + for name, result := range r.Results { + fmt.Printf(" --Result for %s\n", name) + result.Print() + } +} + +// AddCustomFields adds or updates an existing result with +// custom metadata, either new or to overwrite +func (r *Result) AddCustomFields(fields []string) { + + for _, field := range fields { + if !strings.Contains(field, "=") { + fmt.Printf("warning: field %s does not contain an '=', skipping\n", field) + continue + } + parts := strings.Split(field, "=") + if len(parts) < 2 { + fmt.Printf("warning: field %s has an empty value, skipping\n", field) + continue + } + + // No reason the value cannot have additional = + field = parts[0] + value := strings.Join(parts[1:], "=") + + // Get the extractor, section, and subfield from the field + f, err := ParseField(field) + if err != nil { + fmt.Printf(err.Error(), field) + continue + } + + // Is the extractor name in the result? + _, ok := r.Results[f.Extractor] + if !ok { + sections := Sections{} + r.Results[f.Extractor] = PluginData{Sections: sections} + } + data := r.Results[f.Extractor] + + // Is the section name in the extractor data? + _, ok = data.Sections[f.Section] + if !ok { + data.Sections[f.Section] = PluginSection{} + } + section := data.Sections[f.Section] + section[f.Field] = value + + // Wrap it back up! 
+ data.Sections[f.Section] = section + r.Results[f.Extractor] = data + } +} diff --git a/pkg/types/version.go b/pkg/types/version.go index cd34d7b..65b41db 100644 --- a/pkg/types/version.go +++ b/pkg/types/version.go @@ -10,7 +10,7 @@ const ( VersionMinor = 1 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDraft indicates development branch. Releases will be empty string. VersionDraft = "-draft" diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index a4ec477..80168a2 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -4,6 +4,7 @@ import ( "bufio" "errors" "fmt" + "math" "math/rand" "os" "strings" @@ -21,6 +22,21 @@ func PathExists(path string) (bool, error) { return true, nil } +// chunkify a count of processors across sockets +func Chunkify(items []string, count int) [][]string { + var chunks [][]string + chunkSize := int(math.Ceil(float64(len(items)) / float64(count))) + + for i := 0; i < len(items); i += chunkSize { + end := i + chunkSize + if end > len(items) { + end = len(items) + } + chunks = append(chunks, items[i:end]) + } + return chunks +} + // SplitDelimiterList splits a list of items by an additional delimiter func SplitDelimiterList(items []string, delim string) (map[string]string, error) { diff --git a/cmd/compspec/create/create.go b/plugins/creators/artifact/artifact.go similarity index 67% rename from cmd/compspec/create/create.go rename to plugins/creators/artifact/artifact.go index 2745b36..a7bdf63 100644 --- a/cmd/compspec/create/create.go +++ b/plugins/creators/artifact/artifact.go @@ -1,35 +1,61 @@ -package create +package artifact import ( "fmt" "os" + "strings" + "github.com/compspec/compspec-go/pkg/plugin" "github.com/compspec/compspec-go/pkg/types" p "github.com/compspec/compspec-go/plugins" "sigs.k8s.io/yaml" ) -// loadRequest loads a Compatibility Request YAML into a struct -func loadRequest(filename string) (*types.CompatibilityRequest, error) { - request :=
types.CompatibilityRequest{} - yamlFile, err := os.ReadFile(filename) - if err != nil { - return &request, err - } +const ( + CreatorName = "artifact" + CreatorDescription = "describe an application or environment" +) - err = yaml.Unmarshal(yamlFile, &request) - if err != nil { - return &request, err - } - return &request, nil +type ArtifactCreator struct{} + +func (c ArtifactCreator) Description() string { + return CreatorDescription +} + +func (c ArtifactCreator) Name() string { + return CreatorName +} + +func (c ArtifactCreator) Sections() []string { + return []string{} } -// Run will create a compatibility artifact based on a request in YAML -func Run(specname string, fields []string, saveto string, allowFail bool) error { +func (c ArtifactCreator) Extract(interface{}) (plugin.PluginData, error) { + return plugin.PluginData{}, nil +} +func (c ArtifactCreator) IsCreator() bool { return true } +func (c ArtifactCreator) IsExtractor() bool { return false } + +// Create generates the desired output +func (c ArtifactCreator) Create(options map[string]string) error { + + // unwrap options (we can be sure they are at least provided) + specname := options["specname"] + saveto := options["saveto"] + fieldsCombined := options["fields"] + fields := strings.Split(fieldsCombined, "||") + + // This is uber janky. 
We could use interfaces + // But I just feel so lazy right now + allowFailFlag := options["allowFail"] + allowFail := false + if allowFailFlag == "true" { + allowFail = true + } // Cut out early if a spec not provided if specname == "" { - return fmt.Errorf("A spec input -i/--input is required") + return fmt.Errorf("a spec input -i/--input is required") } request, err := loadRequest(specname) if err != nil { @@ -74,9 +100,24 @@ func Run(specname string, fields []string, saveto string, allowFail bool) error return nil } +// loadRequest loads a Compatibility Request YAML into a struct +func loadRequest(filename string) (*types.CompatibilityRequest, error) { + request := types.CompatibilityRequest{} + yamlFile, err := os.ReadFile(filename) + if err != nil { + return &request, err + } + + err = yaml.Unmarshal(yamlFile, &request) + if err != nil { + return &request, err + } + return &request, nil +} + // LoadExtractors loads a compatibility result into a compatibility request // After this we can save the populated thing into an artifact (json DUMP) -func PopulateExtractors(result *p.Result, request *types.CompatibilityRequest) (*types.CompatibilityRequest, error) { +func PopulateExtractors(result *plugin.Result, request *types.CompatibilityRequest) (*types.CompatibilityRequest, error) { // Every metadata attribute must be known under a schema schemas := request.Metadata.Schemas @@ -97,7 +138,7 @@ func PopulateExtractors(result *p.Result, request *types.CompatibilityRequest) ( for key, extractorKey := range compat.Attributes { // Get the extractor, section, and subfield from the extractor lookup key - f, err := p.ParseField(extractorKey) + f, err := plugin.ParseField(extractorKey) if err != nil { fmt.Printf("warning: cannot parse %s: %s, setting to empty\n", key, extractorKey) compat.Attributes[key] = "" @@ -138,3 +179,13 @@ func PopulateExtractors(result *p.Result, request *types.CompatibilityRequest) ( return request, nil } + +func (c ArtifactCreator) Validate() bool 
{ + return true +} + +// NewPlugin creates a new ArtifactCreator +func NewPlugin() (plugin.PluginInterface, error) { + c := ArtifactCreator{} + return c, nil +} diff --git a/plugins/creators/cluster/cluster.go b/plugins/creators/cluster/cluster.go new file mode 100644 index 0000000..622f47a --- /dev/null +++ b/plugins/creators/cluster/cluster.go @@ -0,0 +1,219 @@ +package cluster + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + + "github.com/compspec/compspec-go/pkg/graph" + "github.com/compspec/compspec-go/pkg/plugin" + "github.com/compspec/compspec-go/pkg/utils" +) + +const ( + CreatorName = "cluster" + CreatorDescription = "create cluster of nodes" +) + +type ClusterCreator struct{} + +func (c ClusterCreator) Description() string { + return CreatorDescription +} + +func (c ClusterCreator) Name() string { + return CreatorName +} + +func (c ClusterCreator) Sections() []string { + return []string{} +} + +func (c ClusterCreator) Extract(interface{}) (plugin.PluginData, error) { + return plugin.PluginData{}, nil +} +func (c ClusterCreator) IsCreator() bool { return true } +func (c ClusterCreator) IsExtractor() bool { return false } + +// Create generates the desired output +func (c ClusterCreator) Create(options map[string]string) error { + + // unwrap options (we can be sure they are at least provided) + nodesDir := options["nodes-dir"] + clusterName := options["cluster-name"] + nodeOutFile := options["node-outfile"] + + // Read in each node into a plugins.Result + // Results map[string]plugin.PluginData `json:"extractors,omitempty"` + nodes := map[string]plugin.Result{} + + nodeFiles, err := os.ReadDir(nodesDir) + if err != nil { + return err + } + for _, f := range nodeFiles { + fmt.Printf("Loading %s\n", f.Name()) + result := plugin.Result{} + fullpath := filepath.Join(nodesDir, f.Name()) + + // Be forgiving if extra files are there... 
+ err := result.Load(fullpath) + if err != nil { + fmt.Printf("Warning, filename %s is not in the correct format. Skipping\n", f.Name()) + continue + } + // Add to nodes, if we don't error + nodes[f.Name()] = result + } + + // When we get here, no nodes, no graph + if len(nodes) == 0 { + fmt.Println("There were no nodes for the graph.") + return nil + } + + // Prepare a graph that will describe our cluster + g, err := graph.NewClusterGraph(clusterName) + if err != nil { + return err + } + + // This is the root node, we reference it as a parent to the rack + root := g.Graph.Nodes["0"] + + // Right now assume we have just one rack with all nodes + // https://github.com/flux-framework/flux-sched/blob/master/t/data/resource/jgfs/tiny.json#L4 + // Note that these are flux specific, and we can make them more generic if needed + + // resource (e.g., rack, node) + // name (usually the same as the resource) + // size (usually 1) + // exclusive (usually false) + // unit (usually empty or an amount) + rack := *g.AddNode("rack", "rack", 1, false, "") + + // Connect the rack to the parent, both ways. + // I think this is because fluxion is Depth First and Upwards (dfu) + // "The root cluster contains a rack" + g.AddEdge(root, rack, "contains") + + // "The rack is in a cluster" + g.AddEdge(rack, root, "in") + + // Read in each node and add to the rack. 
+ // There are several levels here: + // /tiny0/rack0/node0/socket0/core1 + for nodeFile, meta := range nodes { + + // We must have extractors, nfd, and sections + nfd, ok := meta.Results["nfd"] + if !ok || len(nfd.Sections) == 0 { + fmt.Printf("node %s is missing extractors->nfd data, skipping\n", nodeFile) + continue + } + + // We also need system -> sections -> processor + system, ok := meta.Results["system"] + if !ok || len(system.Sections) == 0 { + fmt.Printf("node %s is missing extractors->system data, skipping\n", nodeFile) + continue + } + processor, ok := system.Sections["processor"] + if !ok || len(processor) == 0 { + fmt.Printf("node %s is missing extractors->system->processor, skipping\n", nodeFile) + continue + } + cpu, ok := system.Sections["cpu"] + if !ok || len(cpu) == 0 { + fmt.Printf("node %s is missing extractors->system->cpu, skipping\n", nodeFile) + continue + } + + // IMPORTANT: this is runtime nproces, which might be physical and virtual + // we need hwloc for just physical I think + cores, ok := cpu["cores"] + if !ok { + fmt.Printf("node %s is missing extractors->system->cpu->cores, skipping\n", nodeFile) + continue + } + cpuCount, err := strconv.Atoi(cores) + if err != nil { + fmt.Printf("node %s cannot convert cores, skipping\n", nodeFile) + continue + } + + // First add the rack -> node + node := *g.AddNode("node", "node", 1, false, "") + g.AddEdge(rack, node, "contains") + g.AddEdge(node, rack, "in") + + // Now add the socket. We need hwloc for this + // nfd has a socket count, but we can't be sure which CPU are assigned to which? 
+ // This isn't good enough, see https://github.com/compspec/compspec-go/issues/19 + // For the prototype we will use the nfd socket count and split cores across it + // cpu metadata from ndf + socketCount := 1 + + nfdCpu, ok := nfd.Sections["cpu"] + if ok { + sockets, ok := nfdCpu["topology.socket_count"] + if ok { + sCount, err := strconv.Atoi(sockets) + if err == nil { + socketCount = sCount + } + } + } + + // Get the processors, assume we divide between the sockets + // TODO we should also get this in better detail, physical vs logical cores + items := []string{} + for i := 0; i < cpuCount; i++ { + items = append(items, fmt.Sprintf("%d", i)) + } + // Mapping of socket to cores + chunks := utils.Chunkify(items, socketCount) + for _, chunk := range chunks { + + // Create each socket attached to the node + // rack -> node -> socket + socketNode := *g.AddNode("socket", "socket", 1, false, "") + g.AddEdge(node, socketNode, "contains") + g.AddEdge(socketNode, node, "in") + + // Create each core attached to the socket + for range chunk { + coreNode := *g.AddNode("core", "core", 1, false, "") + g.AddEdge(socketNode, coreNode, "contains") + g.AddEdge(coreNode, socketNode, "in") + + } + } + } + + // Save graph if given a file + if nodeOutFile != "" { + err = g.SaveGraph(nodeOutFile) + if err != nil { + return err + } + } else { + toprint, _ := json.MarshalIndent(g.Graph, "", "\t") + fmt.Println(string(toprint)) + return nil + } + return nil + +} + +func (c ClusterCreator) Validate() bool { + return true +} + +// NewPlugin creates a new ClusterCreator +func NewPlugin() (plugin.PluginInterface, error) { + c := ClusterCreator{} + return c, nil +} diff --git a/plugins/extractors/kernel/extractors.go b/plugins/extractors/kernel/extractors.go index 667e2de..5e4f9bf 100644 --- a/plugins/extractors/kernel/extractors.go +++ b/plugins/extractors/kernel/extractors.go @@ -5,7 +5,7 @@ import ( "fmt" "os" "strings" - "github.com/compspec/compspec-go/pkg/extractor" +
"github.com/compspec/compspec-go/pkg/plugin" "github.com/compspec/compspec-go/pkg/utils" kernelParser "github.com/moby/moby/pkg/parsers/kernel" ) @@ -24,7 +24,7 @@ const ( ) // getKernelBootParams loads parameters given to the kernel at boot time -func getKernelBootParams() (extractor.ExtractorSection, error) { +func getKernelBootParams() (plugin.PluginSection, error) { raw, err := os.ReadFile(kernelBootFile) if err != nil { @@ -36,7 +36,7 @@ func getKernelBootParams() (extractor.ExtractorSection, error) { } // getKernelBootConfig loads key value pairs from the kernel config -func getKernelBootConfig() (extractor.ExtractorSection, error) { +func getKernelBootConfig() (plugin.PluginSection, error) { version, err := kernelParser.GetKernelVersion() if err != nil { @@ -50,7 +50,7 @@ func getKernelBootConfig() (extractor.ExtractorSection, error) { // getKernelModules flattens the list of kernel modules (drivers) into // the name (and if enabled) and version. I don't know if we need more than that. -func getKernelModules() (extractor.ExtractorSection, error) { +func getKernelModules() (plugin.PluginSection, error) { version, err := kernelParser.GetKernelVersion() if err != nil { return nil, err @@ -66,7 +66,7 @@ func getKernelModules() (extractor.ExtractorSection, error) { // module. = // module.parameter. = value // TODO will this work? 
- modules := extractor.ExtractorSection{} + modules := plugin.PluginSection{} for _, moduleDir := range moduleDirs { // Don't look unless it's a directory diff --git a/plugins/extractors/kernel/kernel.go b/plugins/extractors/kernel/kernel.go index 4a8c241..ad7ceed 100644 --- a/plugins/extractors/kernel/kernel.go +++ b/plugins/extractors/kernel/kernel.go @@ -3,7 +3,7 @@ package kernel import ( "fmt" - "github.com/compspec/compspec-go/pkg/extractor" + "github.com/compspec/compspec-go/pkg/plugin" "github.com/compspec/compspec-go/pkg/utils" ) @@ -27,6 +27,12 @@ func (e KernelExtractor) Description() string { return ExtractorDescription } +func (e KernelExtractor) Create(options map[string]string) error { + return nil +} +func (e KernelExtractor) IsCreator() bool { return false } +func (e KernelExtractor) IsExtractor() bool { return true } + func (e KernelExtractor) Sections() []string { return e.sections } @@ -48,10 +54,10 @@ func (c KernelExtractor) Validate() bool { // Extract returns kernel metadata, for a set of named sections // TODO eventually the user could select which sections they want -func (c KernelExtractor) Extract(interface{}) (extractor.ExtractorData, error) { +func (c KernelExtractor) Extract(interface{}) (plugin.PluginData, error) { - sections := map[string]extractor.ExtractorSection{} - data := extractor.ExtractorData{} + sections := map[string]plugin.PluginSection{} + data := plugin.PluginData{} // Only extract the sections we asked for for _, name := range c.sections { @@ -87,14 +93,14 @@ func (c KernelExtractor) Extract(interface{}) (extractor.ExtractorData, error) { return data, nil } -// NewPlugin validates and returns a new kernel plugin -func NewPlugin(sections []string) (extractor.Extractor, error) { +// NewPlugin validates and returns a new kernel plugins +func NewPlugin(sections []string) (plugin.PluginInterface, error) { if len(sections) == 0 { sections = validSections } e := KernelExtractor{sections: sections} if !e.Validate() { - return 
nil, fmt.Errorf("plugin %s is not valid\n", e.Name()) + return nil, fmt.Errorf("plugin %s is not valid", e.Name()) } return e, nil } diff --git a/plugins/extractors/library/extractors.go b/plugins/extractors/library/extractors.go index 3dab797..573cace 100644 --- a/plugins/extractors/library/extractors.go +++ b/plugins/extractors/library/extractors.go @@ -6,7 +6,7 @@ import ( "regexp" "strings" - "github.com/compspec/compspec-go/pkg/extractor" + "github.com/compspec/compspec-go/pkg/plugin" "github.com/compspec/compspec-go/pkg/utils" ) @@ -20,8 +20,8 @@ var ( // getMPIInformation returns info on mpi versions and variant // yes, fairly janky, please improve upon! This is for a prototype -func getMPIInformation() (extractor.ExtractorSection, error) { - info := extractor.ExtractorSection{} +func getMPIInformation() (plugin.PluginSection, error) { + info := plugin.PluginSection{} // Do we even have mpirun? path, err := exec.LookPath(MPIRunExec) diff --git a/plugins/extractors/library/library.go b/plugins/extractors/library/library.go index 2bb3df2..ae4236e 100644 --- a/plugins/extractors/library/library.go +++ b/plugins/extractors/library/library.go @@ -3,7 +3,7 @@ package library import ( "fmt" - "github.com/compspec/compspec-go/pkg/extractor" + "github.com/compspec/compspec-go/pkg/plugin" "github.com/compspec/compspec-go/pkg/utils" ) @@ -33,6 +33,13 @@ func (e LibraryExtractor) Description() string { return ExtractorDescription } +func (e LibraryExtractor) Create(options map[string]string) error { + return nil +} + +func (e LibraryExtractor) IsCreator() bool { return false } +func (e LibraryExtractor) IsExtractor() bool { return true } + // Validate ensures that the sections provided are in the list we know func (e LibraryExtractor) Validate() bool { invalids, valid := utils.StringArrayIsSubset(e.sections, validSections) @@ -43,10 +50,10 @@ func (e LibraryExtractor) Validate() bool { } // Extract returns library metadata, for a set of named sections -func (e 
LibraryExtractor) Extract(interface{}) (extractor.ExtractorData, error) { +func (e LibraryExtractor) Extract(interface{}) (plugin.PluginData, error) { - sections := map[string]extractor.ExtractorSection{} - data := extractor.ExtractorData{} + sections := map[string]plugin.PluginSection{} + data := plugin.PluginData{} // Only extract the sections we asked for for _, name := range e.sections { @@ -63,13 +70,13 @@ func (e LibraryExtractor) Extract(interface{}) (extractor.ExtractorData, error) } // NewPlugin validates and returns a new plugin -func NewPlugin(sections []string) (extractor.Extractor, error) { +func NewPlugin(sections []string) (plugin.PluginInterface, error) { if len(sections) == 0 { sections = validSections } e := LibraryExtractor{sections: sections} if !e.Validate() { - return nil, fmt.Errorf("plugin %s is not valid\n", e.Name()) + return nil, fmt.Errorf("plugin %s is not valid", e.Name()) } return e, nil } diff --git a/plugins/extractors/nfd/nfd.go b/plugins/extractors/nfd/nfd.go index 0afd16c..bdc9314 100644 --- a/plugins/extractors/nfd/nfd.go +++ b/plugins/extractors/nfd/nfd.go @@ -16,7 +16,7 @@ import ( _ "github.com/converged-computing/nfd-source/source/system" _ "github.com/converged-computing/nfd-source/source/usb" - "github.com/compspec/compspec-go/pkg/extractor" + "github.com/compspec/compspec-go/pkg/plugin" "github.com/compspec/compspec-go/pkg/utils" ) @@ -69,6 +69,13 @@ func (e NFDExtractor) Description() string { return ExtractorDescription } +func (e NFDExtractor) Create(options map[string]string) error { + return nil +} + +func (e NFDExtractor) IsCreator() bool { return false } +func (e NFDExtractor) IsExtractor() bool { return true } + // Validate ensures that the sections provided are in the list we know func (e NFDExtractor) Validate() bool { invalids, valid := utils.StringArrayIsSubset(e.sections, validSections) @@ -79,10 +86,10 @@ func (e NFDExtractor) Validate() bool { } // Extract returns system metadata, for a set of named 
sections -func (e NFDExtractor) Extract(interface{}) (extractor.ExtractorData, error) { +func (e NFDExtractor) Extract(interface{}) (plugin.PluginData, error) { - sections := map[string]extractor.ExtractorSection{} - data := extractor.ExtractorData{} + sections := map[string]plugin.PluginSection{} + data := plugin.PluginData{} // Get all registered feature sources sources := source.GetAllFeatureSources() @@ -105,7 +112,7 @@ func (e NFDExtractor) Extract(interface{}) (extractor.ExtractorData, error) { // Create a new section for the group // For each of the below, "fs" is a feature set // AttributeFeatureSet - section := extractor.ExtractorSection{} + section := plugin.PluginSection{} features := discovery.GetFeatures() for k, fs := range features.Attributes { for fName, feature := range fs.Elements { @@ -140,13 +147,13 @@ func (e NFDExtractor) Extract(interface{}) (extractor.ExtractorData, error) { } // NewPlugin validates and returns a new kernel plugin -func NewPlugin(sections []string) (extractor.Extractor, error) { +func NewPlugin(sections []string) (plugin.PluginInterface, error) { if len(sections) == 0 { sections = validSections } e := NFDExtractor{sections: sections} if !e.Validate() { - return nil, fmt.Errorf("plugin %s is not valid\n", e.Name()) + return nil, fmt.Errorf("plugin %s is not valid", e.Name()) } return e, nil } diff --git a/plugins/extractors/system/arch.go b/plugins/extractors/system/arch.go index 113037b..3cba3a2 100644 --- a/plugins/extractors/system/arch.go +++ b/plugins/extractors/system/arch.go @@ -4,7 +4,7 @@ import ( "fmt" "os/exec" - "github.com/compspec/compspec-go/pkg/extractor" + "github.com/compspec/compspec-go/pkg/plugin" "github.com/compspec/compspec-go/pkg/utils" ) @@ -35,8 +35,8 @@ func getOsArch() (string, error) { } // getArchInformation gets architecture information -func getArchInformation() (extractor.ExtractorSection, error) { - info := extractor.ExtractorSection{} +func getArchInformation() (plugin.PluginSection, error) 
{ + info := plugin.PluginSection{} // Read in architectures arch, err := getOsArch() diff --git a/plugins/extractors/system/extractors.go b/plugins/extractors/system/extractors.go index 2229a84..448470e 100644 --- a/plugins/extractors/system/extractors.go +++ b/plugins/extractors/system/extractors.go @@ -3,9 +3,10 @@ package system import ( "fmt" "os" + "runtime" "strings" - "github.com/compspec/compspec-go/pkg/extractor" + "github.com/compspec/compspec-go/pkg/plugin" "github.com/compspec/compspec-go/pkg/utils" ) @@ -112,8 +113,13 @@ func getCpuFeatures(p map[string]string) (string, error) { // getCPUInformation gets information about the system // TODO this is not used. -func getCPUInformation() (extractor.ExtractorSection, error) { - info := extractor.ExtractorSection{} +func getCPUInformation() (plugin.PluginSection, error) { + info := plugin.PluginSection{} + + cores := runtime.NumCPU() + + // This is a guess at best + info["cores"] = fmt.Sprintf("%d", cores) //stat, err := linuxproc.ReadCPUInfo(CpuInfoFile) //if err != nil { @@ -129,8 +135,8 @@ func getCPUInformation() (extractor.ExtractorSection, error) { } // getProcessorInformation returns details about each processor -func getProcessorInformation() (extractor.ExtractorSection, error) { - info := extractor.ExtractorSection{} +func getProcessorInformation() (plugin.PluginSection, error) { + info := plugin.PluginSection{} raw, err := os.ReadFile(CpuInfoFile) if err != nil { diff --git a/plugins/extractors/system/memory.go b/plugins/extractors/system/memory.go index db9b283..fb2bb92 100644 --- a/plugins/extractors/system/memory.go +++ b/plugins/extractors/system/memory.go @@ -4,7 +4,7 @@ import ( "os" "strings" - "github.com/compspec/compspec-go/pkg/extractor" + "github.com/compspec/compspec-go/pkg/plugin" ) const ( @@ -12,8 +12,8 @@ const ( ) // getMemoryInformation parses /proc/meminfo to get node memory metadata -func getMemoryInformation() (extractor.ExtractorSection, error) { - info := 
extractor.ExtractorSection{} +func getMemoryInformation() (plugin.PluginSection, error) { + info := plugin.PluginSection{} raw, err := os.ReadFile(memoryInfoFile) if err != nil { diff --git a/plugins/extractors/system/os.go b/plugins/extractors/system/os.go index 38fd597..4af0679 100644 --- a/plugins/extractors/system/os.go +++ b/plugins/extractors/system/os.go @@ -11,7 +11,7 @@ import ( "regexp" "strings" - "github.com/compspec/compspec-go/pkg/extractor" + "github.com/compspec/compspec-go/pkg/plugin" ) const ( @@ -124,8 +124,8 @@ func readOsRelease(prettyName string, vendor string) (string, error) { } // getOSInformation gets operating system level metadata -func getOsInformation() (extractor.ExtractorSection, error) { - info := extractor.ExtractorSection{} +func getOsInformation() (plugin.PluginSection, error) { + info := plugin.PluginSection{} // Get the name, version, and vendor name, version, vendor, err := parseOsRelease() diff --git a/plugins/extractors/system/system.go b/plugins/extractors/system/system.go index bcf2a07..208f2ad 100644 --- a/plugins/extractors/system/system.go +++ b/plugins/extractors/system/system.go @@ -3,7 +3,7 @@ package system import ( "fmt" - "github.com/compspec/compspec-go/pkg/extractor" + "github.com/compspec/compspec-go/pkg/plugin" "github.com/compspec/compspec-go/pkg/utils" ) @@ -20,7 +20,7 @@ const ( ) var ( - validSections = []string{ProcessorSection, OsSection, ArchSection, MemorySection} + validSections = []string{ProcessorSection, OsSection, ArchSection, MemorySection, CPUSection} ) type SystemExtractor struct { @@ -39,6 +39,13 @@ func (e SystemExtractor) Sections() []string { return e.sections } +func (e SystemExtractor) Create(options map[string]string) error { + return nil +} + +func (e SystemExtractor) IsCreator() bool { return false } +func (e SystemExtractor) IsExtractor() bool { return true } + // Validate ensures that the sections provided are in the list we know func (e SystemExtractor) Validate() bool { invalids, 
valid := utils.StringArrayIsSubset(e.sections, validSections) @@ -49,10 +56,10 @@ func (e SystemExtractor) Validate() bool { } // Extract returns system metadata, for a set of named sections -func (e SystemExtractor) Extract(interface{}) (extractor.ExtractorData, error) { +func (e SystemExtractor) Extract(interface{}) (plugin.PluginData, error) { - sections := map[string]extractor.ExtractorSection{} - data := extractor.ExtractorData{} + sections := map[string]plugin.PluginSection{} + data := plugin.PluginData{} // Only extract the sections we asked for for _, name := range e.sections { @@ -71,6 +78,13 @@ func (e SystemExtractor) Extract(interface{}) (extractor.ExtractorData, error) { sections[OsSection] = section } + if name == CPUSection { + section, err := getCPUInformation() + if err != nil { + return data, err + } + sections[CPUSection] = section + } if name == ArchSection { section, err := getArchInformation() if err != nil { @@ -93,7 +107,7 @@ func (e SystemExtractor) Extract(interface{}) (extractor.ExtractorData, error) { } // NewPlugin validates and returns a new kernel plugin -func NewPlugin(sections []string) (extractor.Extractor, error) { +func NewPlugin(sections []string) (plugin.PluginInterface, error) { if len(sections) == 0 { sections = validSections } diff --git a/plugins/list.go b/plugins/list.go index 9883fca..67cffa2 100644 --- a/plugins/list.go +++ b/plugins/list.go @@ -6,6 +6,18 @@ import ( "github.com/jedib0t/go-pretty/v6/table" ) +// getPluginType returns a string to describe the plugin type +func getPluginType(p PluginRequest) string { + + if p.Plugin.IsCreator() && p.Plugin.IsExtractor() { + return "extractor and creator" + } + if p.Plugin.IsExtractor() { + return "extractor" + } + return "creator" +} + // List plugins available, print in a pretty table! 
func (r *PluginsRequest) List() error { @@ -16,26 +28,59 @@ func (r *PluginsRequest) List() error { t.AppendHeader(table.Row{"", "Type", "Name", "Section"}) t.AppendSeparator() - // keep count of plugins (just extractors for now) + // keep count of plugins, total, and for each kind count := 0 extractorCount := 0 + creatorCount := 0 + + // Do creators first in one section (only a few) + t.AppendSeparator() + t.AppendRow(table.Row{"creation plugins", "", "", ""}) + + // TODO add description column + for _, p := range *r { + + if !p.Plugin.IsCreator() { + continue + } + pluginType := getPluginType(p) + + // Creators don't have sections necessarily + creatorCount += 1 + count += 1 + + // Allow plugins to serve dual purposes + // TODO what should sections be used for? + t.AppendRow([]interface{}{"", pluginType, p.Name, ""}) + } // TODO add description column for _, p := range *r { - extractorCount += 1 - for i, section := range p.Extractor.Sections() { + + if p.Plugin.IsExtractor() { + extractorCount += 1 + } + + newPlugin := true + pluginType := getPluginType(p) + + // Extractors are parsed by sections + for _, section := range p.Plugin.Sections() { // Add the extractor plugin description only for first in the list - if i == 0 { + if newPlugin { t.AppendSeparator() - t.AppendRow(table.Row{p.Extractor.Description(), "", "", ""}) + t.AppendRow(table.Row{p.Plugin.Description(), "", "", ""}) + newPlugin = false } count += 1 - t.AppendRow([]interface{}{"", "extractor", p.Name, section}) + + // Allow plugins to serve dual purposes + t.AppendRow([]interface{}{"", pluginType, p.Name, section}) } } t.AppendSeparator() - t.AppendFooter(table.Row{"Total", "", extractorCount, count}) + t.AppendFooter(table.Row{"Total", "", extractorCount + creatorCount, count}) t.SetStyle(table.StyleColoredCyanWhiteOnBlack) t.Render() return nil diff --git a/plugins/plugins.go b/plugins/plugins.go index fbde2bf..521ef15 100644 --- a/plugins/plugins.go +++ b/plugins/plugins.go @@ -7,15 +7,30 @@ 
import ( "github.com/compspec/compspec-go/plugins/extractors/library" "github.com/compspec/compspec-go/plugins/extractors/nfd" "github.com/compspec/compspec-go/plugins/extractors/system" + + "github.com/compspec/compspec-go/plugins/creators/cluster" ) // Add new plugin names here. They should correspond with the package name, then NewPlugin() var ( + // Explicitly extractors KernelExtractor = "kernel" SystemExtractor = "system" LibraryExtractor = "library" NFDExtractor = "nfd" - pluginNames = []string{KernelExtractor, SystemExtractor, LibraryExtractor, NFDExtractor} + + // Explicitly creators + ClusterCreator = "cluster" + ArtifactCreator = "artifact" + + pluginNames = []string{ + ArtifactCreator, + ClusterCreator, + KernelExtractor, + SystemExtractor, + LibraryExtractor, + NFDExtractor, + } ) // parseSections will return sections from the name string @@ -40,7 +55,7 @@ func parseSections(raw string) (string, []string) { return name, sections } -// Get plugins parses a request and returns a list of plugins +// Get plugins parses a request and returns a list of extractor plugins // We honor the order that the plugins and sections are provided in func GetPlugins(names []string) (PluginsRequest, error) { @@ -63,7 +78,27 @@ func GetPlugins(names []string) (PluginsRequest, error) { return request, err } // Save the name, the instantiated interface, and sections - pr := PluginRequest{Name: name, Extractor: p, Sections: sections} + pr := PluginRequest{Name: name, Plugin: p, Sections: sections} + request = append(request, pr) + } + + // Cluster and artifact creators + if strings.HasPrefix(name, ClusterCreator) { + p, err := cluster.NewPlugin() + if err != nil { + return request, err + } + // Save the name, the instantiated interface, and sections + pr := PluginRequest{Name: name, Plugin: p} + request = append(request, pr) + } + if strings.HasPrefix(name, ArtifactCreator) { + p, err := cluster.NewPlugin() + if err != nil { + return request, err + } + // Save the name, the 
instantiated interface, and sections + pr := PluginRequest{Name: name, Plugin: p} request = append(request, pr) } @@ -73,7 +108,7 @@ func GetPlugins(names []string) (PluginsRequest, error) { return request, err } // Save the name, the instantiated interface, and sections - pr := PluginRequest{Name: name, Extractor: p, Sections: sections} + pr := PluginRequest{Name: name, Plugin: p, Sections: sections} request = append(request, pr) } @@ -83,7 +118,7 @@ func GetPlugins(names []string) (PluginsRequest, error) { return request, err } // Save the name, the instantiated interface, and sections - pr := PluginRequest{Name: name, Extractor: p, Sections: sections} + pr := PluginRequest{Name: name, Plugin: p, Sections: sections} request = append(request, pr) } @@ -93,7 +128,7 @@ func GetPlugins(names []string) (PluginsRequest, error) { return request, err } // Save the name, the instantiated interface, and sections - pr := PluginRequest{Name: name, Extractor: p, Sections: sections} + pr := PluginRequest{Name: name, Plugin: p, Sections: sections} request = append(request, pr) } } diff --git a/plugins/request.go b/plugins/request.go index 97447bd..b909a07 100644 --- a/plugins/request.go +++ b/plugins/request.go @@ -1,111 +1,64 @@ package plugins import ( - "encoding/json" "fmt" - "strings" - "github.com/compspec/compspec-go/pkg/extractor" + pg "github.com/compspec/compspec-go/pkg/plugin" ) // A plugin request has a Name and sections type PluginRequest struct { - Name string - Sections []string - Extractor extractor.Extractor - // TODO add checker here eventually too. 
+ Name string + Sections []string + Plugin pg.PluginInterface } type PluginsRequest []PluginRequest -// A Result wraps named extractor data, just for easy dumping to json -type Result struct { - Results map[string]extractor.ExtractorData `json:"extractors,omitempty"` -} - -// ToJson serializes a result to json -func (r *Result) ToJson() ([]byte, error) { - b, err := json.MarshalIndent(r, "", " ") - if err != nil { - return []byte{}, err - } - return b, err -} - -// Print prints the result to the terminal -func (r *Result) Print() { - for name, result := range r.Results { - fmt.Printf(" --Result for %s\n", name) - result.Print() - } -} - -// AddCustomFields adds or updates an existing result with -// custom metadata, either new or to overwrite -func (r *Result) AddCustomFields(fields []string) { +// Do the extraction for a plugin request, meaning across a set of plugins +func (r *PluginsRequest) Extract(allowFail bool) (pg.Result, error) { - for _, field := range fields { - if !strings.Contains(field, "=") { - fmt.Printf("warning: field %s does not contain an '=', skipping\n", field) - continue - } - parts := strings.Split(field, "=") - if len(parts) < 2 { - fmt.Printf("warning: field %s has an empty value, skipping\n", field) - continue - } + // Prepare Result + result := pg.Result{} + results := map[string]pg.PluginData{} - // No reason the value cannot have additional = - field = parts[0] - value := strings.Join(parts[1:], "=") + for _, p := range *r { - // Get the extractor, section, and subfield from the field - f, err := ParseField(field) - if err != nil { - fmt.Printf(err.Error(), field) + // Skip plugins that don't define extraction + if !p.Plugin.IsExtractor() { continue } + r, err := p.Plugin.Extract(p.Sections) - // Is the extractor name in the result? 
- _, ok := r.Results[f.Extractor] - if !ok { - sections := extractor.Sections{} - r.Results[f.Extractor] = extractor.ExtractorData{Sections: sections} - } - data := r.Results[f.Extractor] - - // Is the section name in the extractor data? - _, ok = data.Sections[f.Section] - if !ok { - data.Sections[f.Section] = extractor.ExtractorSection{} + // We can allow failure + if err != nil && !allowFail { + return result, fmt.Errorf("there was an extraction error for %s: %s", p.Name, err) + } else if err != nil && allowFail { + fmt.Printf("Allowing failure - ignoring extraction error for %s: %s\n", p.Name, err) } - section := data.Sections[f.Section] - section[f.Field] = value - - // Wrap it back up! - data.Sections[f.Section] = section - r.Results[f.Extractor] = data + results[p.Name] = r } + result.Results = results + return result, nil } -// Do the extraction for a plugin request, meaning across a set of plugins -func (r *PluginsRequest) Extract(allowFail bool) (Result, error) { +// Create runs creation across the set of creator plugins in the request +func (r *PluginsRequest) Create() (pg.Result, error) { // Prepare Result - result := Result{} - results := map[string]extractor.ExtractorData{} + result := pg.Result{} for _, p := range *r { - r, err := p.Extractor.Extract(p.Sections) - // We can allow failure - if err != nil && !allowFail { - return result, fmt.Errorf("There was an extraction error for %s: %s\n", p.Name, err) - } else if err != nil && allowFail { - fmt.Printf("Allowing failure - ignoring extraction error for %s: %s\n", p.Name, err) + // Skip plugins that don't define creation + if !p.Plugin.IsCreator() { + continue } - results[p.Name] = r + err := p.Plugin.Create(nil) + if err != nil { + return result, err + } + } - result.Results = results return result, nil }