diff --git a/cmd/compspec/compspec.go b/cmd/compspec/compspec.go index 7a3a308..626797e 100644 --- a/cmd/compspec/compspec.go +++ b/cmd/compspec/compspec.go @@ -54,12 +54,21 @@ func main() { cachePath := matchCmd.String("", "cache", &argparse.Options{Help: "A path to a cache for artifacts"}) saveGraph := matchCmd.String("", "cache-graph", &argparse.Options{Help: "Load or use a cached graph"}) - // Create arguments - options := createCmd.StringList("a", "append", &argparse.Options{Help: "Append one or more custom metadata fields to append"}) - specname := createCmd.String("i", "in", &argparse.Options{Required: true, Help: "Input yaml that contains spec for creation"}) - specfile := createCmd.String("o", "out", &argparse.Options{Help: "Save compatibility json artifact to this file"}) - mediaType := createCmd.String("m", "media-type", &argparse.Options{Help: "The expected media-type for the compatibility artifact"}) - allowFailCreate := createCmd.Flag("f", "allow-fail", &argparse.Options{Help: "Allow any specific extractor to fail (and continue extraction)"}) + // Create subcommands - note that "nodes" could be cluster, but could want to make a subset of one + artifactCmd := createCmd.NewCommand("artifact", "Create a new artifact") + nodesCmd := createCmd.NewCommand("nodes", "Create nodes in Json Graph format from extraction data") + + // Artifaction creation arguments + options := artifactCmd.StringList("a", "append", &argparse.Options{Help: "Append one or more custom metadata fields to append"}) + specname := artifactCmd.String("i", "in", &argparse.Options{Required: true, Help: "Input yaml that contains spec for creation"}) + specfile := artifactCmd.String("o", "out", &argparse.Options{Help: "Save compatibility json artifact to this file"}) + mediaType := artifactCmd.String("m", "media-type", &argparse.Options{Help: "The expected media-type for the compatibility artifact"}) + allowFailCreate := artifactCmd.Flag("f", "allow-fail", &argparse.Options{Help: "Allow any 
specific extractor to fail (and continue extraction)"}) + + // Nodes creation arguments + nodesOutFile := nodesCmd.String("", "nodes-output", &argparse.Options{Help: "Output json file for cluster nodes"}) + nodesDir := nodesCmd.String("", "node-dir", &argparse.Options{Required: true, Help: "Input directory with extraction data for nodes"}) + clusterName := nodesCmd.String("", "cluster-name", &argparse.Options{Required: true, Help: "Cluster name to describe in graph"}) // Now parse the arguments err := parser.Parse(os.Args) @@ -75,10 +84,21 @@ func main() { log.Fatalf("Issue with extraction: %s\n", err) } } else if createCmd.Happened() { - err := create.Run(*specname, *options, *specfile, *allowFailCreate) - if err != nil { - log.Fatal(err.Error()) + if artifactCmd.Happened() { + err := create.Artifact(*specname, *options, *specfile, *allowFailCreate) + if err != nil { + log.Fatal(err.Error()) + } + } else if nodesCmd.Happened() { + err := create.Nodes(*nodesDir, *clusterName, *nodesOutFile) + if err != nil { + log.Fatal(err.Error()) + } + } else { + fmt.Println(Header) + fmt.Println("Please provide a --node-dir and (optionally) --nodes-output (json file to write)") } + } else if matchCmd.Happened() { err := match.Run( *manifestFile, diff --git a/cmd/compspec/create/artifact.go b/cmd/compspec/create/artifact.go new file mode 100644 index 0000000..9a98e55 --- /dev/null +++ b/cmd/compspec/create/artifact.go @@ -0,0 +1,127 @@ +package create + +import ( + "fmt" + "os" + + "github.com/compspec/compspec-go/pkg/types" + ep "github.com/compspec/compspec-go/plugins/extractors" + + p "github.com/compspec/compspec-go/plugins" +) + +// Artifact will create a compatibility artifact based on a request in YAML +// TODO likely want to refactor this into a proper create plugin +func Artifact(specname string, fields []string, saveto string, allowFail bool) error { + + // Cut out early if a spec not provided + if specname == "" { + return fmt.Errorf("a spec input -i/--input is 
required") + } + request, err := loadRequest(specname) + if err != nil { + return err + } + + // Right now we only know about extractors, when we define subfields + // we can further filter here. + extractors := request.GetExtractors() + plugins, err := ep.GetPlugins(extractors) + if err != nil { + return err + } + + // Finally, add custom fields and extract metadata + result, err := plugins.Extract(allowFail) + if err != nil { + return err + } + + // Update with custom fields (either new or overwrite) + result.AddCustomFields(fields) + + // The compspec returned is the populated Compatibility request! + compspec, err := PopulateExtractors(&result, request) + if err != nil { + return err + } + + output, err := compspec.ToJson() + if err != nil { + return err + } + if saveto == "" { + fmt.Println(string(output)) + } else { + err = os.WriteFile(saveto, output, 0644) + if err != nil { + return err + } + } + return nil +} + +// LoadExtractors loads a compatibility result into a compatibility request +// After this we can save the populated thing into an artifact (json DUMP) +func PopulateExtractors(result *ep.Result, request *types.CompatibilityRequest) (*types.CompatibilityRequest, error) { + + // Every metadata attribute must be known under a schema + schemas := request.Metadata.Schemas + if len(schemas) == 0 { + return nil, fmt.Errorf("the request must have one or more schemas") + } + for i, compat := range request.Compatibilities { + + // The compatibility section name is a schema, and must be defined + url, ok := schemas[compat.Name] + if !ok { + return nil, fmt.Errorf("%s is missing a schema", compat.Name) + } + if url == "" { + return nil, fmt.Errorf("%s has an empty schema", compat.Name) + } + + for key, extractorKey := range compat.Attributes { + + // Get the extractor, section, and subfield from the extractor lookup key + f, err := p.ParseField(extractorKey) + if err != nil { + fmt.Printf("warning: cannot parse %s: %s, setting to empty\n", key, extractorKey) 
+ compat.Attributes[key] = "" + continue + } + + // If we get here, we can parse it and look it up in our result metadata + extractor, ok := result.Results[f.Extractor] + if !ok { + fmt.Printf("warning: extractor %s is unknown, setting to empty\n", f.Extractor) + compat.Attributes[key] = "" + continue + } + + // Now get the section + section, ok := extractor.Sections[f.Section] + if !ok { + fmt.Printf("warning: section %s.%s is unknown, setting to empty\n", f.Extractor, f.Section) + compat.Attributes[key] = "" + continue + } + + // Now get the value! + value, ok := section[f.Field] + if !ok { + fmt.Printf("warning: field %s.%s.%s is unknown, setting to empty\n", f.Extractor, f.Section, f.Field) + compat.Attributes[key] = "" + continue + } + + // If we get here - we found it! Hooray! + compat.Attributes[key] = value + } + + // Update the compatibiity + request.Compatibilities[i] = compat + } + + return request, nil +} diff --git a/cmd/compspec/create/create.go b/cmd/compspec/create/create.go index 2745b36..7d713e8 100644 --- a/cmd/compspec/create/create.go +++ b/cmd/compspec/create/create.go @@ -1,11 +1,9 @@ package create import ( - "fmt" "os" "github.com/compspec/compspec-go/pkg/types" - p "github.com/compspec/compspec-go/plugins" "sigs.k8s.io/yaml" ) @@ -23,118 +21,3 @@ func loadRequest(filename string) (*types.CompatibilityRequest, error) { } return &request, nil } - -// Run will create a compatibility artifact based on a request in YAML -func Run(specname string, fields []string, saveto string, allowFail bool) error { - - // Cut out early if a spec not provided - if specname == "" { - return fmt.Errorf("A spec input -i/--input is required") - } - request, err := loadRequest(specname) - if err != nil { - return err - } - - // Right now we only know about extractors, when we define subfields - // we can further filter here. 
- extractors := request.GetExtractors() - plugins, err := p.GetPlugins(extractors) - if err != nil { - return err - } - - // Finally, add custom fields and extract metadata - result, err := plugins.Extract(allowFail) - if err != nil { - return err - } - - // Update with custom fields (either new or overwrite) - result.AddCustomFields(fields) - - // The compspec returned is the populated Compatibility request! - compspec, err := PopulateExtractors(&result, request) - if err != nil { - return err - } - - output, err := compspec.ToJson() - if err != nil { - return err - } - if saveto == "" { - fmt.Println(string(output)) - } else { - err = os.WriteFile(saveto, output, 0644) - if err != nil { - return err - } - } - return nil -} - -// LoadExtractors loads a compatibility result into a compatibility request -// After this we can save the populated thing into an artifact (json DUMP) -func PopulateExtractors(result *p.Result, request *types.CompatibilityRequest) (*types.CompatibilityRequest, error) { - - // Every metadata attribute must be known under a schema - schemas := request.Metadata.Schemas - if len(schemas) == 0 { - return nil, fmt.Errorf("the request must have one or more schemas") - } - for i, compat := range request.Compatibilities { - - // The compatibility section name is a schema, and must be defined - url, ok := schemas[compat.Name] - if !ok { - return nil, fmt.Errorf("%s is missing a schema", compat.Name) - } - if url == "" { - return nil, fmt.Errorf("%s has an empty schema", compat.Name) - } - - for key, extractorKey := range compat.Attributes { - - // Get the extractor, section, and subfield from the extractor lookup key - f, err := p.ParseField(extractorKey) - if err != nil { - fmt.Printf("warning: cannot parse %s: %s, setting to empty\n", key, extractorKey) - compat.Attributes[key] = "" - continue - } - - // If we get here, we can parse it and look it up in our result metadata - extractor, ok := result.Results[f.Extractor] - if !ok { - 
fmt.Printf("warning: extractor %s is unknown, setting to empty\n", f.Extractor) - compat.Attributes[key] = "" - continue - } - - // Now get the section - section, ok := extractor.Sections[f.Section] - if !ok { - fmt.Printf("warning: section %s.%s is unknown, setting to empty\n", f.Extractor, f.Section) - compat.Attributes[key] = "" - continue - } - - // Now get the value! - value, ok := section[f.Field] - if !ok { - fmt.Printf("warning: field %s.%s.%s is unknown, setting to empty\n", f.Extractor, f.Section, f.Field) - compat.Attributes[key] = "" - continue - } - - // If we get here - we found it! Hooray! - compat.Attributes[key] = value - } - - // Update the compatibiity - request.Compatibilities[i] = compat - } - - return request, nil -} diff --git a/cmd/compspec/create/nodes.go b/cmd/compspec/create/nodes.go new file mode 100644 index 0000000..256067c --- /dev/null +++ b/cmd/compspec/create/nodes.go @@ -0,0 +1,180 @@ +package create + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + + "github.com/compspec/compspec-go/pkg/graph" + "github.com/compspec/compspec-go/pkg/utils" + ep "github.com/compspec/compspec-go/plugins/extractors" +) + +// Nodes will read in one or more node extraction metadata files and generate a single nodes JGF graph +// This is intentended for a registration command. +// TODO this should be converted to a creation (converter) plugin +func Nodes(nodesDir, clusterName, nodeOutFile string) error { + + // Read in each node into a plugins.Result + // Results map[string]extractor.ExtractorData `json:"extractors,omitempty"` + nodes := map[string]ep.Result{} + + nodeFiles, err := os.ReadDir(nodesDir) + if err != nil { + return err + } + for _, f := range nodeFiles { + fmt.Printf("Loading %s\n", f.Name()) + result := ep.Result{} + fullpath := filepath.Join(nodesDir, f.Name()) + + // Be forgiving if extra files are there... 
+ err := result.Load(fullpath) + if err != nil { + fmt.Printf("Warning, filename %s is not in the correct format. Skipping\n", f.Name()) + continue + } + // Add to nodes, if we don't error + nodes[f.Name()] = result + } + + // When we get here, no nodes, no graph + if len(nodes) == 0 { + fmt.Println("There were no nodes for the graph.") + return nil + } + + // Prepare a graph that will describe our cluster + g, err := graph.NewClusterGraph(clusterName) + if err != nil { + return err + } + + // This is the root node, we reference it as a parent to the rack + root := g.Graph.Nodes["0"] + + // Right now assume we have just one rack with all nodes + // https://github.com/flux-framework/flux-sched/blob/master/t/data/resource/jgfs/tiny.json#L4 + // Note that these are flux specific, and we can make them more generic if needed + + // resource (e.g., rack, node) + // name (usually the same as the resource) + // size (usually 1) + // exclusive (usually false) + // unit (usually empty or an amount) + rack := *g.AddNode("rack", "rack", 1, false, "") + + // Connect the rack to the parent, both ways. + // I think this is because fluxion is Depth First and Upwards (dfu) + // "The root cluster contains a rack" + g.AddEdge(root, rack, "contains") + + // "The rack is in a cluster" + g.AddEdge(rack, root, "in") + + // Read in each node and add to the rack. 
+ // There are several levels here: + // /tiny0/rack0/node0/socket0/core1 + for nodeFile, meta := range nodes { + + // We must have extractors, nfd, and sections + nfd, ok := meta.Results["nfd"] + if !ok || len(nfd.Sections) == 0 { + fmt.Printf("node %s is missing extractors->nfd data, skipping\n", nodeFile) + continue + } + + // We also need system -> sections -> processor + system, ok := meta.Results["system"] + if !ok || len(system.Sections) == 0 { + fmt.Printf("node %s is missing extractors->system data, skipping\n", nodeFile) + continue + } + processor, ok := system.Sections["processor"] + if !ok || len(processor) == 0 { + fmt.Printf("node %s is missing extractors->system->processor, skipping\n", nodeFile) + continue + } + cpu, ok := system.Sections["cpu"] + if !ok || len(cpu) == 0 { + fmt.Printf("node %s is missing extractors->system->cpu, skipping\n", nodeFile) + continue + } + + // IMPORTANT: this is runtime nproces, which might be physical and virtual + // we need hwloc for just physical I think + cores, ok := cpu["cores"] + if !ok { + fmt.Printf("node %s is missing extractors->system->cpu->cores, skipping\n", nodeFile) + continue + } + cpuCount, err := strconv.Atoi(cores) + if err != nil { + fmt.Printf("node %s cannot convert cores, skipping\n", nodeFile) + continue + } + + // First add the rack -> node + node := *g.AddNode("node", "node", 1, false, "") + g.AddEdge(rack, node, "contains") + g.AddEdge(node, rack, "in") + + // Now add the socket. We need hwloc for this + // nfd has a socket count, but we can't be sure which CPU are assigned to which? 
+ // This isn't good enough, see https://github.com/compspec/compspec-go/issues/19 + // For the prototype we will use the nfd socket count and split cores across it + // cpu metadata from ndf + socketCount := 1 + + nfdCpu, ok := nfd.Sections["cpu"] + if ok { + sockets, ok := nfdCpu["topology.socket_count"] + if ok { + sCount, err := strconv.Atoi(sockets) + if err == nil { + socketCount = sCount + } + } + } + + // Get the processors, assume we divide between the sockets + // TODO we should also get this in better detail, physical vs logical cores + items := []string{} + for i := 0; i < cpuCount; i++ { + items = append(items, fmt.Sprintf("%s", i)) + } + // Mapping of socket to cores + chunks := utils.Chunkify(items, socketCount) + for _, chunk := range chunks { + + // Create each socket attached to the node + // rack -> node -> socket + socketNode := *g.AddNode("socket", "socket", 1, false, "") + g.AddEdge(node, socketNode, "contains") + g.AddEdge(socketNode, node, "in") + + // Create each core attached to the socket + for _, _ = range chunk { + coreNode := *g.AddNode("core", "core", 1, false, "") + g.AddEdge(socketNode, coreNode, "contains") + g.AddEdge(coreNode, socketNode, "in") + + } + } + } + + // Save graph if given a file + if nodeOutFile != "" { + err = g.SaveGraph(nodeOutFile) + if err != nil { + return err + } + } else { + toprint, _ := json.MarshalIndent(g.Graph, "", "\t") + fmt.Println(string(toprint)) + return nil + } + return nil +} diff --git a/cmd/compspec/extract/extract.go b/cmd/compspec/extract/extract.go index f06b2f4..acf6900 100644 --- a/cmd/compspec/extract/extract.go +++ b/cmd/compspec/extract/extract.go @@ -5,7 +5,7 @@ import ( "os" "runtime" - p "github.com/compspec/compspec-go/plugins" + ep "github.com/compspec/compspec-go/plugins/extractors" ) // Run will run an extraction of host metadata @@ -15,12 +15,12 @@ func Run(filename string, pluginNames []string, allowFail bool) error { // Womp womp, we only support linux! There is no other way. 
operatingSystem := runtime.GOOS if operatingSystem != "linux" { - return fmt.Errorf("🤓️ Sorry, we only support linux.") + return fmt.Errorf("🤓️ sorry, we only support linux") } // parse [section,...,section] into named plugins and sections // return plugins - plugins, err := p.GetPlugins(pluginNames) + plugins, err := ep.GetPlugins(pluginNames) if err != nil { return err } @@ -37,7 +37,7 @@ func Run(filename string, pluginNames []string, allowFail bool) error { // This returns an array of bytes b, err := result.ToJson() if err != nil { - return fmt.Errorf("There was an issue marshalling to JSON: %s\n", err) + return fmt.Errorf("there was an issue marshalling to JSON: %s", err) } err = os.WriteFile(filename, b, 0644) if err != nil { diff --git a/cmd/compspec/list/list.go b/cmd/compspec/list/list.go index 0af9db5..2ab77df 100644 --- a/cmd/compspec/list/list.go +++ b/cmd/compspec/list/list.go @@ -1,17 +1,25 @@ package list import ( + "github.com/compspec/compspec-go/plugins/extractors" + p "github.com/compspec/compspec-go/plugins" ) // Run will list the extractor names and sections known func Run(pluginNames []string) error { + // parse [section,...,section] into named plugins and sections // return plugins - plugins, err := p.GetPlugins(pluginNames) + plugins, err := extractors.GetPlugins(pluginNames) if err != nil { return err } + // Convert to plugin information + info := []p.PluginInformation{} + for _, p := range plugins { + info = append(info, &p) + } // List plugin table - return plugins.List() + return p.List(info) } diff --git a/docs/README.md b/docs/README.md index 1db043f..f175f68 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,9 +2,9 @@ This is early documentation that will be converted eventually to something prettier. 
Read more about: - - [Design](design.md) - - [Usage](usage.md) - + - [Design](design.md) of compspec + - [Usage](usage.md) generic use cases + - [Rainbow](rainbow.md) use cases and examples for the rainbow scheduler ## Thanks and Previous Art diff --git a/docs/design.md b/docs/design.md index 3d825bf..0d07d6a 100644 --- a/docs/design.md +++ b/docs/design.md @@ -9,14 +9,16 @@ The compatibility tool is responsible for extracting information about a system, ### Extractor +> The "extract" command + An **extractor** is a core plugin that knows how to retrieve metadata about a host. An extractor is usually going to be run for two cases: 1. During CI to extract (and save) metadata about a particular build to put in a compatibility artifact. 2. During image selection to extract information about the host to compare to. -Examples extractors could be "library" or "system." +Examples extractors could be "library" or "system." You interact with extractor plugins via the "extract" command. -### Section +#### Section A **section** is a group of metadata within an extractor. For example, within "library" a section is for "mpi." This allows a user to specify running the `--name library[mpi]` extractor to ask for the mpi section of the library family. Another example is under kernel. The user might want to ask for more than one group to be extracted and might ask for `--name kernel[boot,config]`. Section basically provides more granularity to an extractor namespace. For the above two examples, the metadata generated would be organized like: @@ -31,12 +33,25 @@ kernel For the above, right now I am implementing extractors generally, or "wild-westy" in the sense that the namespace is oriented toward the extractor name and sections it owns (e.g., no community namespaces like archspec, spack, opencontainers, etc). This is subject to change depending on the design the working group decides on. 
-### Creator +### Convert + +> The "create" command + +A **converter** is a plugin that knows how to take extracted data and turn it into something else. For example: + +1. We can extract metadata about nodes and convert to Json Graph format to describe a cluster. +2. We can extract metadata about an application and create a compatibility specification. + +You interact with converters via the "create" command. + +#### Create A creator is a plugin that is responsible for creating an artifact that includes some extracted metadata. The creator is agnostic to what it it being asked to generate in the sense that it just needs a mapping. The mapping will be from the extractor namespace to the compatibility artifact namespace. For our first prototype, this just means asking for particular extractor attributes to map to a set of annotations that we want to dump into json. To start there should only be one creator plugin needed, however if there are different structures of artifacts needed, I could imagine more. An example creation specification for a prototype experiment where we care about architecture, MPI, and GPU is provided in [examples](examples). ## Overview +> This was the original proposal and may be out of date. + The design is based on the prototype from that pull request, shown below. ![img/proposal-c-plugin-design.png](img/proposal-c-plugin-design.png) diff --git a/docs/img/rainbow-scheduler-register.png b/docs/img/rainbow-scheduler-register.png new file mode 100644 index 0000000..b547e6b Binary files /dev/null and b/docs/img/rainbow-scheduler-register.png differ diff --git a/docs/rainbow/README.md b/docs/rainbow/README.md new file mode 100644 index 0000000..4204b10 --- /dev/null +++ b/docs/rainbow/README.md @@ -0,0 +1,48 @@ +# Rainbow Scheduler + +The [rainbow scheduler](https://github.com/converged-computing/rainbow) has a registration step that requires a cluster to send over node metadata. 
The reason is that when a user sends a request for work, the scheduler needs to understand
+how to properly assign it. To do that, it needs to be able to see all the resources (clusters) available to it.
+
+![../img/rainbow-scheduler-register.png](../img/rainbow-scheduler-register.png)
+
+For the purposes of compspec here, we care about the registration step. This is what that includes:
+
+## Registration
+
+1. At registration, the cluster also sends over metadata about itself (and the nodes it has). This is going to allow for selection for those nodes.
+1. When submitting a job, the user no longer is giving an exact command, but a command + an image with compatibility metadata. The compatibility metadata (somehow) needs to be used to inform the cluster selection.
+1. At selection, the rainbow scheduler needs to filter down cluster options, and choose a subset.
+  - Level 1: Don't ask, just choose the top choice and submit
+  - Level 2: Ask the cluster for TBA time or cost, choose based on that.
+  - Job is added to that queue.
+
+Specifically, this means two steps for compspec go:
+
+1. A step to ask each node to extract its own metadata, saved to a directory.
+2. A second step to combine those nodes into a graph.
+
+Likely we will take a simple approach to do an extract for one node that captures its metadata into Json Graph Format (JGF) and then dumps into a shared directory (we might imagine this being run with a flux job)
+and then some combination step.
+
+## Example
+
+In the example below, we will extract node level metadata with `compspec extract` and then generate the cluster JGF to send for registration with `compspec create nodes`.
+
+### 1. Extract Metadata
+
+Let's first generate faux node metadata for a "cluster" - I will just run an extraction a few times and generate equivalent files :) This isn't such a crazy idea because it emulates nodes that are the same!
+ +```bash +mkdir -p ./docs/rainbow/cluster +compspec extract --name library --name nfd[cpu,memory,network,storage,system] --name system[cpu,processor,arch,memory] --out ./docs/rainbow/cluster/node-1.json +compspec extract --name library --name nfd[cpu,memory,network,storage,system] --name system[cpu,processor,arch,memory] --out ./docs/rainbow/cluster/node-2.json +compspec extract --name library --name nfd[cpu,memory,network,storage,system] --name system[cpu,processor,arch,memory] --out ./docs/rainbow/cluster/node-3.json +``` + +### 2. Create Nodes + +Now we are going to give compspec the directory, and ask it to create nodes. This will be in JSON graph format. This outputs to the terminal: + +```bash +compspec create nodes --cluster-name cluster-red --node-dir ./docs/rainbow/cluster/ +``` \ No newline at end of file diff --git a/docs/usage.md b/docs/usage.md index aaf134d..5b5efb6 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -87,11 +87,19 @@ Note that we will eventually add a description column - it's not really warrante ## Create -The create command is how you take a compatibility request, or a YAML file that has a mapping between the extractors defined by this tool and your compatibility metadata namespace, and generate an artifact. The artifact typically will be a JSON dump of key value pairs, scoped under different namespaces, that you might push to a registry to live alongside a container image, and with the intention to eventually use it to check compatiility against a new system. To run create -we can use the example in the top level repository: +The create command handles two kinds of creation (sub-commands): + + - **artifact**: create a compatibility artifact to describe an environment or application + - **nodes** create a json graph format summary of nodes (a directory with one or more extracted metadata JSON files with node metadata) + +The artifact case is described here. 
For the node case, you can read about it in the [rainbow scheduler](rainbow) documentation. + +### Artifact + +The create artifact command is how you take a compatibility request, or a YAML file that has a mapping between the extractors defined by this tool and your compatibility metadata namespace, and generate an artifact. The artifact typically will be a JSON dump of key value pairs, scoped under different namespaces, that you might push to a registry to live alongside a container image, and with the intention to eventually use it to check compatiility against a new system. To run create we can use the example in the top level repository: ```bash -./bin/compspec create --in ./examples/lammps-experiment.yaml +./bin/compspec create artifact --in ./examples/lammps-experiment.yaml ``` Note that you'll see some errors about fields not being found! This is because we've implemented this for the fields to be added custom, on the command line. @@ -99,7 +107,7 @@ The idea here is that you can add custom metadata fields during your build, whic ```bash # a stands for "append" and it can write a new field or overwrite an existing one -./bin/compspec create --in ./examples/lammps-experiment.yaml -a custom.gpu.available=yes +./bin/compspec create artifact --in ./examples/lammps-experiment.yaml -a custom.gpu.available=yes ``` ```console { @@ -143,7 +151,7 @@ Awesome! That, as simple as it is, is our compatibility artifact. I ran the comm a build will generate it for that context. We would want to save this to file: ```bash -./bin/compspec create --in ./examples/lammps-experiment.yaml -a custom.gpu.available=yes -o ./examples/generated-compatibility-spec.json +./bin/compspec create artifact --in ./examples/lammps-experiment.yaml -a custom.gpu.available=yes -o ./examples/generated-compatibility-spec.json ``` And that's it! We would next (likely during CI) push this compatibility artifact to a URI that is likely (TBA) linked to the image. 
diff --git a/pkg/extractor/extractor.go b/pkg/extractor/extractor.go index 22f6686..85e4c5d 100644 --- a/pkg/extractor/extractor.go +++ b/pkg/extractor/extractor.go @@ -15,6 +15,7 @@ type Extractor interface { Extract(interface{}) (ExtractorData, error) Validate() bool Sections() []string + // GetSection(string) ExtractorData } // ExtractorData is returned by an extractor diff --git a/pkg/graph/cluster.go b/pkg/graph/cluster.go new file mode 100644 index 0000000..cde389c --- /dev/null +++ b/pkg/graph/cluster.go @@ -0,0 +1,188 @@ +package graph + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/compspec/compspec-go/pkg/utils" + "github.com/converged-computing/jsongraph-go/jsongraph/metadata" + "github.com/converged-computing/jsongraph-go/jsongraph/v2/graph" + jgf "github.com/converged-computing/jsongraph-go/jsongraph/v2/graph" +) + +// A ClusterGraph is meant to be a plain (flux oriented) JGF to describe a cluster (nodes) +type ClusterGraph struct { + *jgf.JsonGraph + + Name string + + // Top level counter for node labels (JGF v2) that maps to ids (JGF v1) + nodeCounter int32 + + // Counters for specific resource types (e.g., rack, node) + resourceCounters map[string]int32 +} + +// HasNode determines if the graph has a node, named by label +func (c *ClusterGraph) HasNode(name string) bool { + _, ok := c.Graph.Nodes[name] + return ok +} + +// Save graph to a cached file +func (c *ClusterGraph) SaveGraph(path string) error { + exists, err := utils.PathExists(path) + if err != nil { + return err + } + // Don't overwrite if exists + if exists { + fmt.Printf("Graph %s already exists, will not overwrite\n", path) + return nil + } + content, err := json.MarshalIndent(c.Graph, "", " ") + if err != nil { + return err + } + fmt.Printf("Saving graph to %s\n", path) + err = os.WriteFile(path, content, 0644) + if err != nil { + return err + } + return nil +} + +// Path gets a new path +func getNodePath(root, subpath string) string { + if subpath == "" { + return 
fmt.Sprintf("/%s", root) + } + return fmt.Sprintf("/%s/%s", root, subpath) +} + +// AddNode adds a node to the graph +// g.AddNode("rack", 1, false, "", root) +func (c *ClusterGraph) AddNode( + resource string, + name string, + size int32, + exclusive bool, + unit string, +) *graph.Node { + node := c.getNode(resource, name, size, exclusive, unit) + c.Graph.Nodes[*node.Label] = *node + return node +} + +// Add an edge from source to dest with some relationship +func (c *ClusterGraph) AddEdge(source, dest graph.Node, relation string) { + edge := getEdge(*source.Label, *dest.Label, relation) + c.Graph.Edges = append(c.Graph.Edges, edge) +} + +// getNode is a private shared function that can also be used to generate the root! +func (c *ClusterGraph) getNode( + resource string, + name string, + size int32, + exclusive bool, + unit string, +) *graph.Node { + + // Get the identifier for the resource type + counter, ok := c.resourceCounters[resource] + if !ok { + counter = 0 + } + + // The current count in the graph (global) + count := c.nodeCounter + + // The id in the metadata is the counter for that resource type + resourceCounter := fmt.Sprintf("%d", counter) + + // The resource name is the type + the resource counter + resourceName := fmt.Sprintf("%s%d", name, counter) + + // New Metadata with expected fluxion data + m := metadata.Metadata{} + m.AddElement("type", resource) + m.AddElement("basename", name) + m.AddElement("id", resourceCounter) + m.AddElement("name", resourceName) + + // uniq_id should be the same as the label, but as an integer + m.AddElement("uniq_id", count) + m.AddElement("rank", -1) + m.AddElement("exclusive", exclusive) + m.AddElement("unit", unit) + m.AddElement("size", size) + m.AddElement("paths", map[string]string{"containment": getNodePath(name, "")}) + + // Update the resource counter + counter += 1 + c.resourceCounters[resource] = counter + + // Update the global counter + c.nodeCounter += 1 + + // Assemble the node! 
+ // Label for v2 will be identifier "id" for JGF v1 + label := fmt.Sprintf("%d", count) + node := graph.Node{Label: &label, Metadata: m} + return &node +} + +/* +{ + "id": "1", + "metadata": { + "type": "rack", + "basename": "rack", + "name": "rack0", + "id": 0, + "uniq_id": 1, + "rank": -1, + "exclusive": false, + "unit": "", + "size": 1, + "paths": { + "containment": "/tiny0/rack0" + } + } + },*/ + +// Init a new FlexGraph from a graphml filename +// The cluster root is slightly different so we don't use getNode here +func NewClusterGraph(name string) (ClusterGraph, error) { + + // prepare a graph to load targets into + g := jgf.NewGraph() + + // New Metadata with expected fluxion data + m := metadata.Metadata{} + m.AddElement("type", "cluster") + m.AddElement("basename", name) + m.AddElement("id", 0) + m.AddElement("uniq_id", 0) + m.AddElement("rank", -1) + m.AddElement("exclusive", false) + m.AddElement("unit", "") + m.AddElement("size", 1) + m.AddElement("paths", map[string]string{"containment": getNodePath(name, "")}) + + // Root cluster node + label := "0" + node := graph.Node{Label: &label, Metadata: m} + + // Set the root node + g.Graph.Nodes[label] = node + + // Create a new cluster! 
+ // Start counting at 1 - index 0 is the cluster root + resourceCounters := map[string]int32{"cluster": int32(1)} + cluster := ClusterGraph{g, name, 1, resourceCounters} + + return cluster, nil +} diff --git a/pkg/graph/graph.go b/pkg/graph/compatibility.go similarity index 100% rename from pkg/graph/graph.go rename to pkg/graph/compatibility.go diff --git a/pkg/graph/edges.go b/pkg/graph/edges.go index 6cb463f..056ba03 100644 --- a/pkg/graph/edges.go +++ b/pkg/graph/edges.go @@ -4,7 +4,7 @@ import ( "github.com/converged-computing/jsongraph-go/jsongraph/v2/graph" ) -// Get an edge with a specific containment (typically "contains" or "in") -func getEdge(source string, dest string, containment string) graph.Edge { - return graph.Edge{Source: source, Target: dest, Relation: containment} +// Get an edge with a specific relationship (typically "contains" or "in") +func getEdge(source string, dest string, relation string) graph.Edge { + return graph.Edge{Source: source, Target: dest, Relation: relation} } diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index a4ec477..80168a2 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -4,6 +4,7 @@ import ( "bufio" "errors" "fmt" + "math" "math/rand" "os" "strings" @@ -21,6 +22,21 @@ func PathExists(path string) (bool, error) { return true, nil } +// chunkify a count of processors across sockets +func Chunkify(items []string, count int) [][]string { + var chunks [][]string + chunkSize := int(math.Ceil(float64(len(items)) / float64(count))) + + for i := 0; i < len(items); i += chunkSize { + end := i + chunkSize + if end > len(items) { + end = len(items) + } + chunks = append(chunks, items[i:end]) + } + return chunks +} + // SplitDelimiterList splits a list of items by an additional delimiter func SplitDelimiterList(items []string, delim string) (map[string]string, error) { diff --git a/plugins/extractors/plugins.go b/plugins/extractors/plugins.go new file mode 100644 index 0000000..330016c --- /dev/null +++ 
b/plugins/extractors/plugins.go @@ -0,0 +1,80 @@ +package extractors + +import ( + "strings" + + "github.com/compspec/compspec-go/plugins" + "github.com/compspec/compspec-go/plugins/extractors/kernel" + "github.com/compspec/compspec-go/plugins/extractors/library" + "github.com/compspec/compspec-go/plugins/extractors/nfd" + "github.com/compspec/compspec-go/plugins/extractors/system" +) + +// Add new plugin names here. They should correspond with the package name, then NewPlugin() +var ( + KernelExtractor = "kernel" + SystemExtractor = "system" + LibraryExtractor = "library" + NFDExtractor = "nfd" + pluginNames = []string{KernelExtractor, SystemExtractor, LibraryExtractor, NFDExtractor} +) + +// Get plugins parses a request and returns a list of plugins +// We honor the order that the plugins and sections are provided in +func GetPlugins(names []string) (PluginsRequest, error) { + + if len(names) == 0 { + names = pluginNames + } + + request := PluginsRequest{} + + // Prepare an extractor for each, and validate the requested sections + // TODO: this could also be done with an init -> Register pattern + for _, name := range names { + + // If we are given a list of section names, parse. 
+ name, sections := plugins.ParseSections(name) + + if strings.HasPrefix(name, KernelExtractor) { + p, err := kernel.NewPlugin(sections) + if err != nil { + return request, err + } + // Save the name, the instantiated interface, and sections + pr := PluginRequest{Name: name, Extractor: p, Sections: sections} + request = append(request, pr) + } + + if strings.HasPrefix(name, NFDExtractor) { + p, err := nfd.NewPlugin(sections) + if err != nil { + return request, err + } + // Save the name, the instantiated interface, and sections + pr := PluginRequest{Name: name, Extractor: p, Sections: sections} + request = append(request, pr) + } + + if strings.HasPrefix(name, SystemExtractor) { + p, err := system.NewPlugin(sections) + if err != nil { + return request, err + } + // Save the name, the instantiated interface, and sections + pr := PluginRequest{Name: name, Extractor: p, Sections: sections} + request = append(request, pr) + } + + if strings.HasPrefix(name, LibraryExtractor) { + p, err := library.NewPlugin(sections) + if err != nil { + return request, err + } + // Save the name, the instantiated interface, and sections + pr := PluginRequest{Name: name, Extractor: p, Sections: sections} + request = append(request, pr) + } + } + return request, nil +} diff --git a/plugins/extractors/request.go b/plugins/extractors/request.go new file mode 100644 index 0000000..ea4acbe --- /dev/null +++ b/plugins/extractors/request.go @@ -0,0 +1,59 @@ +package extractors + +import ( + "fmt" + + "github.com/compspec/compspec-go/pkg/extractor" + "github.com/compspec/compspec-go/plugins" +) + +// A plugin request has a Name and sections +type PluginRequest struct { + Name string + Sections []string + Extractor extractor.Extractor +} + +// These functions make it possible to use the PluginRequest as a PluginInformation interface +func (p *PluginRequest) GetName() string { + return p.Name +} +func (p *PluginRequest) GetType() string { + return "extractor" +} +func (p *PluginRequest) 
GetDescription() string { + return p.Extractor.Description() +} +func (p *PluginRequest) GetSections() []plugins.PluginSection { + sections := make([]plugins.PluginSection, 0, len(p.Extractor.Sections())) + + for _, section := range p.Extractor.Sections() { + newSection := plugins.PluginSection{Name: section} + sections = append(sections, newSection) + } + return sections +} + +type PluginsRequest []PluginRequest + +// Do the extraction for a plugin request, meaning across a set of plugins +func (r *PluginsRequest) Extract(allowFail bool) (Result, error) { + + // Prepare Result + result := Result{} + results := map[string]extractor.ExtractorData{} + + for _, p := range *r { + r, err := p.Extractor.Extract(p.Sections) + + // We can allow failure + if err != nil && !allowFail { + return result, fmt.Errorf("There was an extraction error for %s: %s\n", p.Name, err) + } else if err != nil && allowFail { + fmt.Printf("Allowing failure - ignoring extraction error for %s: %s\n", p.Name, err) + } + results[p.Name] = r + } + result.Results = results + return result, nil +} diff --git a/plugins/extractors/result.go b/plugins/extractors/result.go new file mode 100644 index 0000000..fde8ca7 --- /dev/null +++ b/plugins/extractors/result.go @@ -0,0 +1,95 @@ +package extractors + +import ( + "encoding/json" + "fmt" + "os" + "strings" + + "github.com/compspec/compspec-go/pkg/extractor" + "github.com/compspec/compspec-go/plugins" +) + +// A Result wraps named extractor data, just for easy dumping to json +type Result struct { + Results map[string]extractor.ExtractorData `json:"extractors,omitempty"` +} + +// Load a filename into the result object! 
+func (r *Result) Load(filename string) error { + + file, err := os.ReadFile(filename) + if err != nil { + return err + } + err = json.Unmarshal(file, r) + if err != nil { + return err + } + return nil +} + +// ToJson serializes a result to json +func (r *Result) ToJson() ([]byte, error) { + b, err := json.MarshalIndent(r, "", " ") + if err != nil { + return []byte{}, err + } + return b, err +} + +// Print prints the result to the terminal +func (r *Result) Print() { + for name, result := range r.Results { + fmt.Printf(" --Result for %s\n", name) + result.Print() + } +} + +// AddCustomFields adds or updates an existing result with +// custom metadata, either new or to overwrite +func (r *Result) AddCustomFields(fields []string) { + + for _, field := range fields { + if !strings.Contains(field, "=") { + fmt.Printf("warning: field %s does not contain an '=', skipping\n", field) + continue + } + parts := strings.Split(field, "=") + if len(parts) < 2 { + fmt.Printf("warning: field %s has an empty value, skipping\n", field) + continue + } + + // No reason the value cannot have additional = + field = parts[0] + value := strings.Join(parts[1:], "=") + + // Get the extractor, section, and subfield from the field + f, err := plugins.ParseField(field) + if err != nil { + fmt.Printf(err.Error(), field) + continue + } + + // Is the extractor name in the result? + _, ok := r.Results[f.Extractor] + if !ok { + sections := extractor.Sections{} + r.Results[f.Extractor] = extractor.ExtractorData{Sections: sections} + } + data := r.Results[f.Extractor] + + // Is the section name in the extractor data? + _, ok = data.Sections[f.Section] + if !ok { + data.Sections[f.Section] = extractor.ExtractorSection{} + } + section := data.Sections[f.Section] + section[f.Field] = value + + // Wrap it back up! 
+ data.Sections[f.Section] = section + r.Results[f.Extractor] = data + } +} diff --git a/plugins/extractors/system/extractors.go b/plugins/extractors/system/extractors.go index 2229a84..beb9162 100644 --- a/plugins/extractors/system/extractors.go +++ b/plugins/extractors/system/extractors.go @@ -3,6 +3,7 @@ package system import ( "fmt" "os" + "runtime" "strings" "github.com/compspec/compspec-go/pkg/extractor" @@ -111,10 +112,17 @@ func getCpuFeatures(p map[string]string) (string, error) { } // getCPUInformation gets information about the system -// TODO this is not used. func getCPUInformation() (extractor.ExtractorSection, error) { info := extractor.ExtractorSection{} + // This really needs to be better, the hard part is that + // proc/cpuinfo is different between arm and others, + // and arm doesn't show physical/virtual cores + cores := runtime.NumCPU() + + // This is a guess at best + info["cores"] = fmt.Sprintf("%d", cores) + //stat, err := linuxproc.ReadCPUInfo(CpuInfoFile) //if err != nil { // return info, fmt.Errorf("cannot read %s: %s", CpuInfoFile, err) diff --git a/plugins/extractors/system/system.go b/plugins/extractors/system/system.go index bcf2a07..ee6c56b 100644 --- a/plugins/extractors/system/system.go +++ b/plugins/extractors/system/system.go @@ -20,7 +20,7 @@ const ( ) var ( - validSections = []string{ProcessorSection, OsSection, ArchSection, MemorySection} + validSections = []string{ProcessorSection, OsSection, ArchSection, MemorySection, CPUSection} ) type SystemExtractor struct { @@ -70,7 +70,13 @@ func (e SystemExtractor) Extract(interface{}) (extractor.ExtractorData, error) { } sections[OsSection] = section } - + if name == CPUSection { + section, err := getCPUInformation() + if err != nil { + return data, err + } + sections[CPUSection] = section + } if name == ArchSection { section, err := getArchInformation() if err != nil { diff --git a/plugins/field.go b/plugins/field.go index 6326ff2..2c269fc 100644 --- a/plugins/field.go +++ 
b/plugins/field.go @@ -21,7 +21,7 @@ func ParseField(field string) (Field, error) { // We need at least an extractor name, section, and value if len(parts) < 3 { - return f, fmt.Errorf("warning: field %s value needs to have at least .
.\n", field) + return f, fmt.Errorf("warning: field %s value needs to have at least .
.\n", field) } f.Extractor = parts[0] diff --git a/plugins/list.go b/plugins/list.go index 9883fca..9b00cc0 100644 --- a/plugins/list.go +++ b/plugins/list.go @@ -7,7 +7,7 @@ import ( ) // List plugins available, print in a pretty table! -func (r *PluginsRequest) List() error { +func List(ps []PluginInformation) error { // Write out table with nodes t := table.NewWriter() @@ -18,24 +18,28 @@ func (r *PluginsRequest) List() error { // keep count of plugins (just extractors for now) count := 0 - extractorCount := 0 + pluginCount := 0 - // TODO add description column - for _, p := range *r { - extractorCount += 1 - for i, section := range p.Extractor.Sections() { + // This will iterate across plugin types (e.g., extraction and converter) + for _, p := range ps { + pluginCount += 1 + + // This iterates across plugins in the family + for i, section := range p.GetSections() { // Add the extractor plugin description only for first in the list if i == 0 { t.AppendSeparator() - t.AppendRow(table.Row{p.Extractor.Description(), "", "", ""}) + t.AppendRow(table.Row{p.GetDescription(), "", "", ""}) } + count += 1 - t.AppendRow([]interface{}{"", "extractor", p.Name, section}) + t.AppendRow([]interface{}{"", p.GetType(), section.Name}) } + } t.AppendSeparator() - t.AppendFooter(table.Row{"Total", "", extractorCount, count}) + t.AppendFooter(table.Row{"Total", "", pluginCount, count}) t.SetStyle(table.StyleColoredCyanWhiteOnBlack) t.Render() return nil diff --git a/plugins/plugins.go b/plugins/plugins.go index fbde2bf..b375d1b 100644 --- a/plugins/plugins.go +++ b/plugins/plugins.go @@ -2,25 +2,11 @@ package plugins import ( "strings" - - "github.com/compspec/compspec-go/plugins/extractors/kernel" - "github.com/compspec/compspec-go/plugins/extractors/library" - "github.com/compspec/compspec-go/plugins/extractors/nfd" - "github.com/compspec/compspec-go/plugins/extractors/system" -) - -// Add new plugin names here. 
They should correspond with the package name, then NewPlugin() -var ( - KernelExtractor = "kernel" - SystemExtractor = "system" - LibraryExtractor = "library" - NFDExtractor = "nfd" - pluginNames = []string{KernelExtractor, SystemExtractor, LibraryExtractor, NFDExtractor} ) // parseSections will return sections from the name string // We could use regex here instead -func parseSections(raw string) (string, []string) { +func ParseSections(raw string) (string, []string) { sections := []string{} @@ -39,63 +25,3 @@ func parseSections(raw string) (string, []string) { sections = strings.Split(raw, ",") return name, sections } - -// Get plugins parses a request and returns a list of plugins -// We honor the order that the plugins and sections are provided in -func GetPlugins(names []string) (PluginsRequest, error) { - - if len(names) == 0 { - names = pluginNames - } - - request := PluginsRequest{} - - // Prepare an extractor for each, and validate the requested sections - // TODO: this could also be done with an init -> Register pattern - for _, name := range names { - - // If we are given a list of section names, parse. 
- name, sections := parseSections(name) - - if strings.HasPrefix(name, KernelExtractor) { - p, err := kernel.NewPlugin(sections) - if err != nil { - return request, err - } - // Save the name, the instantiated interface, and sections - pr := PluginRequest{Name: name, Extractor: p, Sections: sections} - request = append(request, pr) - } - - if strings.HasPrefix(name, NFDExtractor) { - p, err := nfd.NewPlugin(sections) - if err != nil { - return request, err - } - // Save the name, the instantiated interface, and sections - pr := PluginRequest{Name: name, Extractor: p, Sections: sections} - request = append(request, pr) - } - - if strings.HasPrefix(name, SystemExtractor) { - p, err := system.NewPlugin(sections) - if err != nil { - return request, err - } - // Save the name, the instantiated interface, and sections - pr := PluginRequest{Name: name, Extractor: p, Sections: sections} - request = append(request, pr) - } - - if strings.HasPrefix(name, LibraryExtractor) { - p, err := library.NewPlugin(sections) - if err != nil { - return request, err - } - // Save the name, the instantiated interface, and sections - pr := PluginRequest{Name: name, Extractor: p, Sections: sections} - request = append(request, pr) - } - } - return request, nil -} diff --git a/plugins/request.go b/plugins/request.go index 97447bd..03d9726 100644 --- a/plugins/request.go +++ b/plugins/request.go @@ -1,111 +1,19 @@ package plugins -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/compspec/compspec-go/pkg/extractor" -) - -// A plugin request has a Name and sections -type PluginRequest struct { - Name string - Sections []string - Extractor extractor.Extractor - // TODO add checker here eventually too. 
-} - -type PluginsRequest []PluginRequest - -// A Result wraps named extractor data, just for easy dumping to json -type Result struct { - Results map[string]extractor.ExtractorData `json:"extractors,omitempty"` -} - -// ToJson serializes a result to json -func (r *Result) ToJson() ([]byte, error) { - b, err := json.MarshalIndent(r, "", " ") - if err != nil { - return []byte{}, err - } - return b, err -} - -// Print prints the result to the terminal -func (r *Result) Print() { - for name, result := range r.Results { - fmt.Printf(" --Result for %s\n", name) - result.Print() - } +// A Plugin(s)Information interface is an easy way to combine plugins across spaces +// primarily to expose metadata, etc. +type PluginsInformation interface { + GetPlugins() []PluginInformation } -// AddCustomFields adds or updates an existing result with -// custom metadata, either new or to overwrite -func (r *Result) AddCustomFields(fields []string) { - - for _, field := range fields { - if !strings.Contains(field, "=") { - fmt.Printf("warning: field %s does not contain an '=', skipping\n", field) - continue - } - parts := strings.Split(field, "=") - if len(parts) < 2 { - fmt.Printf("warning: field %s has an empty value, skipping\n", field) - continue - } - - // No reason the value cannot have additional = - field = parts[0] - value := strings.Join(parts[1:], "=") - - // Get the extractor, section, and subfield from the field - f, err := ParseField(field) - if err != nil { - fmt.Printf(err.Error(), field) - continue - } - - // Is the extractor name in the result? - _, ok := r.Results[f.Extractor] - if !ok { - sections := extractor.Sections{} - r.Results[f.Extractor] = extractor.ExtractorData{Sections: sections} - } - data := r.Results[f.Extractor] - - // Is the section name in the extractor data? 
- _, ok = data.Sections[f.Section] - if !ok { - data.Sections[f.Section] = extractor.ExtractorSection{} - } - section := data.Sections[f.Section] - section[f.Field] = value - - // Wrap it back up! - data.Sections[f.Section] = section - r.Results[f.Extractor] = data - } +type PluginInformation interface { + GetName() string + GetType() string + GetSections() []PluginSection + GetDescription() string } -// Do the extraction for a plugin request, meaning across a set of plugins -func (r *PluginsRequest) Extract(allowFail bool) (Result, error) { - - // Prepare Result - result := Result{} - results := map[string]extractor.ExtractorData{} - - for _, p := range *r { - r, err := p.Extractor.Extract(p.Sections) - - // We can allow failure - if err != nil && !allowFail { - return result, fmt.Errorf("There was an extraction error for %s: %s\n", p.Name, err) - } else if err != nil && allowFail { - fmt.Printf("Allowing failure - ignoring extraction error for %s: %s\n", p.Name, err) - } - results[p.Name] = r - } - result.Results = results - return result, nil +type PluginSection struct { + Description string + Name string }