diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9d90b648438..4688a4e303d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -30,8 +30,6 @@ jobs: DOCKER_REPO: "gresearch" GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" DOCKER_BUILDX_BUILDER: "${{ steps.buildx.outputs.name }}" - DOCKER_BUILDX_CACHE_FROM: "type=gha" - DOCKER_BUILDX_CACHE_TO: "type=gha,mode=max" - name: Output full commit sha if: github.event_name == 'push' && github.ref == 'refs/heads/master' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2ce2ad9b2d9..920e8e4d39d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -90,8 +90,6 @@ jobs: DOCKER_REPO: "gresearch" GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" DOCKER_BUILDX_BUILDER: "${{ steps.buildx.outputs.name }}" - DOCKER_BUILDX_CACHE_FROM: "type=gha" - DOCKER_BUILDX_CACHE_TO: "type=gha,mode=max" invoke-chart-push: name: Invoke Chart push needs: release diff --git a/.goreleaser.yml b/.goreleaser.yml index bfb198b292a..3b32929e0ee 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -19,11 +19,6 @@ env: # To use a builder other than "default", set this variable. # Necessary for, e.g., GitHub actions cache integration. - DOCKER_BUILDX_BUILDER={{ if index .Env "DOCKER_BUILDX_BUILDER" }}{{ .Env.DOCKER_BUILDX_BUILDER }}{{ else }}default{{ end }} - # Setup to enable Docker to use, e.g., the GitHub actions cache; see - # https://docs.docker.com/build/building/cache/backends/ - # https://github.com/moby/buildkit#export-cache - - DOCKER_BUILDX_CACHE_FROM={{ if index .Env "DOCKER_BUILDX_CACHE_FROM" }}{{ .Env.DOCKER_BUILDX_CACHE_FROM }}{{ else }}type=inline{{ end }} - - DOCKER_BUILDX_CACHE_TO={{ if index .Env "DOCKER_BUILDX_CACHE_TO" }}{{ .Env.DOCKER_BUILDX_CACHE_TO }}{{ else }}type=inline{{ end }} - GOVERSION={{ if index .Env "GOVERSION" }}{{ .Env.GOVERSION }}{{ else }}go1.20{{ end }} builds: @@ -205,8 +200,6 @@ dockers: - "{{ .Env.DOCKER_REPO }}armada-bundle:{{ .Version }}" build_flag_templates: &BUILD_FLAG_TEMPLATES - --builder={{ .Env.DOCKER_BUILDX_BUILDER }} - - --cache-to={{ .Env.DOCKER_BUILDX_CACHE_TO }} - - --cache-from={{ .Env.DOCKER_BUILDX_CACHE_FROM }} - --label=org.opencontainers.image.source=https://github.com/armadaproject/armada - --label=org.opencontainers.image.version={{ .Version }} - --label=org.opencontainers.image.created={{ time "2006-01-02T15:04:05Z07:00" }} diff --git a/client/DotNet/Armada.Client/ClientGenerated.cs b/client/DotNet/Armada.Client/ClientGenerated.cs index 8eab118a450..5be30e9da02 100644 --- a/client/DotNet/Armada.Client/ClientGenerated.cs +++ b/client/DotNet/Armada.Client/ClientGenerated.cs @@ -2077,6 +2077,31 @@ public partial class ApiJobUtilisationEvent public System.Collections.Generic.IDictionary TotalCumulativeUsage { get; set; } + } + + [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] + public partial class ApiPriorityClassPoolResourceLimits + { + [Newtonsoft.Json.JsonProperty("maximumResourceFraction", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] + public System.Collections.Generic.IDictionary<string, double> MaximumResourceFraction { get; set; } + + + } + + [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] + public partial class ApiPriorityClassResourceLimits + { + /// <summary>Limits resources assigned to jobs of this priority class. 
+ /// Specifically, jobs of this priority class are only scheduled if doing so does not exceed this limit.</summary> + [Newtonsoft.Json.JsonProperty("maximumResourceFraction", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] + public System.Collections.Generic.IDictionary<string, double> MaximumResourceFraction { get; set; } + + /// <summary>Per-pool override of maximum_resource_fraction. + /// If missing for a particular pool, maximum_resource_fraction is used instead for that pool.</summary> + [Newtonsoft.Json.JsonProperty("maximumResourceFractionByPool", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] + public System.Collections.Generic.IDictionary<string, ApiPriorityClassPoolResourceLimits> MaximumResourceFractionByPool { get; set; } + + } [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.0.27.0 (Newtonsoft.Json v12.0.0.0)")] @@ -2097,6 +2122,11 @@ public partial class ApiQueue [Newtonsoft.Json.JsonProperty("resourceLimits", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] public System.Collections.Generic.IDictionary<string, double> ResourceLimits { get; set; } + /// <summary>Map from priority class name to resource limit overrides for this queue and priority class. + /// If provided for a priority class, global limits for that priority class do not apply to this queue.</summary> + [Newtonsoft.Json.JsonProperty("resourceLimitsByPriorityClassName", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] + public System.Collections.Generic.IDictionary<string, ApiPriorityClassResourceLimits> ResourceLimitsByPriorityClassName { get; set; } + [Newtonsoft.Json.JsonProperty("userOwners", Required = Newtonsoft.Json.Required.Default, NullValueHandling = Newtonsoft.Json.NullValueHandling.Ignore)] public System.Collections.Generic.ICollection<string> UserOwners { get; set; } diff --git a/cmd/armadactl/cmd/queue.go b/cmd/armadactl/cmd/queue.go index 675b97fef4c..ea6db4b449f 100644 --- a/cmd/armadactl/cmd/queue.go +++ b/cmd/armadactl/cmd/queue.go @@ -2,7 +2,6 @@ package cmd import ( "fmt" - "strconv" "github.com/spf13/cobra" @@ -47,17 +46,11 @@ Job priority is evaluated inside queue, queue has its own priority.`, return fmt.Errorf("error reading groupOwners: %s", err) } - resourceLimits, err := flagGetStringToString(cmd.Flags().GetStringToString).toFloat64("resourceLimits") - if err != nil { - return fmt.Errorf("error reading resourceLimits: %s", err) - } - queue, err := queue.NewQueue(&api.Queue{ Name: name, PriorityFactor: priorityFactor, UserOwners: owners, GroupOwners: groups, - ResourceLimits: resourceLimits, }) if err != nil { return fmt.Errorf("invalid queue data: %s", err) } @@ -69,9 +62,6 @@ cmd.Flags().Float64("priorityFactor", 1, "Set queue priority factor - lower number makes queue more important, must be > 0.") cmd.Flags().StringSlice("owners", []string{}, "Comma separated list of queue owners, defaults to current user.") cmd.Flags().StringSlice("groupOwners", []string{}, "Comma separated list of queue group owners, defaults to empty list.") - cmd.Flags().StringToString("resourceLimits", map[string]string{}, - "Command separated list of resource limits pairs, defaults to empty list.\nExample: --resourceLimits cpu=0.3,memory=0.2", - ) return cmd } @@ -151,17 +141,11 @@ func queueUpdateCmdWithApp(a *armadactl.App) *cobra.Command { return fmt.Errorf("error reading groupOwners: %s", err) } - resourceLimits, err := 
flagGetStringToString(cmd.Flags().GetStringToString).toFloat64("resourceLimits") - if err != nil { - return fmt.Errorf("error reading resourceLimits: %s", err) - } - queue, err := queue.NewQueue(&api.Queue{ Name: name, PriorityFactor: priorityFactor, UserOwners: owners, GroupOwners: groups, - ResourceLimits: resourceLimits, }) if err != nil { return fmt.Errorf("invalid queue data: %s", err) @@ -174,28 +158,5 @@ func queueUpdateCmdWithApp(a *armadactl.App) *cobra.Command { cmd.Flags().Float64("priorityFactor", 1, "Set queue priority factor - lower number makes queue more important, must be > 0.") cmd.Flags().StringSlice("owners", []string{}, "Comma separated list of queue owners, defaults to current user.") cmd.Flags().StringSlice("groupOwners", []string{}, "Comma separated list of queue group owners, defaults to empty list.") - cmd.Flags().StringToString("resourceLimits", map[string]string{}, - "Command separated list of resource limits pairs, defaults to empty list. Example: --resourceLimits cpu=0.3,memory=0.2", - ) return cmd } - -type flagGetStringToString func(string) (map[string]string, error) - -func (f flagGetStringToString) toFloat64(flagName string) (map[string]float64, error) { - limits, err := f(flagName) - if err != nil { - return nil, err - } - - result := make(map[string]float64, len(limits)) - for resourceName, limit := range limits { - limitFloat, err := strconv.ParseFloat(limit, 64) - if err != nil { - return nil, fmt.Errorf("failed to parse %s as float64. %s", resourceName, err) - } - result[resourceName] = limitFloat - } - - return result, nil -} diff --git a/cmd/armadactl/cmd/queue_test.go b/cmd/armadactl/cmd/queue_test.go index d0d42387c8f..eab9ad14431 100644 --- a/cmd/armadactl/cmd/queue_test.go +++ b/cmd/armadactl/cmd/queue_test.go @@ -40,13 +40,11 @@ func TestCreate(t *testing.T) { PriorityFactor *float64 Owners []string GroupOwners []string - ResourceLimits map[string]float64 }{ - "default flags": {nil, nil, nil, nil, nil}, - "valid priority": {[]flag{{"priorityFactor", "1.0"}}, makeFloat64Pointer(1.0), nil, nil, nil}, - "valid owners": {[]flag{{"owners", "user1,user2"}}, nil, []string{"user1", "user2"}, nil, nil}, - "valid group owners": {[]flag{{"groupOwners", "group1,group2"}}, nil, nil, []string{"group1", "group2"}, nil}, - "valid resource limits": {[]flag{{"resourceLimits", "cpu=0.3,memory=0.2"}}, nil, nil, nil, map[string]float64{"cpu": 0.3, "memory": 0.2}}, + "default flags": {nil, nil, nil, nil}, + "valid priority": {[]flag{{"priorityFactor", "1.0"}}, makeFloat64Pointer(1.0), nil, nil}, + "valid owners": {[]flag{{"owners", "user1,user2"}}, nil, []string{"user1", "user2"}, nil}, + "valid group owners": {[]flag{{"groupOwners", "group1,group2"}}, nil, nil, []string{"group1", "group2"}}, } for name, test := range tests { @@ -75,11 +73,6 @@ func TestCreate(t *testing.T) { require.True(t, reflect.DeepEqual(q.Permissions, permissions)) } - if test.ResourceLimits != nil { - for resourceName, resourceLimit := range q.ResourceLimits { - require.Equal(t, test.ResourceLimits[string(resourceName)], float64(resourceLimit), "resource limit mismatch") - } - } return nil } return nil @@ -127,13 +120,11 @@ func TestUpdate(t *testing.T) { PriorityFactor *float64 Owners []string GroupOwners []string - ResourceLimits map[string]float64 }{ - "default flags": {nil, nil, nil, nil, nil}, - "valid priority": {[]flag{{"priorityFactor", "1.0"}}, makeFloat64Pointer(1.0), nil, nil, nil}, - "valid owners": {[]flag{{"owners", "user1,user2"}}, nil, []string{"user1", "user2"}, nil, nil}, - 
"valid group owners": {[]flag{{"groupOwners", "group1,group2"}}, nil, nil, []string{"group1", "group2"}, nil}, - "valid resource limits": {[]flag{{"resourceLimits", "cpu=0.3,memory=0.2"}}, nil, nil, nil, map[string]float64{"cpu": 0.3, "memory": 0.2}}, + "default flags": {nil, nil, nil, nil}, + "valid priority": {[]flag{{"priorityFactor", "1.0"}}, makeFloat64Pointer(1.0), nil, nil}, + "valid owners": {[]flag{{"owners", "user1,user2"}}, nil, []string{"user1", "user2"}, nil}, + "valid group owners": {[]flag{{"groupOwners", "group1,group2"}}, nil, nil, []string{"group1", "group2"}}, } for name, test := range tests { @@ -159,12 +150,6 @@ func TestUpdate(t *testing.T) { if test.Owners != nil { require.True(t, reflect.DeepEqual(q.Permissions, permissions)) } - - if test.ResourceLimits != nil { - for resourceName, resourceLimit := range q.ResourceLimits { - require.Equal(t, test.ResourceLimits[string(resourceName)], float64(resourceLimit), "resource limit mismatch") - } - } return nil } return nil diff --git a/config/scheduler/config.yaml b/config/scheduler/config.yaml index 739127ce978..9845b8cb391 100644 --- a/config/scheduler/config.yaml +++ b/config/scheduler/config.yaml @@ -24,6 +24,10 @@ schedulerMetrics: - "memory" - "ephemeral-storage" - "nvidia.com/gpu" + resourceRenaming: + nvidia.com/gpu: "gpu" + amd.com/gpu: "gpu" + ephemeral-storage: "ephemeralStorage" matchedRegexIndexByErrorMessageCacheSize: 100 resetInterval: "1h" pulsar: diff --git a/go.mod b/go.mod index a896a0a5309..3a74c232a3a 100644 --- a/go.mod +++ b/go.mod @@ -22,14 +22,14 @@ require ( github.com/golang/protobuf v1.5.3 github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.4.0 - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/go-memdb v1.3.4 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru v1.0.2 github.com/instrumenta/kubeval v0.0.0-20190918223246-8d013ec9fc56 - github.com/jackc/pgtype v1.14.1 + github.com/jackc/pgtype v1.14.2 github.com/jackc/pgx/v4 v4.17.2 // indirect github.com/jolestar/go-commons-pool v2.0.0+incompatible github.com/jstemmer/go-junit-report/v2 v2.0.0 @@ -50,8 +50,8 @@ require ( github.com/stretchr/testify v1.8.4 github.com/weaveworks/promrus v1.2.0 golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 - golang.org/x/net v0.21.0 - golang.org/x/oauth2 v0.16.0 + golang.org/x/net v0.22.0 + golang.org/x/oauth2 v0.18.0 golang.org/x/sync v0.6.0 golang.org/x/tools v0.18.0 // indirect google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect @@ -91,7 +91,6 @@ require ( golang.org/x/time v0.3.0 gonum.org/v1/gonum v0.14.0 google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 - gopkg.in/yaml.v3 v3.0.1 ) require ( @@ -185,10 +184,10 @@ require ( github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0 // indirect go.mongodb.org/mongo-driver v1.13.1 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/crypto v0.19.0 // indirect + golang.org/x/crypto v0.21.0 // indirect golang.org/x/mod v0.15.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.17.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect @@ -197,6 +196,7 @@ require ( 
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.100.1 // indirect k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c // indirect lukechampine.com/uint128 v1.2.0 // indirect diff --git a/go.sum b/go.sum index 6b0692e99e5..bc9b2b3148d 100644 --- a/go.sum +++ b/go.sum @@ -95,6 +95,7 @@ github.com/aws/aws-sdk-go v1.30.19/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/immutable v0.4.3 h1:GYHcksoJ9K6HyAUpGxwZURrbTkXA0Dh4otXGqbhdrjA= github.com/benbjohnson/immutable v0.4.3/go.mod h1:qJIKKSmdqz1tVzNtst1DZzvaqOU1onk1rc03IeM3Owk= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -352,8 +353,8 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= @@ -454,8 +455,8 @@ github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCM github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= -github.com/jackc/pgtype v1.14.1 h1:LyDar7M2K0tShCWqzJ/ctzF1QC3Wzc9c8a6cHE0PFdc= -github.com/jackc/pgtype v1.14.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.2 h1:QBdZQTKpPdBlw2AdKwHEyqUcm/lrl2cwWAHjCMyln/o= +github.com/jackc/pgtype v1.14.2/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= @@ -826,6 +827,7 @@ go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -835,6 +837,7 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -857,8 +860,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -947,8 +950,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -960,8 +963,8 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 
v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1042,6 +1045,7 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1049,14 +1053,14 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20180810153555-6e3c4e7365dd/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1099,6 +1103,7 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/internal/armada/repository/queue.go b/internal/armada/repository/queue.go index 9d1b16b5f56..17dee6a27a4 100644 --- a/internal/armada/repository/queue.go +++ b/internal/armada/repository/queue.go @@ -5,6 +5,7 @@ import ( "github.com/go-redis/redis" "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" "github.com/armadaproject/armada/pkg/api" "github.com/armadaproject/armada/pkg/client/queue" @@ -47,22 +48,22 @@ func NewRedisQueueRepository(db redis.UniversalClient) *RedisQueueRepository { func (r *RedisQueueRepository) GetAllQueues() ([]queue.Queue, error) { result, err := r.db.HGetAll(queueHashKey).Result() if err != nil { - return nil, fmt.Errorf("[RedisQueueRepository.GetAllQueues] error reading from database: %s", err) + return nil, errors.WithStack(err) } - queues := make([]queue.Queue, 0) + queues := make([]queue.Queue, len(result)) + i := 0 for _, v := range result { apiQueue := &api.Queue{} - e := proto.Unmarshal([]byte(v), apiQueue) - if e != nil { - return nil, fmt.Errorf("[RedisQueueRepository.GetAllQueues] error unmarshalling queue: %s", err) + if err := proto.Unmarshal([]byte(v), apiQueue); err != nil { + return nil, errors.WithStack(err) } queue, err := queue.NewQueue(apiQueue) if err != nil { return nil, err } - - queues = append(queues, queue) + queues[i] = queue + i++ } return queues, nil } diff --git a/internal/armada/server/event_test.go b/internal/armada/server/event_test.go index a46de427a32..5585799eb32 100644 --- a/internal/armada/server/event_test.go +++ b/internal/armada/server/event_test.go @@ -380,7 +380,6 @@ func withEventServer(t *testing.T, action func(s *EventServer)) { Name: "", Permissions: nil, PriorityFactor: 1, - ResourceLimits: nil, }) require.NoError(t, err) action(server) diff --git a/internal/armadactl/queue.go b/internal/armadactl/queue.go index a1255bb1520..e09744e9b75 100644 --- a/internal/armadactl/queue.go +++ b/internal/armadactl/queue.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/pkg/errors" - "gopkg.in/yaml.v3" + "sigs.k8s.io/yaml" "github.com/armadaproject/armada/pkg/client" "github.com/armadaproject/armada/pkg/client/queue" @@ -64,10 +64,21 @@ func (a *App) GetQueue(name string) error { if err != nil { return errors.Errorf("[armadactl.GetQueue] error unmarshalling queue %s: %s", name, err) } - fmt.Fprintf(a.Out, string(b)) + fmt.Fprintf(a.Out, headerYaml()+string(b)) return nil } +func headerYaml() string { + b, err := yaml.Marshal(client.Resource{ + Version: client.APIVersionV1, + Kind: client.ResourceKindQueue, + }) + if err != nil { + panic(err) + } + return string(b) +} + // UpdateQueue calls app.QueueAPI.Update with the 
provided parameters. func (a *App) UpdateQueue(queue queue.Queue) error { if err := a.Params.QueueAPI.Update(queue); err != nil { diff --git a/internal/jobservice/events/client_moq.go b/internal/jobservice/events/client_moq.go index 47f954bb325..efb2cf595d8 100644 --- a/internal/jobservice/events/client_moq.go +++ b/internal/jobservice/events/client_moq.go @@ -5,9 +5,11 @@ package events import ( "context" - "github.com/armadaproject/armada/pkg/api" - "github.com/gogo/protobuf/types" "sync" + + "github.com/gogo/protobuf/types" + + "github.com/armadaproject/armada/pkg/api" ) // Ensure, that JobEventReaderMock does implement JobEventReader. diff --git a/internal/jobservice/repository/sql_job_service_moq.go b/internal/jobservice/repository/sql_job_service_moq.go index b665b524337..6c637c22040 100644 --- a/internal/jobservice/repository/sql_job_service_moq.go +++ b/internal/jobservice/repository/sql_job_service_moq.go @@ -5,9 +5,10 @@ package repository import ( "context" - js "github.com/armadaproject/armada/pkg/api/jobservice" "sync" "time" + + js "github.com/armadaproject/armada/pkg/api/jobservice" ) // Ensure, that SQLJobServiceMock does implement SQLJobService. diff --git a/internal/lookout/ui/src/components/lookoutV2/JobsTableActionBar.tsx b/internal/lookout/ui/src/components/lookoutV2/JobsTableActionBar.tsx index c9ee9053779..94129d445d4 100644 --- a/internal/lookout/ui/src/components/lookoutV2/JobsTableActionBar.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/JobsTableActionBar.tsx @@ -36,6 +36,7 @@ export interface JobsTableActionBarProps { getJobsService: IGetJobsService updateJobsService: UpdateJobsService onClearFilters: () => void + onClearGroups: () => void onAddCustomView: (name: string) => void onDeleteCustomView: (name: string) => void onLoadCustomView: (name: string) => void @@ -62,6 +63,7 @@ export const JobsTableActionBar = memo( getJobsService, updateJobsService, onClearFilters, + onClearGroups, onAddCustomView, onDeleteCustomView, onLoadCustomView, @@ -123,6 +125,9 @@ ( + { + // Set grouping to an empty array to clear all groups + onGroupingChange([]) + + // Reset expanded states as no groups will exist + setExpanded({}) + + // Clear any related UI states if necessary + // For example, resetting selection + setSelectedRows({}) + } + const shiftSelectRow = async (row: Row<JobTableRow>) => { if (lastSelectedRow === undefined || row.depth !== lastSelectedRow.depth) { return } @@ -767,6 +779,7 @@ getJobsService={getJobsService} updateJobsService={updateJobsService} onClearFilters={clearFilters} + onClearGroups={clearGroups} onAddCustomView={addCustomView} onDeleteCustomView={deleteCustomView} onLoadCustomView={loadCustomView} diff --git a/internal/lookout/ui/src/hooks/useJobsTableData.ts b/internal/lookout/ui/src/hooks/useJobsTableData.ts index efa95f33ebd..6ec3f7147ff 100644 --- a/internal/lookout/ui/src/hooks/useJobsTableData.ts +++ b/internal/lookout/ui/src/hooks/useJobsTableData.ts @@ -61,6 +61,13 @@ const aggregatableFields = new Map<ColumnId, string>([ [StandardColumnId.State, "state"], ]) +const groupableFields = new Map<ColumnId, string>([ + [StandardColumnId.Queue, "queue"], + [StandardColumnId.Namespace, "namespace"], + [StandardColumnId.JobSet, "jobSet"], + [StandardColumnId.State, "state"], +]) + export function columnIsAggregatable(columnId: ColumnId): boolean { return aggregatableFields.has(columnId) } @@ -173,7 +180,10 @@ const groupedField = columnToGroupedField(groupedCol) // Override the 
group order if needed - if (rowRequest.order.field !== groupedCol) { + if ( + rowRequest.order.field !== groupedCol && + Array.from(groupableFields.values()).includes(rowRequest.order.field) + ) { rowRequest.order = defaultGroupOrder } diff --git a/internal/lookout/ui/src/models/lookoutV2Models.ts b/internal/lookout/ui/src/models/lookoutV2Models.ts index 4e08c1719a2..d402f44e73c 100644 --- a/internal/lookout/ui/src/models/lookoutV2Models.ts +++ b/internal/lookout/ui/src/models/lookoutV2Models.ts @@ -78,6 +78,9 @@ export type Job = { lastActiveRunId?: string lastTransitionTime: string cancelReason?: string + node?: string + cluster?: string + exitCode?: number } export type JobKey = keyof Job diff --git a/internal/lookout/ui/src/utils/jobsTableColumns.tsx b/internal/lookout/ui/src/utils/jobsTableColumns.tsx index f87d2994628..309511fde06 100644 --- a/internal/lookout/ui/src/utils/jobsTableColumns.tsx +++ b/internal/lookout/ui/src/utils/jobsTableColumns.tsx @@ -51,6 +51,9 @@ export enum StandardColumnId { SelectorCol = "selectorCol", Count = "jobCount", + Node = "node", + Cluster = "cluster", + ExitCode = "exitCode", } export const ANNOTATION_COLUMN_PREFIX = "annotation_" @@ -394,6 +397,30 @@ export const JOB_COLUMNS: JobTableColumn[] = [ enableSorting: true, }, }), + accessorColumn({ + id: StandardColumnId.Node, + accessor: "node", + displayName: "Node", + additionalOptions: { + size: 200, + }, + }), + accessorColumn({ + id: StandardColumnId.Cluster, + accessor: "cluster", + displayName: "Cluster", + additionalOptions: { + size: 200, + }, + }), + accessorColumn({ + id: StandardColumnId.ExitCode, + accessor: "exitCode", + displayName: "Exit Code", + additionalOptions: { + size: 100, + }, + }), ] export const DEFAULT_COLUMNS_TO_DISPLAY: Set<ColumnId> = new Set([ diff --git a/internal/lookoutingesterv2/instructions/instructions.go b/internal/lookoutingesterv2/instructions/instructions.go index f4e34246199..34c3d07f8ba 100644 --- a/internal/lookoutingesterv2/instructions/instructions.go +++ b/internal/lookoutingesterv2/instructions/instructions.go @@ -212,7 +212,7 @@ func (c *InstructionConverter) handleSubmitJob( } update.JobsToCreate = append(update.JobsToCreate, &job) - annotationInstructions := createUserAnnotationInstructions(jobId, queue, jobSet, event.GetObjectMeta().GetAnnotations()) + annotationInstructions := createUserAnnotationInstructions(jobId, queue, jobSet, annotations) update.UserAnnotationsToCreate = append(update.UserAnnotationsToCreate, annotationInstructions...) 
return err diff --git a/internal/lookoutingesterv2/instructions/instructions_test.go b/internal/lookoutingesterv2/instructions/instructions_test.go index 816443cf0a6..74b093bfdd7 100644 --- a/internal/lookoutingesterv2/instructions/instructions_test.go +++ b/internal/lookoutingesterv2/instructions/instructions_test.go @@ -208,6 +208,23 @@ func TestConvert(t *testing.T) { }, } + expectedCreateUserAnnotations := []*model.CreateUserAnnotationInstruction{ + { + JobId: testfixtures.JobIdString, + Key: "a", + Value: "0", + Queue: testfixtures.Queue, + Jobset: testfixtures.JobSetName, + }, + { + JobId: testfixtures.JobIdString, + Key: "b", + Value: "1", + Queue: testfixtures.Queue, + Jobset: testfixtures.JobSetName, + }, + } + otherJobIdUlid := util.ULID() otherJobId := util.StringFromUlid(otherJobIdUlid) otherJobIdProto := armadaevents.ProtoUuidFromUlid(otherJobIdUlid) @@ -243,8 +260,9 @@ func TestConvert(t *testing.T) { }, }, expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, + JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, + UserAnnotationsToCreate: expectedCreateUserAnnotations, + MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, useLegacyEventConversion: true, }, @@ -261,11 +279,12 @@ func TestConvert(t *testing.T) { MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, - JobsToUpdate: []*model.UpdateJobInstruction{&expectedLeased, &expectedPending, &expectedRunning, &expectedJobSucceeded}, - JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLeasedRun}, - JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedPendingRun, &expectedRunningRun, &expectedJobRunSucceeded}, - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, + JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, + JobsToUpdate: []*model.UpdateJobInstruction{&expectedLeased, &expectedPending, &expectedRunning, &expectedJobSucceeded}, + JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLeasedRun}, + JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedPendingRun, &expectedRunningRun, &expectedJobRunSucceeded}, + UserAnnotationsToCreate: expectedCreateUserAnnotations, + MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, useLegacyEventConversion: false, }, @@ -288,10 +307,11 @@ func TestConvert(t *testing.T) { }, }, expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, - JobsToUpdate: []*model.UpdateJobInstruction{&expectedLeased, &expectedPending, &expectedRunning, &expectedJobSucceeded}, - JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLeasedRun}, - JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedPendingRun, &expectedRunningRun, &expectedJobRunSucceeded}, + JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, + JobsToUpdate: []*model.UpdateJobInstruction{&expectedLeased, &expectedPending, &expectedRunning, &expectedJobSucceeded}, + JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLeasedRun}, + JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedPendingRun, &expectedRunningRun, &expectedJobRunSucceeded}, + UserAnnotationsToCreate: expectedCreateUserAnnotations, MessageIds: []pulsar.MessageID{ pulsarutils.NewMessageId(1), pulsarutils.NewMessageId(2), @@ -315,11 +335,12 @@ func TestConvert(t *testing.T) { MessageIds: 
[]pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, - JobsToUpdate: []*model.UpdateJobInstruction{&expectedPending, &expectedRunning, &expectedJobSucceeded}, - JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLegacyPendingRun}, - JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedRunningRun, &expectedJobRunSucceeded}, - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, + JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, + JobsToUpdate: []*model.UpdateJobInstruction{&expectedPending, &expectedRunning, &expectedJobSucceeded}, + JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLegacyPendingRun}, + JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedRunningRun, &expectedJobRunSucceeded}, + UserAnnotationsToCreate: expectedCreateUserAnnotations, + MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, useLegacyEventConversion: true, }, @@ -342,10 +363,11 @@ func TestConvert(t *testing.T) { }, }, expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, - JobsToUpdate: []*model.UpdateJobInstruction{&expectedPending, &expectedRunning, &expectedJobSucceeded}, - JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLegacyPendingRun}, - JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedRunningRun, &expectedJobRunSucceeded}, + JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, + JobsToUpdate: []*model.UpdateJobInstruction{&expectedPending, &expectedRunning, &expectedJobSucceeded}, + JobRunsToCreate: []*model.CreateJobRunInstruction{&expectedLegacyPendingRun}, + JobRunsToUpdate: []*model.UpdateJobRunInstruction{&expectedRunningRun, &expectedJobRunSucceeded}, + UserAnnotationsToCreate: expectedCreateUserAnnotations, MessageIds: []pulsar.MessageID{ pulsarutils.NewMessageId(1), pulsarutils.NewMessageId(2), @@ -523,7 +545,8 @@ func TestConvert(t *testing.T) { }, }, expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, + JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, + UserAnnotationsToCreate: expectedCreateUserAnnotations, MessageIds: []pulsar.MessageID{ pulsarutils.NewMessageId(1), pulsarutils.NewMessageId(2), @@ -560,7 +583,8 @@ func TestConvert(t *testing.T) { }, }, expected: &model.InstructionSet{ - JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, + JobsToCreate: []*model.CreateJobInstruction{expectedSubmit}, + UserAnnotationsToCreate: expectedCreateUserAnnotations, MessageIds: []pulsar.MessageID{ pulsarutils.NewMessageId(1), pulsarutils.NewMessageId(2), @@ -598,6 +622,8 @@ func TestConvert(t *testing.T) { assert.Equal(t, tc.expected.JobsToUpdate, instructionSet.JobsToUpdate) assert.Equal(t, tc.expected.JobRunsToCreate, instructionSet.JobRunsToCreate) assert.Equal(t, tc.expected.JobRunsToUpdate, instructionSet.JobRunsToUpdate) + assert.Equal(t, tc.expected.UserAnnotationsToCreate, instructionSet.UserAnnotationsToCreate) + assert.Equal(t, tc.expected.MessageIds, instructionSet.MessageIds) }) } } @@ -675,33 +701,6 @@ func TestTruncatesStringsThatAreTooLong(t *testing.T) { assert.Len(t, *actual.JobRunsToUpdate[0].Node, 512) } -func TestAnnotations(t *testing.T) { - annotations := map[string]string{userAnnotationPrefix + "a": "b", "1": "2"} - expected := []*model.CreateUserAnnotationInstruction{ - { - JobId: testfixtures.JobIdString, - Key: "1", - Value: "2", - Queue: testfixtures.Queue, - Jobset: 
testfixtures.JobSetName, - }, - { - JobId: testfixtures.JobIdString, - Key: "a", - Value: "b", - Queue: testfixtures.Queue, - Jobset: testfixtures.JobSetName, - }, - } - instructions := createUserAnnotationInstructions( - testfixtures.JobIdString, - testfixtures.Queue, - testfixtures.JobSetName, - extractUserAnnotations(userAnnotationPrefix, annotations), - ) - assert.Equal(t, expected, instructions) -} - func TestExtractNodeName(t *testing.T) { podError := armadaevents.PodError{} assert.Nil(t, extractNodeName(&podError)) diff --git a/internal/lookoutv2/conversions/convert.go b/internal/lookoutv2/conversions/convert.go index 5d53a84897a..92eb0c4f1fb 100644 --- a/internal/lookoutv2/conversions/convert.go +++ b/internal/lookoutv2/conversions/convert.go @@ -37,6 +37,9 @@ func ToSwaggerJob(job *model.Job) *models.Job { State: job.State, Submitted: strfmt.DateTime(job.Submitted), CancelReason: job.CancelReason, + Node: job.Node, + Cluster: job.Cluster, + ExitCode: job.ExitCode, } } diff --git a/internal/lookoutv2/gen/models/job.go b/internal/lookoutv2/gen/models/job.go index bf88fa809b7..fbab4765659 100644 --- a/internal/lookoutv2/gen/models/job.go +++ b/internal/lookoutv2/gen/models/job.go @@ -105,6 +105,18 @@ type Job struct { // Min Length: 1 // Format: date-time Submitted strfmt.DateTime `json:"submitted"` + + // node + // Required: false + Node *string `json:"node,omitempty"` + + // cluster + // Required: true + Cluster string `json:"cluster"` + + // exitCode + // Required: false + ExitCode *int32 `json:"exitCode,omitempty"` } // Validate validates this job diff --git a/internal/lookoutv2/model/model.go b/internal/lookoutv2/model/model.go index c3f88ec69f0..80ebe0df0bf 100644 --- a/internal/lookoutv2/model/model.go +++ b/internal/lookoutv2/model/model.go @@ -40,6 +40,9 @@ type Job struct { State string Submitted time.Time CancelReason *string + Node *string + Cluster string + ExitCode *int32 } // PostgreSQLTime is a wrapper around time.Time that converts to UTC when diff --git a/internal/lookoutv2/repository/getjobs.go b/internal/lookoutv2/repository/getjobs.go index 7e283003f6d..86217db0827 100644 --- a/internal/lookoutv2/repository/getjobs.go +++ b/internal/lookoutv2/repository/getjobs.go @@ -125,6 +125,7 @@ func (r *SqlGetJobsRepository) getJobs(ctx *armadacontext.Context, filters []*mo log.WithError(err).Error("failed getting run rows") return err } + annotationRows, err = makeAnnotationRows(ctx, tx, tempTableName) if err != nil { log.WithError(err).Error("failed getting annotation rows") @@ -135,7 +136,6 @@ if err != nil { return nil, err } - jobs, err := rowsToJobs(jobRows, runRows, annotationRows) if err != nil { return nil, err } @@ -201,6 +201,13 @@ func (r *SqlGetJobsRepository) getJobsJsonb(ctx *armadacontext.Context, filters return err } } + if len(job.Runs) > 0 { + lastRun := job.Runs[len(job.Runs)-1] // Get the last run + job.Node = lastRun.Node + job.Cluster = lastRun.Cluster + job.ExitCode = lastRun.ExitCode + + } jobs = append(jobs, job) } return nil } @@ -251,6 +258,13 @@ func rowsToJobs(jobRows []*jobRow, runRows []*runRow, annotationRows []*annotati for i, jobId := range orderedJobIds { job := jobMap[jobId] sortRuns(job.Runs) + if len(job.Runs) > 0 { + lastRun := job.Runs[len(job.Runs)-1] // Get the last run + job.Node = lastRun.Node + job.Cluster = lastRun.Cluster + job.ExitCode = lastRun.ExitCode + + } jobs[i] = job } diff --git a/internal/lookoutv2/repository/getjobs_test.go 
b/internal/lookoutv2/repository/getjobs_test.go index 3fc3342e29b..414e2468089 100644 --- a/internal/lookoutv2/repository/getjobs_test.go +++ b/internal/lookoutv2/repository/getjobs_test.go @@ -1997,3 +1997,69 @@ func TestGetJobsActiveJobSet(t *testing.T) { }) require.NoError(t, err) } + +func TestGetJobsWithLatestRunDetails(t *testing.T) { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { + runIdLatest := uuid.NewString() + // Simulate job submission and multiple runs, with the latest run being successful + NewJobSimulator(converter, store). + Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). + Pending(uuid.NewString(), "first-cluster", baseTime). + Running(uuid.NewString(), "first-node", baseTime.Add(time.Minute)). + Pending(runIdLatest, "latest-cluster", baseTime.Add(2*time.Minute)). + Running(runIdLatest, "latest-node", baseTime.Add(3*time.Minute)). + RunSucceeded(runIdLatest, baseTime.Add(4*time.Minute)). + Build(). + Job() + + result, err := repo.GetJobs(armadacontext.TODO(), []*model.Filter{}, false, &model.Order{}, 0, 10) + require.NoError(t, err) + require.Len(t, result.Jobs, 1) + + // Adjusting assertions to dereference pointer fields + if assert.NotNil(t, result.Jobs[0].Node) { + assert.Equal(t, "latest-node", *result.Jobs[0].Node) + } + if assert.NotNil(t, result.Jobs[0].ExitCode) { + assert.Equal(t, int32(0), *result.Jobs[0].ExitCode) + } + if assert.NotNil(t, result.Jobs[0].Cluster) { + assert.Equal(t, "latest-cluster", result.Jobs[0].Cluster) + } + + return nil + }) + require.NoError(t, err) +} + +func TestGetJobsWithSpecificRunDetails(t *testing.T) { + err := withGetJobsSetup(func(converter *instructions.InstructionConverter, store *lookoutdb.LookoutDb, repo *SqlGetJobsRepository) error { + runIdSpecific := uuid.NewString() + // Simulate job submission and a specific failed run + NewJobSimulator(converter, store). + Submit(queue, jobSet, owner, namespace, baseTime, basicJobOpts). + Pending(runIdSpecific, "specific-cluster", baseTime). + Running(runIdSpecific, "specific-node", baseTime.Add(time.Minute)). + RunFailed(runIdSpecific, "specific-node", 2, "Specific failure message", baseTime.Add(2*time.Minute)). + Build(). 
+ Job() + + result, err := repo.GetJobs(armadacontext.TODO(), []*model.Filter{}, false, &model.Order{}, 0, 10) + require.NoError(t, err) + require.Len(t, result.Jobs, 1) + + // Adjusting assertions to dereference pointer fields + if assert.NotNil(t, result.Jobs[0].Node) { + assert.Equal(t, "specific-node", *result.Jobs[0].Node) + } + if assert.NotNil(t, result.Jobs[0].ExitCode) { + assert.Equal(t, int32(2), *result.Jobs[0].ExitCode) + } + if assert.NotNil(t, result.Jobs[0].Cluster) { + assert.Equal(t, "specific-cluster", result.Jobs[0].Cluster) + } + + return nil + }) + require.NoError(t, err) +} diff --git a/internal/lookoutv2/repository/util.go b/internal/lookoutv2/repository/util.go index a7b1de95cac..8841c66e4d4 100644 --- a/internal/lookoutv2/repository/util.go +++ b/internal/lookoutv2/repository/util.go @@ -225,6 +225,7 @@ func (js *JobSimulator) Pending(runId string, cluster string, timestamp time.Tim js.job.LastActiveRunId = &runId js.job.LastTransitionTime = ts js.job.State = string(lookout.JobPending) + js.job.Cluster = cluster rp := &runPatch{ runId: runId, cluster: &cluster, @@ -264,6 +265,7 @@ func (js *JobSimulator) Running(runId string, node string, timestamp time.Time) js.job.LastActiveRunId = &runId js.job.LastTransitionTime = ts js.job.State = string(lookout.JobRunning) + js.job.Node = &node updateRun(js.job, &runPatch{ runId: runId, jobRunState: lookout.JobRunRunning, @@ -613,6 +615,9 @@ func timestampOrNow(timestamp time.Time) time.Time { } func updateRun(job *model.Job, patch *runPatch) { + if patch.exitCode != nil { + job.ExitCode = patch.exitCode + } for _, run := range job.Runs { if run.RunId == patch.runId { patchRun(run, patch) diff --git a/internal/scheduler/configuration/configuration.go b/internal/scheduler/configuration/configuration.go index 19a5b39290e..ffd1f610fae 100644 --- a/internal/scheduler/configuration/configuration.go +++ b/internal/scheduler/configuration/configuration.go @@ -73,6 +73,12 @@ type MetricsConfig struct { TrackedErrorRegexes []string // Metrics are exported for these resources. TrackedResourceNames []v1.ResourceName + // Optionally rename resources in exported metrics. + // E.g., if ResourceRenaming["nvidia.com/gpu"] = "gpu", then metrics for resource "nvidia.com/gpu" use resource name "gpu" instead. + // This can be used to avoid illegal Prometheus metric names (e.g., for "nvidia.com/gpu" as "/" is not allowed). + // Allowed characters in resource names are [a-zA-Z_:][a-zA-Z0-9_:]* + // It can also be used to track multiple resources within the same metric, e.g., "nvidia.com/gpu" and "amd.com/gpu". + ResourceRenaming map[v1.ResourceName]string // Controls the cycle time metrics. // TODO(albin): Not used yet. CycleTimeConfig PrometheusSummaryConfig diff --git a/internal/scheduler/constraints/constraints.go b/internal/scheduler/constraints/constraints.go index 38d95a5106a..d8f6cb6a9a4 100644 --- a/internal/scheduler/constraints/constraints.go +++ b/internal/scheduler/constraints/constraints.go @@ -10,6 +10,7 @@ import ( "github.com/armadaproject/armada/internal/armada/configuration" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" + "github.com/armadaproject/armada/pkg/client/queue" ) const ( @@ -31,6 +32,9 @@ const ( // This means the gang can not be scheduled without first increasing the burst size. 
GangExceedsGlobalBurstSizeUnschedulableReason = "gang cardinality too large: exceeds global max burst size" GangExceedsQueueBurstSizeUnschedulableReason = "gang cardinality too large: exceeds queue max burst size" + + UnschedulableReasonMaximumResourcesPerQueueExceeded = "per-queue resource limit exceeded" + UnschedulableReasonMaximumResourcesExceeded = "resource limit exceeded" ) // IsTerminalUnschedulableReason returns true if reason indicates @@ -58,12 +62,22 @@ type SchedulingConstraints struct { // Jobs leased to this executor must be at least this large. // Used, e.g., to avoid scheduling CPU-only jobs onto clusters with GPUs. MinimumJobSize schedulerobjects.ResourceList - // Scheduling constraints for specific priority classes. + // Scheduling constraints by priority class. PriorityClassSchedulingConstraintsByPriorityClassName map[string]PriorityClassSchedulingConstraints + // Scheduling constraints for specific queues. + // If present for a particular queue, global limits (i.e., PriorityClassSchedulingConstraintsByPriorityClassName) + // do not apply for that queue. + QueueSchedulingConstraintsByQueueName map[string]QueueSchedulingConstraints // Limits total resources scheduled per invocation. MaximumResourcesToSchedule schedulerobjects.ResourceList } +// QueueSchedulingConstraints contains per-queue scheduling constraints. +type QueueSchedulingConstraints struct { + // Scheduling constraints by priority class. + PriorityClassSchedulingConstraintsByPriorityClassName map[string]PriorityClassSchedulingConstraints +} + // PriorityClassSchedulingConstraints contains scheduling constraints that apply to jobs of a specific priority class. type PriorityClassSchedulingConstraints struct { PriorityClassName string @@ -71,11 +85,12 @@ type PriorityClassSchedulingConstraints struct { MaximumResourcesPerQueue schedulerobjects.ResourceList } -func SchedulingConstraintsFromSchedulingConfig( +func NewSchedulingConstraints( pool string, totalResources schedulerobjects.ResourceList, minimumJobSize schedulerobjects.ResourceList, config configuration.SchedulingConfig, + queues []queue.Queue, ) SchedulingConstraints { priorityClassSchedulingConstraintsByPriorityClassName := make(map[string]PriorityClassSchedulingConstraints, len(config.Preemption.PriorityClasses)) for name, priorityClass := range config.Preemption.PriorityClasses { @@ -89,6 +104,28 @@ func SchedulingConstraintsFromSchedulingConfig( MaximumResourcesPerQueue: absoluteFromRelativeLimits(totalResources, maximumResourceFractionPerQueue), } } + + queueSchedulingConstraintsByQueueName := make(map[string]QueueSchedulingConstraints, len(queues)) + for _, queue := range queues { + priorityClassSchedulingConstraintsByPriorityClassNameForQueue := make(map[string]PriorityClassSchedulingConstraints, len(queue.ResourceLimitsByPriorityClassName)) + for priorityClassName, priorityClassResourceLimits := range queue.ResourceLimitsByPriorityClassName { + maximumResourceFraction := priorityClassResourceLimits.MaximumResourceFraction + if m, ok := priorityClassResourceLimits.MaximumResourceFractionByPool[pool]; ok { + // Use pool-specific maximum resource fraction if available. 
+ maximumResourceFraction = m.MaximumResourceFraction + } + priorityClassSchedulingConstraintsByPriorityClassNameForQueue[priorityClassName] = PriorityClassSchedulingConstraints{ + PriorityClassName: priorityClassName, + MaximumResourcesPerQueue: absoluteFromRelativeLimits(totalResources, maximumResourceFraction), + } + } + if len(priorityClassSchedulingConstraintsByPriorityClassNameForQueue) > 0 { + queueSchedulingConstraintsByQueueName[queue.Name] = QueueSchedulingConstraints{ + PriorityClassSchedulingConstraintsByPriorityClassName: priorityClassSchedulingConstraintsByPriorityClassNameForQueue, + } + } + } + maximumResourceFractionToSchedule := config.MaximumResourceFractionToSchedule if m, ok := config.MaximumResourceFractionToScheduleByPool[pool]; ok { // Use pool-specific config is available. @@ -99,6 +136,7 @@ func SchedulingConstraintsFromSchedulingConfig( MinimumJobSize: minimumJobSize, MaximumResourcesToSchedule: absoluteFromRelativeLimits(totalResources, maximumResourceFractionToSchedule), PriorityClassSchedulingConstraintsByPriorityClassName: priorityClassSchedulingConstraintsByPriorityClassName, + QueueSchedulingConstraintsByQueueName: queueSchedulingConstraintsByQueueName, } } @@ -164,13 +202,21 @@ func (constraints *SchedulingConstraints) CheckConstraints( return false, QueueRateLimitExceededByGangUnschedulableReason, nil } - // PriorityClassSchedulingConstraintsByPriorityClassName check. - priorityClassName := gctx.GangInfo.PriorityClassName - if priorityClassConstraint, ok := constraints.PriorityClassSchedulingConstraintsByPriorityClassName[priorityClassName]; ok { - if !qctx.AllocatedByPriorityClass[priorityClassName].IsStrictlyLessOrEqual(priorityClassConstraint.MaximumResourcesPerQueue) { - return false, MaximumResourcesPerQueueExceededUnschedulableReason, nil + // QueueSchedulingConstraintsByQueueName / PriorityClassSchedulingConstraintsByPriorityClassName checks. 
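+	// Constraints defined on the gang's queue take precedence: if the queue has its own per-priority-class limits, the global per-priority-class limits are not consulted for that queue.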
+ if queueConstraint, ok := constraints.QueueSchedulingConstraintsByQueueName[gctx.Queue]; ok { + if priorityClassConstraint, ok := queueConstraint.PriorityClassSchedulingConstraintsByPriorityClassName[gctx.PriorityClassName]; ok { + if !qctx.AllocatedByPriorityClass[gctx.PriorityClassName].IsStrictlyLessOrEqual(priorityClassConstraint.MaximumResourcesPerQueue) { + return false, UnschedulableReasonMaximumResourcesPerQueueExceeded, nil + } + } + } else { + if priorityClassConstraint, ok := constraints.PriorityClassSchedulingConstraintsByPriorityClassName[gctx.PriorityClassName]; ok { + if !qctx.AllocatedByPriorityClass[gctx.PriorityClassName].IsStrictlyLessOrEqual(priorityClassConstraint.MaximumResourcesPerQueue) { + return false, UnschedulableReasonMaximumResourcesExceeded, nil + } } } + return true, "", nil } diff --git a/internal/scheduler/constraints/constraints_test.go b/internal/scheduler/constraints/constraints_test.go index 081058191dc..630302a5201 100644 --- a/internal/scheduler/constraints/constraints_test.go +++ b/internal/scheduler/constraints/constraints_test.go @@ -2,6 +2,16 @@ package constraints import ( "testing" + "time" + + "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/common/types" + "github.com/armadaproject/armada/pkg/api" + "github.com/armadaproject/armada/pkg/client/queue" + + "golang.org/x/time/rate" + + "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -10,26 +20,156 @@ import ( schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" ) +type constraintTest struct { + constraints SchedulingConstraints + sctx *schedulercontext.SchedulingContext + gctx *schedulercontext.GangSchedulingContext + queue string + priorityClassName string + expectedCheckRoundConstraintsReason string + expectedCheckConstraintsReason string +} + func TestConstraints(t *testing.T) { - tests := map[string]struct { - constraints SchedulingConstraints - sctx *schedulercontext.SchedulingContext - globalUnschedulableReason string - queue string - priorityClassName string - perQueueAndPriorityClassUnschedulableReason string - }{} // TODO: Add tests. 
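+	// Each case builds SchedulingConstraints via NewSchedulingConstraints and is run through both CheckRoundConstraints and CheckConstraints; an empty expected reason means the corresponding check is expected to pass.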
+ tests := map[string]*constraintTest{ + "no-constraints": makeConstraintsTest(NewSchedulingConstraints( + "pool-1", + makeResourceList("1000", "1000Gi"), + makeResourceList("0", "0"), + makeSchedulingConfig(), + []queue.Queue{}, + )), + "empty-queue-constraints": makeConstraintsTest(NewSchedulingConstraints( + "pool-1", + makeResourceList("1000", "1000Gi"), + makeResourceList("0", "0"), + makeSchedulingConfig(), + []queue.Queue{{Name: "queue-1", ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{}}}, + )), + "within-constraints": makeConstraintsTest(NewSchedulingConstraints( + "pool-1", + makeResourceList("1000", "1000Gi"), + makeResourceList("0", "0"), + configuration.SchedulingConfig{ + MaximumResourceFractionToSchedule: map[string]float64{"cpu": 0.1, "memory": 0.1}, + MaxQueueLookback: 1000, + Preemption: configuration.PreemptionConfig{ + PriorityClasses: map[string]types.PriorityClass{"priority-class-1": {MaximumResourceFractionPerQueueByPool: map[string]map[string]float64{"pool-1": {"cpu": 0.9, "memory": 0.9}}}}, + }, + }, + []queue.Queue{{Name: "queue-1", ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{"priority-class-1": {MaximumResourceFraction: map[string]float64{"cpu": 0.9, "memory": 0.9}}}}}, + )), + "exceeds-queue-priority-class-constraint": func() *constraintTest { + t := makeConstraintsTest(NewSchedulingConstraints( + "pool-1", + makeResourceList("1000", "1000Gi"), + makeResourceList("0", "0"), + makeSchedulingConfig(), + []queue.Queue{ + { + Name: "queue-1", + ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{ + "priority-class-1": { + MaximumResourceFraction: map[string]float64{"cpu": 0.000001, "memory": 0.9}, + }, + }, + }, + }, + )) + t.expectedCheckConstraintsReason = "per-queue resource limit exceeded" + return t + }(), + "exceeds-queue-priority-class-pool-constraint": func() *constraintTest { + t := makeConstraintsTest(NewSchedulingConstraints( + "pool-1", + makeResourceList("1000", "1000Gi"), + makeResourceList("0", "0"), + makeSchedulingConfig(), + []queue.Queue{ + { + Name: "queue-1", + ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{ + "priority-class-1": { + MaximumResourceFractionByPool: map[string]api.PriorityClassPoolResourceLimits{ + "pool-1": { + MaximumResourceFraction: map[string]float64{"cpu": 0.000001, "memory": 0.9}, + }, + }, + }, + }, + }, + }, + )) + t.expectedCheckConstraintsReason = "per-queue resource limit exceeded" + return t + }(), + "exceeds-priority-class-constraint": func() *constraintTest { + t := makeConstraintsTest(NewSchedulingConstraints( + "pool-1", + makeResourceList("1000", "1000Gi"), + makeResourceList("0", "0"), + configuration.SchedulingConfig{ + MaximumResourceFractionToSchedule: map[string]float64{"cpu": 0.1, "memory": 0.1}, + MaxQueueLookback: 1000, + Preemption: configuration.PreemptionConfig{ + PriorityClasses: map[string]types.PriorityClass{"priority-class-1": {MaximumResourceFractionPerQueueByPool: map[string]map[string]float64{"pool-1": {"cpu": 0.00000001, "memory": 0.9}}}}, + }, + }, + []queue.Queue{}, + )) + t.expectedCheckConstraintsReason = "resource limit exceeded" + return t + }(), + "priority-class-constraint-ignored-if-there-is-a-queue-constraint": makeConstraintsTest(NewSchedulingConstraints( + "pool-1", + makeResourceList("1000", "1000Gi"), + makeResourceList("0", "0"), + configuration.SchedulingConfig{ + MaximumResourceFractionToSchedule: map[string]float64{"cpu": 0.1, "memory": 0.1}, + 
MaxQueueLookback: 1000, + Preemption: configuration.PreemptionConfig{ + PriorityClasses: map[string]types.PriorityClass{"priority-class-1": {MaximumResourceFractionPerQueueByPool: map[string]map[string]float64{"pool-1": {"cpu": 0.00000001, "memory": 0.9}}}}, + }, + }, + []queue.Queue{{Name: "queue-1", ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{"priority-class-1": {MaximumResourceFraction: map[string]float64{"cpu": 0.9, "memory": 0.9}}}}}, + )), + "below-minimum-job-size": func() *constraintTest { + t := makeConstraintsTest(NewSchedulingConstraints( + "pool-1", + makeResourceList("1000", "1000Gi"), + makeResourceList("5", "1Mi"), + makeSchedulingConfig(), + []queue.Queue{}, + )) + t.expectedCheckConstraintsReason = "job requests 1 cpu, but the minimum is 5" + return t + }(), + "above-maximum-resources-to-schedule": func() *constraintTest { + t := makeConstraintsTest(NewSchedulingConstraints( + "pool-1", + makeResourceList("1000", "1000Gi"), + makeResourceList("0", "0"), + configuration.SchedulingConfig{ + MaximumResourceFractionToSchedule: map[string]float64{"cpu": 0.00001, "memory": 0.1}, + MaxQueueLookback: 1000, + }, + []queue.Queue{}, + )) + t.expectedCheckRoundConstraintsReason = "maximum resources scheduled" + return t + }(), + } for name, tc := range tests { t.Run(name, func(t *testing.T) { - ok, unschedulableReason, err := tc.constraints.CheckRoundConstraints(tc.sctx, tc.queue) + ok, unscheduledReason, err := tc.constraints.CheckRoundConstraints(tc.sctx, tc.queue) require.NoError(t, err) - require.Equal(t, tc.globalUnschedulableReason == "", ok) - require.Equal(t, tc.globalUnschedulableReason, unschedulableReason) + require.Equal(t, tc.expectedCheckRoundConstraintsReason == "", ok) + require.Equal(t, tc.expectedCheckRoundConstraintsReason, unscheduledReason) - ok, unschedulableReason, err = tc.constraints.CheckConstraints(tc.sctx, nil) + ok, unscheduledReason, err = tc.constraints.CheckConstraints(tc.sctx, tc.gctx) require.NoError(t, err) - require.Equal(t, tc.perQueueAndPriorityClassUnschedulableReason == "", ok) - require.Equal(t, tc.perQueueAndPriorityClassUnschedulableReason, unschedulableReason) + require.Equal(t, tc.expectedCheckConstraintsReason == "", ok) + require.Equal(t, tc.expectedCheckConstraintsReason, unscheduledReason) }) } } @@ -62,3 +202,49 @@ func TestScaleQuantity(t *testing.T) { }) } } + +func makeConstraintsTest(constraints SchedulingConstraints) *constraintTest { + return &constraintTest{ + constraints: constraints, + sctx: &schedulercontext.SchedulingContext{ + Pool: "pool-1", + WeightSum: 100, + ScheduledResources: makeResourceList("1", "1Gi"), + Limiter: rate.NewLimiter(1e9, 1e6), + QueueSchedulingContexts: map[string]*schedulercontext.QueueSchedulingContext{ + "queue-1": { + Queue: "queue-1", + Weight: 1, + Limiter: rate.NewLimiter(1e9, 1e6), + Allocated: makeResourceList("30", "1Gi"), + AllocatedByPriorityClass: schedulerobjects.QuantityByTAndResourceType[string]{"priority-class-1": makeResourceList("20", "1Gi")}, + }, + }, + Started: time.Now(), + }, + gctx: &schedulercontext.GangSchedulingContext{ + GangInfo: schedulercontext.GangInfo{ + PriorityClassName: "priority-class-1", + }, + Queue: "queue-1", + TotalResourceRequests: makeResourceList("1", "1Gi"), + JobSchedulingContexts: []*schedulercontext.JobSchedulingContext{{}}, + }, + queue: "queue-1", + priorityClassName: "priority-class-1", + expectedCheckConstraintsReason: "", + expectedCheckRoundConstraintsReason: "", + } +} + +func makeSchedulingConfig() 
configuration.SchedulingConfig {
+	return configuration.SchedulingConfig{
+		MaximumResourceFractionToSchedule: map[string]float64{"cpu": 0.1, "memory": 0.1},
+		MaxQueueLookback:                  1000,
+		Preemption:                        configuration.PreemptionConfig{PriorityClasses: map[string]types.PriorityClass{"priority-class-1": {}}},
+	}
+}
+
+func makeResourceList(cpu string, memory string) schedulerobjects.ResourceList {
+	return schedulerobjects.ResourceList{Resources: map[string]resource.Quantity{"cpu": resource.MustParse(cpu), "memory": resource.MustParse(memory)}}
+}
diff --git a/internal/scheduler/context/context.go b/internal/scheduler/context/context.go
index 1db201cb4e6..0941ccf3a02 100644
--- a/internal/scheduler/context/context.go
+++ b/internal/scheduler/context/context.go
@@ -559,6 +559,15 @@ func NewGangSchedulingContext(jctxs []*JobSchedulingContext) *GangSchedulingCont
 	}
 }
 
+// JobIds returns a slice composed of the ids of the jobs that make up the gang.
+func (gctx *GangSchedulingContext) JobIds() []string {
+	rv := make([]string, len(gctx.JobSchedulingContexts))
+	for i, jctx := range gctx.JobSchedulingContexts {
+		rv[i] = jctx.JobId
+	}
+	return rv
+}
+
 // Cardinality returns the number of jobs in the gang.
 func (gctx *GangSchedulingContext) Cardinality() int {
 	return len(gctx.JobSchedulingContexts)
@@ -790,7 +799,8 @@ type PodSchedulingContext struct {
 	NodeId string
 	// If set, indicates that the pod was scheduled on a specific node type.
 	WellKnownNodeTypeName string
-	// Priority at which this pod was scheduled.
+	// Priority at which this pod was most recently attempted to be scheduled.
+	// If scheduling was successful, resources were marked as allocated to the job at this priority.
 	ScheduledAtPriority int32
 	// Maximum priority that this pod preempted other pods at.
PreemptedAtPriority int32 diff --git a/internal/scheduler/database/queue_repository.go b/internal/scheduler/database/queue_repository.go deleted file mode 100644 index cb3c34b9839..00000000000 --- a/internal/scheduler/database/queue_repository.go +++ /dev/null @@ -1,38 +0,0 @@ -package database - -import ( - "github.com/go-redis/redis" - - legacyrepository "github.com/armadaproject/armada/internal/armada/repository" -) - -// QueueRepository is an interface to be implemented by structs which provide queue information -type QueueRepository interface { - GetAllQueues() ([]*Queue, error) -} - -// LegacyQueueRepository is a QueueRepository which is backed by Armada's redis store -type LegacyQueueRepository struct { - backingRepo legacyrepository.QueueRepository -} - -func NewLegacyQueueRepository(db redis.UniversalClient) *LegacyQueueRepository { - return &LegacyQueueRepository{ - backingRepo: legacyrepository.NewRedisQueueRepository(db), - } -} - -func (r *LegacyQueueRepository) GetAllQueues() ([]*Queue, error) { - legacyQueues, err := r.backingRepo.GetAllQueues() - if err != nil { - return nil, err - } - queues := make([]*Queue, len(legacyQueues)) - for i, legacyQueue := range legacyQueues { - queues[i] = &Queue{ - Name: legacyQueue.Name, - Weight: float64(legacyQueue.PriorityFactor), - } - } - return queues, nil -} diff --git a/internal/scheduler/database/queue_repository_test.go b/internal/scheduler/database/queue_repository_test.go deleted file mode 100644 index 718d0904290..00000000000 --- a/internal/scheduler/database/queue_repository_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package database - -import ( - "testing" - - "github.com/go-redis/redis" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" - - clientQueue "github.com/armadaproject/armada/pkg/client/queue" -) - -func TestLegacyQueueRepository_GetAllQueues(t *testing.T) { - tests := map[string]struct { - queues []clientQueue.Queue - expectedQueues []*Queue - }{ - "Not empty": { - queues: []clientQueue.Queue{ - { - Name: "test-queue-1", - PriorityFactor: 10, - }, - { - Name: "test-queue-2", - PriorityFactor: 20, - }, - }, - expectedQueues: []*Queue{ - { - Name: "test-queue-1", - Weight: 10, - }, - { - Name: "test-queue-2", - Weight: 20, - }, - }, - }, - "Empty": { - queues: []clientQueue.Queue{}, - expectedQueues: []*Queue{}, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - rc := redis.NewClient(&redis.Options{Addr: "localhost:6379", DB: 10}) - rc.FlushDB() - defer func() { - rc.FlushDB() - _ = rc.Close() - }() - repo := NewLegacyQueueRepository(rc) - for _, queue := range tc.queues { - err := repo.backingRepo.CreateQueue(queue) - require.NoError(t, err) - } - retrievedQueues, err := repo.GetAllQueues() - require.NoError(t, err) - sortFunc := func(a, b *Queue) int { - if a.Name > b.Name { - return -1 - } else if a.Name > b.Name { - return 1 - } else { - return 0 - } - } - slices.SortFunc(tc.expectedQueues, sortFunc) - slices.SortFunc(retrievedQueues, sortFunc) - assert.Equal(t, tc.expectedQueues, retrievedQueues) - }) - } -} diff --git a/internal/scheduler/failureestimator/failureestimator.go b/internal/scheduler/failureestimator/failureestimator.go index c131ded8da8..724556a6c01 100644 --- a/internal/scheduler/failureestimator/failureestimator.go +++ b/internal/scheduler/failureestimator/failureestimator.go @@ -59,6 +59,9 @@ type FailureEstimator struct { parameterIndexByNode map[string]int parameterIndexByQueue map[string]int + // Maps node names to 
the cluster they belong to. + clusterByNode map[string]string + // Samples that have not been processed yet. samples []Sample @@ -106,6 +109,8 @@ func New( parameterIndexByNode: make(map[string]int, 16), parameterIndexByQueue: make(map[string]int, 16), + clusterByNode: make(map[string]string), + numInnerIterations: numInnerIterations, innerOptimiser: innerOptimiser, outerOptimiser: outerOptimiser, @@ -113,7 +118,7 @@ func New( failureProbabilityByNodeDesc: prometheus.NewDesc( fmt.Sprintf("%s_%s_node_failure_probability", namespace, subsystem), "Estimated per-node failure probability.", - []string{"node"}, + []string{"node", "cluster"}, nil, ), failureProbabilityByQueueDesc: prometheus.NewDesc( @@ -141,10 +146,11 @@ func (fe *FailureEstimator) IsDisabled() bool { // Push adds a sample to the internal buffer of the failure estimator. // Samples added via Push are processed on the next call to Update. -func (fe *FailureEstimator) Push(node, queue string, success bool) { +func (fe *FailureEstimator) Push(node, queue, cluster string, success bool) { fe.mu.Lock() defer fe.mu.Unlock() + fe.clusterByNode[node] = cluster i, ok := fe.parameterIndexByNode[node] if !ok { i = len(fe.parameterIndexByNode) + len(fe.parameterIndexByQueue) @@ -273,7 +279,7 @@ func (fe *FailureEstimator) Collect(ch chan<- prometheus.Metric) { for k, i := range fe.parameterIndexByNode { failureProbability := 1 - fe.parameters.AtVec(i) failureProbability = math.Round(failureProbability*100) / 100 - ch <- prometheus.MustNewConstMetric(fe.failureProbabilityByNodeDesc, prometheus.GaugeValue, failureProbability, k) + ch <- prometheus.MustNewConstMetric(fe.failureProbabilityByNodeDesc, prometheus.GaugeValue, failureProbability, k, fe.clusterByNode[k]) } for k, j := range fe.parameterIndexByQueue { failureProbability := 1 - fe.parameters.AtVec(j) diff --git a/internal/scheduler/failureestimator/failureestimator_test.go b/internal/scheduler/failureestimator/failureestimator_test.go index 5a4bbb3c8bd..9c5e80dac30 100644 --- a/internal/scheduler/failureestimator/failureestimator_test.go +++ b/internal/scheduler/failureestimator/failureestimator_test.go @@ -20,7 +20,7 @@ func TestUpdate(t *testing.T) { require.NoError(t, err) // Test initialisation. - fe.Push("node", "queue", false) + fe.Push("node", "queue", "cluster", false) nodeParameterIndex, ok := fe.parameterIndexByNode["node"] require.True(t, ok) queueParameterIndex, ok := fe.parameterIndexByQueue["queue"] @@ -31,7 +31,7 @@ func TestUpdate(t *testing.T) { require.Equal(t, 0.5, fe.parameters.AtVec(1)) for i := 0; i < 100; i++ { - fe.Push(fmt.Sprintf("node-%d", i), "queue-0", false) + fe.Push(fmt.Sprintf("node-%d", i), "queue-0", "cluster", false) } nodeParameterIndex, ok = fe.parameterIndexByNode["node-99"] require.True(t, ok) @@ -52,14 +52,14 @@ func TestUpdate(t *testing.T) { assert.Less(t, queueSuccessProbability, 0.5-eps) // Test that the estimates move in the expected direction on success. 
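For context on the signature change exercised throughout this test: Push now also takes the cluster a node belongs to, which the estimator records and later attaches as an extra label when node failure probabilities are collected. A usage sketch (names are illustrative; the metric name is assembled from the package's namespace and subsystem constants):

// fe is a *FailureEstimator as constructed in this test.
fe.Push("node-1", "queue-a", "cluster-x", false) // records clusterByNode["node-1"] = "cluster-x"
fe.Update()
// Collect then emits the node estimate with both labels, e.g.
//   <namespace>_<subsystem>_node_failure_probability{node="node-1", cluster="cluster-x"}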
- fe.Push("node", "queue", true) + fe.Push("node", "queue", "cluster", true) fe.Update() assert.Greater(t, fe.parameters.AtVec(0), nodeSuccessProbability) assert.Greater(t, fe.parameters.AtVec(1), queueSuccessProbability) for i := 0; i < 1000; i++ { for i := 0; i < 10; i++ { - fe.Push("node", "queue", false) + fe.Push("node", "queue", "cluster", false) } fe.Update() } @@ -70,7 +70,7 @@ func TestUpdate(t *testing.T) { for i := 0; i < 1000; i++ { for i := 0; i < 10; i++ { - fe.Push("node", "queue", true) + fe.Push("node", "queue", "cluster", true) } fe.Update() } diff --git a/internal/scheduler/gang_scheduler.go b/internal/scheduler/gang_scheduler.go index cfbd0633eaf..3dd21513e9d 100644 --- a/internal/scheduler/gang_scheduler.go +++ b/internal/scheduler/gang_scheduler.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/hashicorp/go-memdb" + "github.com/pkg/errors" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/util" @@ -106,8 +107,9 @@ func (sch *GangScheduler) Schedule(ctx *armadacontext.Context, gctx *schedulerco // This deferred function ensures unschedulable jobs are registered as such. gangAddedToSchedulingContext := false defer func() { - // Do nothing if an error occurred. + // If an error occurred, augment the error message and return. if err != nil { + err = errors.WithMessagef(err, "failed scheduling gang %s composed of jobs %v", gctx.Id, gctx.JobIds()) return } diff --git a/internal/scheduler/gang_scheduler_test.go b/internal/scheduler/gang_scheduler_test.go index d3993772b03..7e13e675f10 100644 --- a/internal/scheduler/gang_scheduler_test.go +++ b/internal/scheduler/gang_scheduler_test.go @@ -41,7 +41,7 @@ func TestGangScheduler(t *testing.T) { ExpectedScheduledIndices []int // Cumulative number of jobs we expect to schedule successfully. // Each index `i` is the expected value when processing gang `i`. - ExpectedScheduledJobs []int + ExpectedCumulativeScheduledJobs []int // If present, assert that gang `i` is scheduled on nodes with node // uniformity label `ExpectedNodeUniformity[i]`. 
ExpectedNodeUniformity map[int]string @@ -54,9 +54,9 @@ func TestGangScheduler(t *testing.T) { Gangs: [][]*jobdb.Job{ testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32)), }, - ExpectedScheduledIndices: testfixtures.IntRange(0, 0), - ExpectedScheduledJobs: []int{32}, - ExpectedRuntimeGangCardinality: []int{32}, + ExpectedScheduledIndices: testfixtures.IntRange(0, 0), + ExpectedCumulativeScheduledJobs: []int{32}, + ExpectedRuntimeGangCardinality: []int{32}, }, "simple failure": { SchedulingConfig: testfixtures.TestSchedulingConfig(), @@ -64,9 +64,9 @@ func TestGangScheduler(t *testing.T) { Gangs: [][]*jobdb.Job{ testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 33)), }, - ExpectedScheduledIndices: nil, - ExpectedScheduledJobs: []int{0}, - ExpectedRuntimeGangCardinality: []int{0}, + ExpectedScheduledIndices: nil, + ExpectedCumulativeScheduledJobs: []int{0}, + ExpectedRuntimeGangCardinality: []int{0}, }, "simple success where min cardinality is met": { SchedulingConfig: testfixtures.TestSchedulingConfig(), @@ -77,9 +77,9 @@ func TestGangScheduler(t *testing.T) { testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 40), ), }, - ExpectedScheduledIndices: testfixtures.IntRange(0, 0), - ExpectedScheduledJobs: []int{32}, - ExpectedRuntimeGangCardinality: []int{32}, + ExpectedScheduledIndices: testfixtures.IntRange(0, 0), + ExpectedCumulativeScheduledJobs: []int{32}, + ExpectedRuntimeGangCardinality: []int{32}, }, "simple failure where min cardinality is not met": { SchedulingConfig: testfixtures.TestSchedulingConfig(), @@ -90,9 +90,9 @@ func TestGangScheduler(t *testing.T) { testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 40), ), }, - ExpectedScheduledIndices: nil, - ExpectedScheduledJobs: []int{0}, - ExpectedRuntimeGangCardinality: []int{0}, + ExpectedScheduledIndices: nil, + ExpectedCumulativeScheduledJobs: []int{0}, + ExpectedRuntimeGangCardinality: []int{0}, }, "one success and one failure": { SchedulingConfig: testfixtures.TestSchedulingConfig(), @@ -101,9 +101,9 @@ func TestGangScheduler(t *testing.T) { testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32)), testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), }, - ExpectedScheduledIndices: testfixtures.IntRange(0, 0), - ExpectedScheduledJobs: []int{32, 32}, - ExpectedRuntimeGangCardinality: []int{32, 0}, + ExpectedScheduledIndices: testfixtures.IntRange(0, 0), + ExpectedCumulativeScheduledJobs: []int{32, 32}, + ExpectedRuntimeGangCardinality: []int{32, 0}, }, "one success and one failure using min cardinality": { SchedulingConfig: testfixtures.TestSchedulingConfig(), @@ -115,9 +115,9 @@ func TestGangScheduler(t *testing.T) { ), testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), }, - ExpectedScheduledIndices: testfixtures.IntRange(0, 0), - ExpectedScheduledJobs: []int{32, 32}, - ExpectedRuntimeGangCardinality: []int{32, 0}, + ExpectedScheduledIndices: testfixtures.IntRange(0, 0), + ExpectedCumulativeScheduledJobs: []int{32, 32}, + ExpectedRuntimeGangCardinality: []int{32, 0}, }, "multiple nodes": { SchedulingConfig: testfixtures.TestSchedulingConfig(), @@ -125,9 +125,9 @@ func TestGangScheduler(t *testing.T) { Gangs: [][]*jobdb.Job{ testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 64)), }, - ExpectedScheduledIndices: 
testfixtures.IntRange(0, 0), - ExpectedScheduledJobs: []int{64}, - ExpectedRuntimeGangCardinality: []int{64}, + ExpectedScheduledIndices: testfixtures.IntRange(0, 0), + ExpectedCumulativeScheduledJobs: []int{64}, + ExpectedRuntimeGangCardinality: []int{64}, }, "MaximumResourceFractionToSchedule": { SchedulingConfig: testfixtures.WithRoundLimitsConfig( @@ -140,9 +140,9 @@ func TestGangScheduler(t *testing.T) { testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 16)), testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 8)), }, - ExpectedScheduledIndices: []int{0, 1}, - ExpectedScheduledJobs: []int{8, 24, 24}, - ExpectedRuntimeGangCardinality: []int{8, 16, 0}, + ExpectedScheduledIndices: []int{0, 1}, + ExpectedCumulativeScheduledJobs: []int{8, 24, 24}, + ExpectedRuntimeGangCardinality: []int{8, 16, 0}, }, "MaximumResourceFractionToScheduleByPool": { SchedulingConfig: testfixtures.WithRoundLimitsConfig( @@ -160,9 +160,9 @@ func TestGangScheduler(t *testing.T) { testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), }, - ExpectedScheduledIndices: []int{0, 1, 2}, - ExpectedScheduledJobs: []int{1, 2, 3, 3, 3}, - ExpectedRuntimeGangCardinality: []int{1, 1, 1, 0, 0}, + ExpectedScheduledIndices: []int{0, 1, 2}, + ExpectedCumulativeScheduledJobs: []int{1, 2, 3, 3, 3}, + ExpectedRuntimeGangCardinality: []int{1, 1, 1, 0, 0}, }, "MaximumResourceFractionToScheduleByPool non-existing pool": { SchedulingConfig: testfixtures.WithRoundLimitsConfig( @@ -180,9 +180,9 @@ func TestGangScheduler(t *testing.T) { testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), }, - ExpectedScheduledIndices: []int{0, 1, 2, 3}, - ExpectedScheduledJobs: []int{1, 2, 3, 4, 4}, - ExpectedRuntimeGangCardinality: []int{1, 1, 1, 1, 0}, + ExpectedScheduledIndices: []int{0, 1, 2, 3}, + ExpectedCumulativeScheduledJobs: []int{1, 2, 3, 4, 4}, + ExpectedRuntimeGangCardinality: []int{1, 1, 1, 1, 0}, }, "MaximumResourceFractionPerQueue": { SchedulingConfig: testfixtures.WithPerPriorityLimitsConfig( @@ -205,9 +205,9 @@ func TestGangScheduler(t *testing.T) { testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass3, 4)), testfixtures.WithGangAnnotationsJobs(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass3, 5)), }, - ExpectedScheduledIndices: []int{0, 2, 4, 6}, - ExpectedScheduledJobs: []int{1, 1, 3, 3, 6, 6, 10, 10}, - ExpectedRuntimeGangCardinality: []int{1, 0, 2, 0, 3, 0, 4, 0}, + ExpectedScheduledIndices: []int{0, 2, 4, 6}, + ExpectedCumulativeScheduledJobs: []int{1, 1, 3, 3, 6, 6, 10, 10}, + ExpectedRuntimeGangCardinality: []int{1, 0, 2, 0, 3, 0, 4, 0}, }, "resolution has no impact on jobs of size a multiple of the resolution": { SchedulingConfig: testfixtures.WithIndexedResourcesConfig( @@ -226,9 +226,9 @@ func TestGangScheduler(t *testing.T) { testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), }, - ExpectedScheduledIndices: testfixtures.IntRange(0, 5), - ExpectedScheduledJobs: testfixtures.IntRange(1, 6), - ExpectedRuntimeGangCardinality: []int{1, 1, 1, 
1, 1, 1}, + ExpectedScheduledIndices: testfixtures.IntRange(0, 5), + ExpectedCumulativeScheduledJobs: testfixtures.IntRange(1, 6), + ExpectedRuntimeGangCardinality: []int{1, 1, 1, 1, 1, 1}, }, "jobs of size not a multiple of the resolution blocks scheduling new jobs": { SchedulingConfig: testfixtures.WithIndexedResourcesConfig( @@ -245,9 +245,9 @@ func TestGangScheduler(t *testing.T) { testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1)), }, - ExpectedScheduledIndices: testfixtures.IntRange(0, 2), - ExpectedScheduledJobs: []int{1, 2, 3, 3}, - ExpectedRuntimeGangCardinality: []int{1, 1, 1, 0}, + ExpectedScheduledIndices: testfixtures.IntRange(0, 2), + ExpectedCumulativeScheduledJobs: []int{1, 2, 3, 3}, + ExpectedRuntimeGangCardinality: []int{1, 1, 1, 0}, }, "consider all nodes in the bucket": { SchedulingConfig: testfixtures.WithIndexedResourcesConfig( @@ -283,9 +283,9 @@ func TestGangScheduler(t *testing.T) { Gangs: [][]*jobdb.Job{ testfixtures.WithGangAnnotationsJobs(testfixtures.N1GpuJobs("A", testfixtures.PriorityClass0, 1)), }, - ExpectedScheduledIndices: testfixtures.IntRange(0, 0), - ExpectedScheduledJobs: []int{1}, - ExpectedRuntimeGangCardinality: []int{1}, + ExpectedScheduledIndices: testfixtures.IntRange(0, 0), + ExpectedCumulativeScheduledJobs: []int{1}, + ExpectedRuntimeGangCardinality: []int{1}, }, "NodeUniformityLabel set but not indexed": { SchedulingConfig: testfixtures.TestSchedulingConfig(), @@ -300,9 +300,9 @@ func TestGangScheduler(t *testing.T) { testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), )), }, - ExpectedScheduledIndices: nil, - ExpectedScheduledJobs: []int{0}, - ExpectedRuntimeGangCardinality: []int{0}, + ExpectedScheduledIndices: nil, + ExpectedCumulativeScheduledJobs: []int{0}, + ExpectedRuntimeGangCardinality: []int{0}, }, "NodeUniformityLabel not set": { SchedulingConfig: testfixtures.WithIndexedNodeLabelsConfig( @@ -317,9 +317,9 @@ func TestGangScheduler(t *testing.T) { testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 1), )), }, - ExpectedScheduledIndices: nil, - ExpectedScheduledJobs: []int{0}, - ExpectedRuntimeGangCardinality: []int{0}, + ExpectedScheduledIndices: nil, + ExpectedCumulativeScheduledJobs: []int{0}, + ExpectedRuntimeGangCardinality: []int{0}, }, "NodeUniformityLabel insufficient capacity": { SchedulingConfig: testfixtures.WithIndexedNodeLabelsConfig( @@ -341,9 +341,9 @@ func TestGangScheduler(t *testing.T) { testfixtures.WithNodeUniformityLabelAnnotationJobs("foo", testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 3)), ), }, - ExpectedScheduledIndices: nil, - ExpectedScheduledJobs: []int{0}, - ExpectedRuntimeGangCardinality: []int{0}, + ExpectedScheduledIndices: nil, + ExpectedCumulativeScheduledJobs: []int{0}, + ExpectedRuntimeGangCardinality: []int{0}, }, "NodeUniformityLabel": { SchedulingConfig: testfixtures.WithIndexedNodeLabelsConfig( @@ -379,9 +379,9 @@ func TestGangScheduler(t *testing.T) { testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 4), )), }, - ExpectedScheduledIndices: []int{0}, - ExpectedScheduledJobs: []int{4}, - ExpectedRuntimeGangCardinality: []int{4}, + ExpectedScheduledIndices: []int{0}, + ExpectedCumulativeScheduledJobs: []int{4}, + ExpectedRuntimeGangCardinality: []int{4}, }, "NodeUniformityLabel NumScheduled tiebreak": { SchedulingConfig: testfixtures.WithIndexedNodeLabelsConfig( @@ -407,10 +407,10 @@ 
func TestGangScheduler(t *testing.T) { ), ), }, - ExpectedScheduledIndices: []int{0}, - ExpectedScheduledJobs: []int{3}, - ExpectedNodeUniformity: map[int]string{0: "b"}, - ExpectedRuntimeGangCardinality: []int{3}, + ExpectedScheduledIndices: []int{0}, + ExpectedCumulativeScheduledJobs: []int{3}, + ExpectedNodeUniformity: map[int]string{0: "b"}, + ExpectedRuntimeGangCardinality: []int{3}, }, "NodeUniformityLabel PreemptedAtPriority tiebreak": { SchedulingConfig: testfixtures.WithIndexedNodeLabelsConfig( @@ -444,10 +444,10 @@ func TestGangScheduler(t *testing.T) { ), ), }, - ExpectedScheduledIndices: []int{0}, - ExpectedScheduledJobs: []int{2}, - ExpectedNodeUniformity: map[int]string{0: "b"}, - ExpectedRuntimeGangCardinality: []int{2}, + ExpectedScheduledIndices: []int{0}, + ExpectedCumulativeScheduledJobs: []int{2}, + ExpectedNodeUniformity: map[int]string{0: "b"}, + ExpectedRuntimeGangCardinality: []int{2}, }, "AwayNodeTypes": { SchedulingConfig: func() configuration.SchedulingConfig { @@ -513,9 +513,56 @@ func TestGangScheduler(t *testing.T) { gangs = append(gangs, []*jobdb.Job{testfixtures.TestJob("A", jobId, "armada-preemptible-away-both", testfixtures.Test1Cpu4GiPodReqs("A", jobId, 30000))}) return }(), - ExpectedScheduledIndices: []int{1}, - ExpectedScheduledJobs: []int{0, 1}, - ExpectedRuntimeGangCardinality: []int{0, 1}, + ExpectedScheduledIndices: []int{1}, + ExpectedCumulativeScheduledJobs: []int{0, 1}, + ExpectedRuntimeGangCardinality: []int{0, 1}, + }, + "Home-away scheduling": { + SchedulingConfig: func() configuration.SchedulingConfig { + config := testfixtures.TestSchedulingConfig() + config.Preemption.PriorityClasses = map[string]types.PriorityClass{ + "armada-preemptible": { + Priority: 30000, + Preemptible: true, + }, + "armada-preemptible-away": { + Priority: 30000, + Preemptible: true, + AwayNodeTypes: []types.AwayNodeType{ + {Priority: 29000, WellKnownNodeTypeName: "node-type-a"}, + }, + }, + } + config.Preemption.DefaultPriorityClass = "armada-preemptible" + config.WellKnownNodeTypes = []configuration.WellKnownNodeType{ + { + Name: "node-type-a", + Taints: []v1.Taint{ + {Key: "taint-a", Value: "true", Effect: v1.TaintEffectNoSchedule}, + }, + }, + } + return config + }(), + Nodes: func() []*schedulerobjects.Node { + nodes := testfixtures.N32CpuNodes(1, []int32{29000, 30000}) + for _, node := range nodes { + node.Taints = []v1.Taint{ + {Key: "taint-a", Value: "true", Effect: v1.TaintEffectNoSchedule}, + } + } + return nodes + }(), + Gangs: func() (gangs [][]*jobdb.Job) { + jobId := util.ULID() + gangs = append(gangs, []*jobdb.Job{testfixtures.TestJob("A", jobId, "armada-preemptible-away", testfixtures.Test32Cpu256GiPodReqs("A", jobId, 30000))}) + jobId = util.ULID() + gangs = append(gangs, []*jobdb.Job{testfixtures.TestJob("A", jobId, "armada-preemptible-away", testfixtures.Test32Cpu256GiPodReqs("A", jobId, 30000))}) + return + }(), + ExpectedScheduledIndices: []int{0}, + ExpectedCumulativeScheduledJobs: []int{1, 1}, + ExpectedRuntimeGangCardinality: []int{1, 0}, }, } for name, tc := range tests { @@ -579,11 +626,12 @@ func TestGangScheduler(t *testing.T) { ) require.NoError(t, err) } - constraints := schedulerconstraints.SchedulingConstraintsFromSchedulingConfig( + constraints := schedulerconstraints.NewSchedulingConstraints( "pool", tc.TotalResources, schedulerobjects.ResourceList{Resources: tc.MinimumJobSize}, tc.SchedulingConfig, + nil, ) sch, err := NewGangScheduler(sctx, constraints, nodeDb) require.NoError(t, err) @@ -640,7 +688,7 @@ func TestGangScheduler(t 
*testing.T) {
 				// Verify accounting
 				scheduledGangs++
 				require.Equal(t, scheduledGangs, sch.schedulingContext.NumScheduledGangs)
-				require.Equal(t, tc.ExpectedScheduledJobs[i], sch.schedulingContext.NumScheduledJobs)
+				require.Equal(t, tc.ExpectedCumulativeScheduledJobs[i], sch.schedulingContext.NumScheduledJobs)
 				require.Equal(t, 0, sch.schedulingContext.NumEvictedJobs)
 			} else {
 				require.NotEmpty(t, reason)
@@ -654,7 +702,7 @@ func TestGangScheduler(t *testing.T) {
 
 				// Verify accounting
 				require.Equal(t, scheduledGangs, sch.schedulingContext.NumScheduledGangs)
-				require.Equal(t, tc.ExpectedScheduledJobs[i], sch.schedulingContext.NumScheduledJobs)
+				require.Equal(t, tc.ExpectedCumulativeScheduledJobs[i], sch.schedulingContext.NumScheduledJobs)
 				require.Equal(t, 0, sch.schedulingContext.NumEvictedJobs)
 			}
 		}
diff --git a/internal/scheduler/metrics.go b/internal/scheduler/metrics.go
index 3fc95b1b26f..48bf523707a 100644
--- a/internal/scheduler/metrics.go
+++ b/internal/scheduler/metrics.go
@@ -9,6 +9,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"k8s.io/apimachinery/pkg/util/clock"
 
+	"github.com/armadaproject/armada/internal/armada/repository"
 	"github.com/armadaproject/armada/internal/common/armadacontext"
 	"github.com/armadaproject/armada/internal/common/logging"
 	commonmetrics "github.com/armadaproject/armada/internal/common/metrics"
@@ -49,7 +50,7 @@ func (m metricProvider) GetRunningJobMetrics(queueName string) []*commonmetrics.
 // The metrics themselves are calculated asynchronously every refreshPeriod
 type MetricsCollector struct {
 	jobDb              *jobdb.JobDb
-	queueRepository    database.QueueRepository
+	queueRepository    repository.QueueRepository
 	executorRepository database.ExecutorRepository
 	poolAssigner       PoolAssigner
 	refreshPeriod      time.Duration
@@ -59,7 +60,7 @@ type MetricsCollector struct {
 
 func NewMetricsCollector(
 	jobDb *jobdb.JobDb,
-	queueRepository database.QueueRepository,
+	queueRepository repository.QueueRepository,
 	executorRepository database.ExecutorRepository,
 	poolAssigner PoolAssigner,
 	refreshPeriod time.Duration,
diff --git a/internal/scheduler/metrics/metrics.go b/internal/scheduler/metrics/metrics.go
index c6875fa9659..d3a7b046db8 100644
--- a/internal/scheduler/metrics/metrics.go
+++ b/internal/scheduler/metrics/metrics.go
@@ -8,6 +8,8 @@ import (
 	lru "github.com/hashicorp/golang-lru"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/sirupsen/logrus"
+	v1 "k8s.io/api/core/v1"
 
 	"github.com/armadaproject/armada/internal/common/armadacontext"
 	"github.com/armadaproject/armada/internal/scheduler/configuration"
@@ -20,8 +22,6 @@ const (
 	namespace = "armada"
 	subsystem = "scheduler"
 
-	jobsResourceLabel = "jobs"
-
 	podUnschedulable = "podUnschedulable"
 	leaseExpired     = "leaseExpired"
 	podError         = "podError"
@@ -39,6 +39,10 @@ const (
 	succeeded = "succeeded"
 )
 
+// A valid metric name contains only letters, digits (not as the first character),
+// underscores, and colons, as validated by the following regex.
+var metricNameValidationRegex = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+
 type Metrics struct {
 	config configuration.MetricsConfig
 
@@ -58,9 +62,8 @@ type Metrics struct {
 	// Messages that match no regex map to -1.
 	matchedRegexIndexByErrorMessage *lru.Cache
 
-	// Job metrics.
-	transitions     *prometheus.CounterVec
-	resourceSeconds *prometheus.CounterVec
+	// Map from resource name to the counter and counterSeconds Vecs for that resource.
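A note on the shape of this map, sketched with a hypothetical "cpu" entry: each tracked resource contributes two entries, the plain counter and its seconds companion, both created lazily on first use:

// resourceCounters["cpu"]         -> CounterVec backing <namespace>_<subsystem>_cpu_total
// resourceCounters["cpu_seconds"] -> CounterVec backing <namespace>_<subsystem>_cpu_seconds_total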
+ resourceCounters map[v1.ResourceName]*prometheus.CounterVec } func New(config configuration.MetricsConfig) (*Metrics, error) { @@ -93,24 +96,7 @@ func New(config configuration.MetricsConfig) (*Metrics, error) { errorRegexes: errorRegexes, matchedRegexIndexByErrorMessage: matchedRegexIndexByError, - transitions: prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "resources_total", - Help: "Job state transition resource counters.", - }, - []string{"state", "category", "subCategory", "queue", "cluster", "nodeType", "node", "resource"}, - ), - resourceSeconds: prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "resource_seconds_total", - Help: "Job state transition resource-second counters.", - }, - []string{"priorState", "state", "category", "subCategory", "queue", "cluster", "nodeType", "node", "resource"}, - ), + resourceCounters: make(map[v1.ResourceName]*prometheus.CounterVec), }, nil } @@ -140,8 +126,9 @@ func (m *Metrics) Describe(ch chan<- *prometheus.Desc) { if m.IsDisabled() { return } - m.transitions.Describe(ch) - m.resourceSeconds.Describe(ch) + for _, metric := range m.resourceCounters { + metric.Describe(ch) + } } // Collect and then reset all metrics. @@ -150,15 +137,15 @@ func (m *Metrics) Collect(ch chan<- prometheus.Metric) { if m.IsDisabled() { return } - m.transitions.Collect(ch) - m.resourceSeconds.Collect(ch) - + for _, metric := range m.resourceCounters { + metric.Collect(ch) + } // Reset metrics periodically. t := time.Now() if t.Sub(m.timeOfMostRecentReset) > m.resetInterval { - m.transitions.Reset() - m.resourceSeconds.Reset() - + for _, metric := range m.resourceCounters { + metric.Reset() + } m.timeOfMostRecentReset = t } } @@ -228,46 +215,34 @@ func (m *Metrics) UpdateQueued(job *jobdb.Job) error { labels = append(labels, "") // No category for queued. labels = append(labels, "") // No subCategory for queued. labels = appendLabelsFromJob(labels, job) - if err := m.updateCounterVecFromJob(m.transitions, labels[1:], job); err != nil { - return err - } - return nil + + return m.updateMetrics(labels, job, 0) } func (m *Metrics) UpdatePending(job *jobdb.Job) error { latestRun := job.LatestRun() - priorState, priorStateTime := getPriorState(job, latestRun, latestRun.PendingTime()) + duration, priorState := stateDuration(job, latestRun, latestRun.PendingTime()) labels := m.buffer[0:0] labels = append(labels, priorState) labels = append(labels, pending) labels = append(labels, "") // No category for pending. labels = append(labels, "") // No subCategory for pending. labels = appendLabelsFromJob(labels, job) - if err := m.updateResourceSecondsCounterVec(m.resourceSeconds, labels, job, latestRun.PendingTime(), priorStateTime); err != nil { - return err - } - if err := m.updateCounterVecFromJob(m.transitions, labels[1:], job); err != nil { - return err - } - return nil + + return m.updateMetrics(labels, job, duration) } func (m *Metrics) UpdateCancelled(job *jobdb.Job) error { latestRun := job.LatestRun() - priorState, priorStateTime := getPriorState(job, latestRun, latestRun.TerminatedTime()) + duration, priorState := stateDuration(job, latestRun, latestRun.TerminatedTime()) labels := m.buffer[0:0] labels = append(labels, priorState) labels = append(labels, cancelled) labels = append(labels, "") // No category for cancelled. labels = append(labels, "") // No subCategory for cancelled. 
labels = appendLabelsFromJob(labels, job) - if err := m.updateResourceSecondsCounterVec(m.resourceSeconds, labels, job, latestRun.TerminatedTime(), priorStateTime); err != nil { - return err - } - if err := m.updateCounterVecFromJob(m.transitions, labels[1:], job); err != nil { - return err - } - return nil + + return m.updateMetrics(labels, job, duration) } func (m *Metrics) UpdateFailed(ctx *armadacontext.Context, job *jobdb.Job, jobRunErrorsByRunId map[uuid.UUID]*armadaevents.Error) error { @@ -279,93 +254,68 @@ func (m *Metrics) UpdateFailed(ctx *armadacontext.Context, job *jobdb.Job, jobRu return m.UpdatePreempted(job) } latestRun := job.LatestRun() - priorState, priorStateTime := getPriorState(job, latestRun, latestRun.TerminatedTime()) + duration, priorState := stateDuration(job, latestRun, latestRun.TerminatedTime()) labels := m.buffer[0:0] labels = append(labels, priorState) labels = append(labels, failed) labels = append(labels, category) labels = append(labels, subCategory) labels = appendLabelsFromJob(labels, job) - if err := m.updateResourceSecondsCounterVec(m.resourceSeconds, labels, job, latestRun.TerminatedTime(), priorStateTime); err != nil { - return err - } - if err := m.updateCounterVecFromJob(m.transitions, labels[1:], job); err != nil { - return err - } - return nil + + return m.updateMetrics(labels, job, duration) } func (m *Metrics) UpdateSucceeded(job *jobdb.Job) error { labels := m.buffer[0:0] latestRun := job.LatestRun() - priorState, priorStateTime := getPriorState(job, latestRun, latestRun.TerminatedTime()) + duration, priorState := stateDuration(job, latestRun, latestRun.TerminatedTime()) labels = append(labels, priorState) labels = append(labels, succeeded) labels = append(labels, "") // No category for succeeded. labels = append(labels, "") // No subCategory for succeeded. labels = appendLabelsFromJob(labels, job) - if err := m.updateResourceSecondsCounterVec(m.resourceSeconds, labels, job, latestRun.TerminatedTime(), priorStateTime); err != nil { - return err - } - if err := m.updateCounterVecFromJob(m.transitions, labels[1:], job); err != nil { - return err - } - return nil + + return m.updateMetrics(labels, job, duration) } func (m *Metrics) UpdateLeased(jctx *schedulercontext.JobSchedulingContext) error { job := jctx.Job.(*jobdb.Job) latestRun := job.LatestRun() - priorState, priorStateTime := getPriorState(job, latestRun, &jctx.Created) + duration, priorState := stateDuration(job, latestRun, &jctx.Created) labels := m.buffer[0:0] labels = append(labels, priorState) labels = append(labels, leased) labels = append(labels, "") // No category for leased. labels = append(labels, "") // No subCategory for leased. labels = appendLabelsFromJobSchedulingContext(labels, jctx) - if err := m.updateResourceSecondsCounterVec(m.resourceSeconds, labels, job, &jctx.Created, priorStateTime); err != nil { - return err - } - if err := m.updateCounterVecFromJob(m.transitions, labels[1:], job); err != nil { - return err - } - return nil + + return m.updateMetrics(labels, job, duration) } func (m *Metrics) UpdatePreempted(job *jobdb.Job) error { latestRun := job.LatestRun() - priorState, priorStateTime := getPriorState(job, latestRun, latestRun.PreemptedTime()) + duration, priorState := stateDuration(job, latestRun, latestRun.PreemptedTime()) labels := m.buffer[0:0] labels = append(labels, priorState) labels = append(labels, preempted) labels = append(labels, "") // No category for preempted. labels = append(labels, "") // No subCategory for preempted. 
labels = appendLabelsFromJob(labels, job) - if err := m.updateResourceSecondsCounterVec(m.resourceSeconds, labels, job, latestRun.PreemptedTime(), priorStateTime); err != nil { - return err - } - if err := m.updateCounterVecFromJob(m.transitions, labels[1:], job); err != nil { - return err - } - return nil + + return m.updateMetrics(labels, job, duration) } func (m *Metrics) UpdateRunning(job *jobdb.Job) error { latestRun := job.LatestRun() - priorState, priorStateTime := getPriorState(job, latestRun, latestRun.RunningTime()) + duration, priorState := stateDuration(job, latestRun, latestRun.RunningTime()) labels := m.buffer[0:0] labels = append(labels, priorState) labels = append(labels, running) labels = append(labels, "") // No category for running. labels = append(labels, "") // No subCategory for running. labels = appendLabelsFromJob(labels, job) - if err := m.updateResourceSecondsCounterVec(m.resourceSeconds, labels, job, latestRun.RunningTime(), priorStateTime); err != nil { - return err - } - if err := m.updateCounterVecFromJob(m.transitions, labels[1:], job); err != nil { - return err - } - return nil + + return m.updateMetrics(labels, job, duration) } func (m *Metrics) failedCategoryAndSubCategoryFromJob(ctx *armadacontext.Context, job *jobdb.Job, jobRunErrorsByRunId map[uuid.UUID]*armadaevents.Error) (category, subCategory string) { @@ -477,72 +427,91 @@ func errorTypeAndMessageFromError(ctx *armadacontext.Context, err *armadaevents. } } -// updateCounterVecFromJob is a helper method to increment vector counters. -func (m *Metrics) updateCounterVecFromJob(vec *prometheus.CounterVec, labels []string, job *jobdb.Job) error { - i := len(labels) - - // Number of jobs. - labels = append(labels, jobsResourceLabel) - if c, err := vec.GetMetricWithLabelValues(labels...); err != nil { +func (m *Metrics) updateMetrics(labels []string, job *jobdb.Job, stateDuration time.Duration) error { + // update jobs and jobs-seconds metrics + jobs, jobsSeconds := m.counterVectorsFromResource(v1.ResourceName("jobs")) + if c, err := jobs.GetMetricWithLabelValues(labels[1:]...); err != nil { // we don't need priorState label here return err } else { c.Add(1) } - - // Total resource requests of jobs. - requests := job.GetResourceRequirements().Requests - for _, resourceName := range m.config.TrackedResourceNames { - labels[i] = string(resourceName) - if c, err := vec.GetMetricWithLabelValues(labels...); err != nil { - return err - } else { - q := requests[resourceName] - v := float64(q.MilliValue()) / 1000 - c.Add(v) - } - } - - return nil -} - -// updateResourceSecondsCounterVec is a helper method to increment vector counters by the number of seconds per resource a jobs has consumed in a given state. -func (m *Metrics) updateResourceSecondsCounterVec(vec *prometheus.CounterVec, labels []string, job *jobdb.Job, stateTime, priorStateTime *time.Time) error { - if stateTime == nil || priorStateTime == nil { - return nil - } - i := len(labels) - // Number of jobs. 
- labels = append(labels, jobsResourceLabel) - stateDuration := stateDuration(priorStateTime, stateTime).Seconds() - if c, err := vec.GetMetricWithLabelValues(labels...); err != nil { + if c, err := jobsSeconds.GetMetricWithLabelValues(labels...); err != nil { return err } else { - c.Add(stateDuration) + c.Add(stateDuration.Seconds()) } + requests := job.GetResourceRequirements().Requests - for _, resourceName := range m.config.TrackedResourceNames { - labels[i] = string(resourceName) - if c, err := vec.GetMetricWithLabelValues(labels...); err != nil { + for _, resource := range m.config.TrackedResourceNames { + if r, ok := m.config.ResourceRenaming[resource]; ok { + resource = v1.ResourceName(r) + } + if !metricNameValidationRegex.MatchString(resource.String()) { + logrus.Warnf("Resource name is not valid for a metric name: %s", resource) + continue + } + metric, metricSeconds := m.counterVectorsFromResource(resource) + if metric == nil || metricSeconds == nil { + continue + } + c, err := metric.GetMetricWithLabelValues(labels[1:]...) // we don't need priorState label here + if err != nil { + return err + } + cSeconds, err := metricSeconds.GetMetricWithLabelValues(labels...) + if err != nil { return err - } else { - q := requests[resourceName] - v := float64(q.MilliValue()) / 1000 - c.Add(v * stateDuration) } + q := requests[resource] + v := float64(q.MilliValue()) / 1000 + c.Add(v) + cSeconds.Add(v * stateDuration.Seconds()) } return nil } -func stateDuration(start *time.Time, end *time.Time) time.Duration { - if start != nil && end != nil { - return end.Sub(*start) +// counterVectorsFromResource returns the counter and counterSeconds Vectors for the given resource name. +// If the counter and counterSeconds Vecs do not exist, they are created and stored in the resourceCounters map. 
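To make the flow in updateMetrics above concrete: a tracked resource is first renamed via ResourceRenaming, then validated against metricNameValidationRegex, and finally combined with the namespace and subsystem constants ("armada" and "scheduler") into the two family names. A standalone sketch (the renaming entry and "example.com/gpu" are illustrative, not from this patch):

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as metricNameValidationRegex above.
var valid = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)

// metricNames sketches the rename -> validate -> name-assembly flow.
func metricNames(resource string, renaming map[string]string) (string, string, bool) {
	if r, ok := renaming[resource]; ok {
		resource = r // e.g. "example.com/gpu" -> "gpu"
	}
	if !valid.MatchString(resource) {
		return "", "", false // the real code logs a warning and skips the resource
	}
	const namespace, subsystem = "armada", "scheduler"
	return fmt.Sprintf("%s_%s_%s_total", namespace, subsystem, resource),
		fmt.Sprintf("%s_%s_%s_seconds_total", namespace, subsystem, resource),
		true
}

func main() {
	renaming := map[string]string{"example.com/gpu": "gpu"} // hypothetical config
	c, s, _ := metricNames("cpu", renaming)
	fmt.Println(c) // armada_scheduler_cpu_total
	fmt.Println(s) // armada_scheduler_cpu_seconds_total
}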
+func (m *Metrics) counterVectorsFromResource(resource v1.ResourceName) (*prometheus.CounterVec, *prometheus.CounterVec) {
+	c, ok := m.resourceCounters[resource]
+	if !ok {
+		name := resource.String() + "_total"
+		c = prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      name,
+				Help:      resource.String() + " resource counter.",
+			},
+			[]string{"state", "category", "subCategory", "queue", "cluster", "nodeType", "node"},
+		)
+		m.resourceCounters[resource] = c
 	}
-	return time.Duration(0)
+
+	resourceSeconds := v1.ResourceName(resource.String() + "_seconds")
+	cSeconds, ok := m.resourceCounters[resourceSeconds]
+	if !ok {
+		name := resourceSeconds.String() + "_total"
+		cSeconds = prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      name,
+				Help:      resource.String() + "-second resource counter.",
+			},
+			[]string{"priorState", "state", "category", "subCategory", "queue", "cluster", "nodeType", "node"},
+		)
+		m.resourceCounters[resourceSeconds] = cSeconds
+	}
+	return c, cSeconds
 }
 
-func getPriorState(job *jobdb.Job, run *jobdb.JobRun, stateTime *time.Time) (string, *time.Time) {
+// stateDuration returns:
+// - the duration of the current state (stateTime - priorTime)
+// - the prior state name
+func stateDuration(job *jobdb.Job, run *jobdb.JobRun, stateTime *time.Time) (time.Duration, string) {
 	if stateTime == nil {
-		return "", nil
+		return 0, ""
 	}
 
 	queuedTime := time.Unix(0, job.Created())
@@ -571,5 +540,6 @@ func getPriorState(job *jobdb.Job, run *jobdb.JobRun, stateTime *time.Time) (str
 	}
 	// succeeded, failed, cancelled, preempted are not prior states
-	return prior, priorTime
+
+	return stateTime.Sub(*priorTime), prior
 }
diff --git a/internal/scheduler/metrics_test.go b/internal/scheduler/metrics_test.go
index e0a8bb59811..37b7df00e44 100644
--- a/internal/scheduler/metrics_test.go
+++ b/internal/scheduler/metrics_test.go
@@ -4,6 +4,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/armadaproject/armada/pkg/client/queue"
+
 	"github.com/golang/mock/gomock"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/stretchr/testify/assert"
@@ -13,7 +15,6 @@ import (
 	"github.com/armadaproject/armada/internal/common/armadacontext"
 	commonmetrics "github.com/armadaproject/armada/internal/common/metrics"
-	"github.com/armadaproject/armada/internal/scheduler/database"
 	"github.com/armadaproject/armada/internal/scheduler/jobdb"
 	schedulermocks "github.com/armadaproject/armada/internal/scheduler/mocks"
 	"github.com/armadaproject/armada/internal/scheduler/schedulerobjects"
@@ -34,12 +35,12 @@ func TestMetricsCollector_TestCollect_QueueMetrics(t *testing.T) {
 		initialJobs  []*jobdb.Job
 		defaultPool  string
 		poolMappings map[string]string
-		queues       []*database.Queue
+		queues       []queue.Queue
 		expected     []prometheus.Metric
 	}{
 		"queued metrics": {
 			initialJobs: queuedJobs,
-			queues:      []*database.Queue{testfixtures.TestDbQueue()},
+			queues:      []queue.Queue{testfixtures.MakeTestQueue()},
 			defaultPool: testfixtures.TestPool,
 			expected: []prometheus.Metric{
 				commonmetrics.NewQueueSizeMetric(3.0, testfixtures.TestQueue),
@@ -63,7 +64,7 @@ func TestMetricsCollector_TestCollect_QueueMetrics(t *testing.T) {
 		},
 		"running metrics": {
 			initialJobs: runningJobs,
-			queues:      []*database.Queue{testfixtures.TestDbQueue()},
+			queues:      []queue.Queue{testfixtures.MakeTestQueue()},
 			defaultPool: testfixtures.TestPool,
 			expected: []prometheus.Metric{
 				commonmetrics.NewQueueSizeMetric(0.0, testfixtures.TestQueue),
@@ -247,7 +248,7 @@ func 
TestMetricsCollector_TestCollect_ClusterMetrics(t *testing.T) { txn.Commit() queueRepository := schedulermocks.NewMockQueueRepository(ctrl) - queueRepository.EXPECT().GetAllQueues().Return([]*database.Queue{}, nil).Times(1) + queueRepository.EXPECT().GetAllQueues().Return([]queue.Queue{}, nil).Times(1) poolAssigner := &MockPoolAssigner{testfixtures.TestPool, map[string]string{}} executorRepository := schedulermocks.NewMockExecutorRepository(ctrl) diff --git a/internal/scheduler/mocks/executor_repository.go b/internal/scheduler/mocks/executor_repository.go new file mode 100644 index 00000000000..2dbc2e40542 --- /dev/null +++ b/internal/scheduler/mocks/executor_repository.go @@ -0,0 +1,81 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/armadaproject/armada/internal/scheduler/database (interfaces: ExecutorRepository) + +// Package schedulermocks is a generated GoMock package. +package schedulermocks + +import ( + reflect "reflect" + time "time" + + armadacontext "github.com/armadaproject/armada/internal/common/armadacontext" + schedulerobjects "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" + gomock "github.com/golang/mock/gomock" +) + +// MockExecutorRepository is a mock of ExecutorRepository interface. +type MockExecutorRepository struct { + ctrl *gomock.Controller + recorder *MockExecutorRepositoryMockRecorder +} + +// MockExecutorRepositoryMockRecorder is the mock recorder for MockExecutorRepository. +type MockExecutorRepositoryMockRecorder struct { + mock *MockExecutorRepository +} + +// NewMockExecutorRepository creates a new mock instance. +func NewMockExecutorRepository(ctrl *gomock.Controller) *MockExecutorRepository { + mock := &MockExecutorRepository{ctrl: ctrl} + mock.recorder = &MockExecutorRepositoryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockExecutorRepository) EXPECT() *MockExecutorRepositoryMockRecorder { + return m.recorder +} + +// GetExecutors mocks base method. +func (m *MockExecutorRepository) GetExecutors(arg0 *armadacontext.Context) ([]*schedulerobjects.Executor, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetExecutors", arg0) + ret0, _ := ret[0].([]*schedulerobjects.Executor) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetExecutors indicates an expected call of GetExecutors. +func (mr *MockExecutorRepositoryMockRecorder) GetExecutors(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExecutors", reflect.TypeOf((*MockExecutorRepository)(nil).GetExecutors), arg0) +} + +// GetLastUpdateTimes mocks base method. +func (m *MockExecutorRepository) GetLastUpdateTimes(arg0 *armadacontext.Context) (map[string]time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLastUpdateTimes", arg0) + ret0, _ := ret[0].(map[string]time.Time) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLastUpdateTimes indicates an expected call of GetLastUpdateTimes. +func (mr *MockExecutorRepositoryMockRecorder) GetLastUpdateTimes(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastUpdateTimes", reflect.TypeOf((*MockExecutorRepository)(nil).GetLastUpdateTimes), arg0) +} + +// StoreExecutor mocks base method. 
+func (m *MockExecutorRepository) StoreExecutor(arg0 *armadacontext.Context, arg1 *schedulerobjects.Executor) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StoreExecutor", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// StoreExecutor indicates an expected call of StoreExecutor. +func (mr *MockExecutorRepositoryMockRecorder) StoreExecutor(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreExecutor", reflect.TypeOf((*MockExecutorRepository)(nil).StoreExecutor), arg0, arg1) +} diff --git a/internal/scheduler/mocks/generate.go b/internal/scheduler/mocks/generate.go index 3e310b318a6..f6e54d48b9b 100644 --- a/internal/scheduler/mocks/generate.go +++ b/internal/scheduler/mocks/generate.go @@ -1,6 +1,8 @@ package schedulermocks // Mock implementations used by scheduler tests -//go:generate mockgen -destination=./mock_leases_getter.go -package=schedulermocks "k8s.io/client-go/kubernetes/typed/coordination/v1" LeasesGetter,LeaseInterface -//go:generate mockgen -destination=./mock_repositories.go -package=schedulermocks "github.com/armadaproject/armada/internal/scheduler/database" ExecutorRepository,QueueRepository,JobRepository -//go:generate mockgen -destination=./mock_grpc.go -package=schedulermocks "github.com/armadaproject/armada/pkg/executorapi" ExecutorApi_LeaseJobRunsServer +//go:generate mockgen -destination=./leases_getter.go -package=schedulermocks "k8s.io/client-go/kubernetes/typed/coordination/v1" LeasesGetter,LeaseInterface +//go:generate mockgen -destination=./job_repository.go -package=schedulermocks "github.com/armadaproject/armada/internal/scheduler/database" JobRepository +//go:generate mockgen -destination=./executor_repository.go -package=schedulermocks "github.com/armadaproject/armada/internal/scheduler/database" ExecutorRepository +//go:generate mockgen -destination=./queue_repository.go -package=schedulermocks "github.com/armadaproject/armada/internal/armada/repository" QueueRepository +//go:generate mockgen -destination=./grpc.go -package=schedulermocks "github.com/armadaproject/armada/pkg/executorapi" ExecutorApi_LeaseJobRunsServer diff --git a/internal/scheduler/mocks/mock_grpc.go b/internal/scheduler/mocks/grpc.go similarity index 100% rename from internal/scheduler/mocks/mock_grpc.go rename to internal/scheduler/mocks/grpc.go diff --git a/internal/scheduler/mocks/mock_repositories.go b/internal/scheduler/mocks/job_repository.go similarity index 53% rename from internal/scheduler/mocks/mock_repositories.go rename to internal/scheduler/mocks/job_repository.go index c2924402b9b..c5cbb4abb3f 100644 --- a/internal/scheduler/mocks/mock_repositories.go +++ b/internal/scheduler/mocks/job_repository.go @@ -1,126 +1,19 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/armadaproject/armada/internal/scheduler/database (interfaces: ExecutorRepository,QueueRepository,JobRepository) +// Source: github.com/armadaproject/armada/internal/scheduler/database (interfaces: JobRepository) // Package schedulermocks is a generated GoMock package. 
package schedulermocks import ( reflect "reflect" - time "time" armadacontext "github.com/armadaproject/armada/internal/common/armadacontext" database "github.com/armadaproject/armada/internal/scheduler/database" - schedulerobjects "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" armadaevents "github.com/armadaproject/armada/pkg/armadaevents" gomock "github.com/golang/mock/gomock" uuid "github.com/google/uuid" ) -// MockExecutorRepository is a mock of ExecutorRepository interface. -type MockExecutorRepository struct { - ctrl *gomock.Controller - recorder *MockExecutorRepositoryMockRecorder -} - -// MockExecutorRepositoryMockRecorder is the mock recorder for MockExecutorRepository. -type MockExecutorRepositoryMockRecorder struct { - mock *MockExecutorRepository -} - -// NewMockExecutorRepository creates a new mock instance. -func NewMockExecutorRepository(ctrl *gomock.Controller) *MockExecutorRepository { - mock := &MockExecutorRepository{ctrl: ctrl} - mock.recorder = &MockExecutorRepositoryMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockExecutorRepository) EXPECT() *MockExecutorRepositoryMockRecorder { - return m.recorder -} - -// GetExecutors mocks base method. -func (m *MockExecutorRepository) GetExecutors(arg0 *armadacontext.Context) ([]*schedulerobjects.Executor, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetExecutors", arg0) - ret0, _ := ret[0].([]*schedulerobjects.Executor) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetExecutors indicates an expected call of GetExecutors. -func (mr *MockExecutorRepositoryMockRecorder) GetExecutors(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExecutors", reflect.TypeOf((*MockExecutorRepository)(nil).GetExecutors), arg0) -} - -// GetLastUpdateTimes mocks base method. -func (m *MockExecutorRepository) GetLastUpdateTimes(arg0 *armadacontext.Context) (map[string]time.Time, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLastUpdateTimes", arg0) - ret0, _ := ret[0].(map[string]time.Time) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetLastUpdateTimes indicates an expected call of GetLastUpdateTimes. -func (mr *MockExecutorRepositoryMockRecorder) GetLastUpdateTimes(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastUpdateTimes", reflect.TypeOf((*MockExecutorRepository)(nil).GetLastUpdateTimes), arg0) -} - -// StoreExecutor mocks base method. -func (m *MockExecutorRepository) StoreExecutor(arg0 *armadacontext.Context, arg1 *schedulerobjects.Executor) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StoreExecutor", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// StoreExecutor indicates an expected call of StoreExecutor. -func (mr *MockExecutorRepositoryMockRecorder) StoreExecutor(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StoreExecutor", reflect.TypeOf((*MockExecutorRepository)(nil).StoreExecutor), arg0, arg1) -} - -// MockQueueRepository is a mock of QueueRepository interface. -type MockQueueRepository struct { - ctrl *gomock.Controller - recorder *MockQueueRepositoryMockRecorder -} - -// MockQueueRepositoryMockRecorder is the mock recorder for MockQueueRepository. 
-type MockQueueRepositoryMockRecorder struct { - mock *MockQueueRepository -} - -// NewMockQueueRepository creates a new mock instance. -func NewMockQueueRepository(ctrl *gomock.Controller) *MockQueueRepository { - mock := &MockQueueRepository{ctrl: ctrl} - mock.recorder = &MockQueueRepositoryMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockQueueRepository) EXPECT() *MockQueueRepositoryMockRecorder { - return m.recorder -} - -// GetAllQueues mocks base method. -func (m *MockQueueRepository) GetAllQueues() ([]*database.Queue, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAllQueues") - ret0, _ := ret[0].([]*database.Queue) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAllQueues indicates an expected call of GetAllQueues. -func (mr *MockQueueRepositoryMockRecorder) GetAllQueues() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllQueues", reflect.TypeOf((*MockQueueRepository)(nil).GetAllQueues)) -} - // MockJobRepository is a mock of JobRepository interface. type MockJobRepository struct { ctrl *gomock.Controller diff --git a/internal/scheduler/mocks/mock_leases_getter.go b/internal/scheduler/mocks/leases_getter.go similarity index 100% rename from internal/scheduler/mocks/mock_leases_getter.go rename to internal/scheduler/mocks/leases_getter.go diff --git a/internal/scheduler/mocks/queue_repository.go b/internal/scheduler/mocks/queue_repository.go new file mode 100644 index 00000000000..9e935fcfd9d --- /dev/null +++ b/internal/scheduler/mocks/queue_repository.go @@ -0,0 +1,107 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/armadaproject/armada/internal/armada/repository (interfaces: QueueRepository) + +// Package schedulermocks is a generated GoMock package. +package schedulermocks + +import ( + reflect "reflect" + + queue "github.com/armadaproject/armada/pkg/client/queue" + gomock "github.com/golang/mock/gomock" +) + +// MockQueueRepository is a mock of QueueRepository interface. +type MockQueueRepository struct { + ctrl *gomock.Controller + recorder *MockQueueRepositoryMockRecorder +} + +// MockQueueRepositoryMockRecorder is the mock recorder for MockQueueRepository. +type MockQueueRepositoryMockRecorder struct { + mock *MockQueueRepository +} + +// NewMockQueueRepository creates a new mock instance. +func NewMockQueueRepository(ctrl *gomock.Controller) *MockQueueRepository { + mock := &MockQueueRepository{ctrl: ctrl} + mock.recorder = &MockQueueRepositoryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockQueueRepository) EXPECT() *MockQueueRepositoryMockRecorder { + return m.recorder +} + +// CreateQueue mocks base method. +func (m *MockQueueRepository) CreateQueue(arg0 queue.Queue) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateQueue", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateQueue indicates an expected call of CreateQueue. +func (mr *MockQueueRepositoryMockRecorder) CreateQueue(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateQueue", reflect.TypeOf((*MockQueueRepository)(nil).CreateQueue), arg0) +} + +// DeleteQueue mocks base method. 
+func (m *MockQueueRepository) DeleteQueue(arg0 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteQueue", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteQueue indicates an expected call of DeleteQueue. +func (mr *MockQueueRepositoryMockRecorder) DeleteQueue(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteQueue", reflect.TypeOf((*MockQueueRepository)(nil).DeleteQueue), arg0) +} + +// GetAllQueues mocks base method. +func (m *MockQueueRepository) GetAllQueues() ([]queue.Queue, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllQueues") + ret0, _ := ret[0].([]queue.Queue) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllQueues indicates an expected call of GetAllQueues. +func (mr *MockQueueRepositoryMockRecorder) GetAllQueues() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllQueues", reflect.TypeOf((*MockQueueRepository)(nil).GetAllQueues)) +} + +// GetQueue mocks base method. +func (m *MockQueueRepository) GetQueue(arg0 string) (queue.Queue, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetQueue", arg0) + ret0, _ := ret[0].(queue.Queue) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetQueue indicates an expected call of GetQueue. +func (mr *MockQueueRepositoryMockRecorder) GetQueue(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQueue", reflect.TypeOf((*MockQueueRepository)(nil).GetQueue), arg0) +} + +// UpdateQueue mocks base method. +func (m *MockQueueRepository) UpdateQueue(arg0 queue.Queue) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateQueue", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateQueue indicates an expected call of UpdateQueue. +func (mr *MockQueueRepositoryMockRecorder) UpdateQueue(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateQueue", reflect.TypeOf((*MockQueueRepository)(nil).UpdateQueue), arg0) +} diff --git a/internal/scheduler/nodedb/encoding.go b/internal/scheduler/nodedb/encoding.go index a5d88716e80..b35b2db9bf9 100644 --- a/internal/scheduler/nodedb/encoding.go +++ b/internal/scheduler/nodedb/encoding.go @@ -8,12 +8,16 @@ import ( "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) -// NodeIndexKey returns a []byte to be used as a key with the NodeIndex memdb index with layout +// NodeIndexKey returns a []byte to be used as a key with the NodeIndex memdb index. +// This key should be used for lookup. Use the rounded version below for inserts. // -// 0 8 16 32 -// | nodeTypeId | resources[0] | resources[1] | ... | +// The layout of the key is: // -// where the numbers indicate number of bytes. +// 0 8 16 32 x x+8 +// | nodeTypeId | resources[0] | resources[1] | ... | nodeIndex | +// +// where the numbers indicate byte index. +// NodeIndex ensures each key is unique and so must be unique across all nodes. // // The key layout is such that an index ordered first by the nodeTypeId, then resources[0], and so on. // The byte representation is appended to out, which is returned. @@ -22,13 +26,24 @@ func NodeIndexKey(out []byte, nodeTypeId uint64, resources []resource.Quantity) for _, q := range resources { out = EncodeQuantity(out, q) } + // Because the key returned by this function should be used with a lower-bound operation on allocatable resources + // we set the nodeIndex to 0. 
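+	// For example (illustrative), a node stored under key
+	//   enc(nodeTypeId) | enc(cpu) | enc(memory) | enc(nodeIndex=17)
+	// compares strictly greater than the lookup key
+	//   enc(nodeTypeId) | enc(cpu) | enc(memory) | enc(0)
+	// built here, so a lower-bound seek from the lookup key still reaches it,
+	// and reaches each node exactly once since the nodeIndex suffix is unique.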
+ out = EncodeUint64(out, 0) return out } // RoundedNodeIndexKeyFromResourceList works like NodeIndexKey, except that prior to constructing the key // the i-th resource is rounded down to the closest multiple of resourceResolutionMillis[i]. +// This rounding makes iterating over nodes with at least some amount of available resources more efficient. // It also takes as arguments a list of resource names and a resourceList, instead of a list of resources. -func RoundedNodeIndexKeyFromResourceList(out []byte, nodeTypeId uint64, resourceNames []string, resourceResolutionMillis []int64, rl schedulerobjects.ResourceList) []byte { +func RoundedNodeIndexKeyFromResourceList( + out []byte, + nodeTypeId uint64, + resourceNames []string, + resourceResolutionMillis []int64, + rl schedulerobjects.ResourceList, + nodeIndex uint64, +) []byte { out = EncodeUint64(out, nodeTypeId) for i, name := range resourceNames { resolution := resourceResolutionMillis[i] @@ -36,6 +51,7 @@ func RoundedNodeIndexKeyFromResourceList(out []byte, nodeTypeId uint64, resource q = roundQuantityToResolution(q, resolution) out = EncodeQuantity(out, q) } + out = EncodeUint64(out, nodeIndex) return out } @@ -52,7 +68,7 @@ func EncodeQuantity(out []byte, val resource.Quantity) []byte { return EncodeInt64(out, val.MilliValue()) } -// EncodeInt64 returns the canonical byte representation of a int64 used within the nodeDb. +// EncodeInt64 returns the canonical byte representation of an int64 used within the nodeDb. // The resulting []byte is such that for two int64 a and b, a.Cmp(b) = bytes.Compare(enc(a), enc(b)). // The byte representation is appended to out, which is returned. func EncodeInt64(out []byte, val int64) []byte { @@ -65,6 +81,10 @@ func EncodeInt64(out []byte, val int64) []byte { // becomes the maximum positive uint. scaled := val ^ int64(-1<<(size*8-1)) + // TODO(albin): It's possible (though unlikely) that this shifting causes nodeType clashes, + // since they're computed by hashing labels etc. and so may be big integers. + // This would reduce the efficiency of nodeType indexing but shouldn't affect correctness. 
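+	// Worked example of the sign-bit flip above (illustrative):
+	//   -1 ^ (-1 << 63) = 0x7fffffffffffffff
+	//    0 ^ (-1 << 63) = 0x8000000000000000
+	//    1 ^ (-1 << 63) = 0x8000000000000001
+	// Unsigned big-endian byte order therefore coincides with signed numeric
+	// order, which is what makes bytes.Compare(enc(a), enc(b)) agree with
+	// comparing a and b directly.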
+ binary.BigEndian.PutUint64(out[len(out)-8:], uint64(scaled)) return out } diff --git a/internal/scheduler/nodedb/encoding_test.go b/internal/scheduler/nodedb/encoding_test.go index 7844712afeb..c6e3a53e7c9 100644 --- a/internal/scheduler/nodedb/encoding_test.go +++ b/internal/scheduler/nodedb/encoding_test.go @@ -99,6 +99,119 @@ func TestEncodeQuantity(t *testing.T) { } } +func TestRoundQuantityToResolution(t *testing.T) { + tests := map[string]struct { + q resource.Quantity + resolutionMillis int64 + expected resource.Quantity + }{ + "1Ki": { + q: resource.MustParse("1Ki"), + resolutionMillis: 1, + expected: resource.MustParse("1Ki"), + }, + "resolution equal to quantity": { + q: resource.MustParse("1Ki"), + resolutionMillis: 1024 * 1000, + expected: resource.MustParse("1Ki"), + }, + "0": { + q: resource.MustParse("0"), + resolutionMillis: 1, + expected: resource.MustParse("0"), + }, + "1m": { + q: resource.MustParse("1m"), + resolutionMillis: 1, + expected: resource.MustParse("1m"), + }, + "1": { + q: resource.MustParse("1"), + resolutionMillis: 1, + expected: resource.MustParse("1"), + }, + "resolution 3": { + q: resource.MustParse("1"), + resolutionMillis: 3, + expected: resource.MustParse("999m"), + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + qc := tc.q.DeepCopy() + actual := roundQuantityToResolution(tc.q, tc.resolutionMillis) + assert.True(t, qc.Equal(tc.q)) + assert.Truef(t, actual.Equal(tc.expected), "expected %s, but got %s", tc.expected.String(), actual.String()) + + qDec := tc.q.DeepCopy() + qDec.ToDec() + qDecCopy := qDec.DeepCopy() + actualDec := roundQuantityToResolution(qDec, tc.resolutionMillis) + assert.True(t, qDecCopy.Equal(qDec)) + assert.Truef(t, actualDec.Equal(tc.expected), "expected %s, but got %s", tc.expected.String(), actual.String()) + }) + } +} + +func TestNodeIndexKeyComparison(t *testing.T) { + v1 := resource.MustParse("1") + actualRoundedKey := RoundedNodeIndexKeyFromResourceList( + nil, + 0, + []string{ + "cpu", + "memory", + "nvidia.com/gpu", + "nvidia.com/mig-1g.10gb", + "nvidia.com/mig-1g.20gb", + "nvidia.com/mig-1g.40gb", + }, + []int64{ + v1.MilliValue(), + v1.MilliValue(), + v1.MilliValue(), + v1.MilliValue(), + v1.MilliValue(), + v1.MilliValue(), + }, + schedulerobjects.ResourceList{ + Resources: map[string]resource.Quantity{ + "cpu": *resource.NewScaledQuantity(999958006, -9), + "memory": *resource.NewScaledQuantity(11823681536, 0), + "nvidia.com/gpu": *resource.NewScaledQuantity(0, 0), + "nvidia.com/mig-1g.10gb": *resource.NewScaledQuantity(0, 0), + "nvidia.com/mig-1g.20gb": *resource.NewScaledQuantity(0, 0), + "nvidia.com/mig-1g.40gb": *resource.NewScaledQuantity(0, 0), + }, + }, + 0, + ) + actualKey := NodeIndexKey( + nil, + 0, + []resource.Quantity{ + *resource.NewScaledQuantity(999958006, -9), + *resource.NewScaledQuantity(11823681536, 0), + *resource.NewScaledQuantity(0, 0), + *resource.NewScaledQuantity(0, 0), + *resource.NewScaledQuantity(0, 0), + *resource.NewScaledQuantity(0, 0), + }, + ) + expected := []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // nodeTypeId + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, // cpu + 0x80, 0x00, 0x0a, 0xc0, 0xea, 0x56, 0x80, 0x00, // memory + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // nvidia.com.gpu + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // nvidia.com/mig-1g.10gb + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // nvidia.com/mig-1g.20gb + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // nvidia.com/mig-1g.40gb + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, // nodeIndex + } + assert.Equal(t, expected, actualRoundedKey) + assert.Equal(t, expected, actualKey) +} + func TestNodeIndexKey(t *testing.T) { type nodeIndexKeyValues struct { nodeTypeId uint64 @@ -205,6 +318,7 @@ func TestRoundedNodeIndexKeyFromResourceList(t *testing.T) { schedulerobjects.ResourceList{ Resources: map[string]resource.Quantity{"foo": resource.MustParse("1"), "bar": resource.MustParse("2")}, }, + 0, ), ) assert.NotEqual( @@ -218,6 +332,7 @@ func TestRoundedNodeIndexKeyFromResourceList(t *testing.T) { schedulerobjects.ResourceList{ Resources: map[string]resource.Quantity{"foo": resource.MustParse("1"), "bar": resource.MustParse("2")}, }, + 0, ), ) } diff --git a/internal/scheduler/nodedb/nodedb.go b/internal/scheduler/nodedb/nodedb.go index 6bc5be98218..49eb1ec4903 100644 --- a/internal/scheduler/nodedb/nodedb.go +++ b/internal/scheduler/nodedb/nodedb.go @@ -39,9 +39,15 @@ const ( var empty struct{} type Node struct { - Id string - Name string + // Unique id and index of this node. + // TODO(albin): Having both id and index is redundant. + // Currently, the id is "cluster name" + "node name" and index an integer assigned on node creation. + Id string + Index uint64 + + // Executor this node belongs to and node name, which must be unique per executor. Executor string + Name string // We need to store taints and labels separately from the node type: the latter only includes // indexed taints and labels, but we need all of them when checking pod requirements. @@ -65,9 +71,11 @@ type Node struct { // shallow copies of fields that are not mutated by methods of NodeDb. func (node *Node) UnsafeCopy() *Node { return &Node{ - Id: node.Id, - Name: node.Name, + Id: node.Id, + Index: node.Index, + Executor: node.Executor, + Name: node.Name, Taints: node.Taints, Labels: node.Labels, @@ -139,16 +147,19 @@ func (nodeDb *NodeDb) create(node *schedulerobjects.Node) (*Node, error) { nodeDb.indexedNodeLabelValues[key][value] = empty } } + index := uint64(nodeDb.numNodes) nodeDb.numNodes++ nodeDb.numNodesByNodeType[nodeType.Id]++ nodeDb.totalResources.Add(totalResources) nodeDb.nodeTypes[nodeType.Id] = nodeType nodeDb.mu.Unlock() - entry := &Node{ - Id: node.Id, - Name: node.Name, + return &Node{ + Id: node.Id, + Index: index, + Executor: node.Executor, + Name: node.Name, Taints: taints, Labels: labels, @@ -163,8 +174,7 @@ func (nodeDb *NodeDb) create(node *schedulerobjects.Node) (*Node, error) { AllocatedByQueue: allocatedByQueue, AllocatedByJobId: allocatedByJobId, EvictedJobRunIds: evictedJobRunIds, - } - return entry, nil + }, nil } func (nodeDb *NodeDb) CreateAndInsertWithApiJobsWithTxn(txn *memdb.Txn, jobs []*api.Job, node *schedulerobjects.Node) error { @@ -256,8 +266,10 @@ type NodeDb struct { // // Lower resolution makes scheduling faster, but may lead to jobs incorrectly being considered unschedulable. indexedResourceResolutionMillis []int64 - // Map from priority class priority to the index tracking allocatable resources at that priority. + // Map from priority class priority to the database index tracking allocatable resources at that priority. indexNameByPriority map[int32]string + // Map from priority class priority to the index of node.keys corresponding to that priority. + keyIndexByPriority map[int32]int // Taint keys that to create indexes for. // Should include taints frequently used for scheduling. 
// Since the NodeDb can efficiently sort out nodes with taints not tolerated @@ -317,7 +329,7 @@ func NewNodeDb( nodeDbPriorities = append(nodeDbPriorities, types.AllowedPriorities(priorityClasses)...) indexedResourceNames := util.Map(indexedResources, func(v configuration.IndexedResource) string { return v.Name }) - schema, indexNameByPriority := nodeDbSchema(nodeDbPriorities, indexedResourceNames) + schema, indexNameByPriority, keyIndexByPriority := nodeDbSchema(nodeDbPriorities, indexedResourceNames) db, err := memdb.NewMemDB(schema) if err != nil { return nil, errors.WithStack(err) @@ -359,6 +371,7 @@ func NewNodeDb( func(v configuration.IndexedResource) int64 { return v.Resolution.MilliValue() }, ), indexNameByPriority: indexNameByPriority, + keyIndexByPriority: keyIndexByPriority, indexedTaints: mapFromSlice(indexedTaints), indexedNodeLabels: mapFromSlice(indexedNodeLabels), indexedNodeLabelValues: indexedNodeLabelValues, @@ -432,7 +445,7 @@ func (nodeDb *NodeDb) IndexedNodeLabelValues(label string) (map[string]struct{}, func (nodeDb *NodeDb) NumNodes() int { nodeDb.mu.Lock() defer nodeDb.mu.Unlock() - return nodeDb.numNodes + return int(nodeDb.numNodes) } func (nodeDb *NodeDb) TotalResources() schedulerobjects.ResourceList { @@ -561,12 +574,17 @@ func deleteEvictedJobSchedulingContextIfExistsWithTxn(txn *memdb.Txn, jobId stri // SelectNodeForJobWithTxn selects a node on which the job can be scheduled. func (nodeDb *NodeDb) SelectNodeForJobWithTxn(txn *memdb.Txn, jctx *schedulercontext.JobSchedulingContext) (*Node, error) { req := jctx.PodRequirements - priorityClass := interfaces.PriorityClassFromLegacySchedulerJob(nodeDb.priorityClasses, nodeDb.defaultPriorityClass, jctx.Job) + // If the job has already been scheduled, get the priority at which it was scheduled. + // Otherwise, get the original priority the job was submitted with. + priority, ok := nodeDb.GetScheduledAtPriority(jctx.JobId) + if !ok { + priority = req.Priority + } pctx := &schedulercontext.PodSchedulingContext{ Created: time.Now(), - ScheduledAtPriority: -1, + ScheduledAtPriority: priority, PreemptedAtPriority: MinPriority, NumNodes: nodeDb.numNodes, NumExcludedNodesByReason: make(map[string]int), @@ -593,14 +611,6 @@ func (nodeDb *NodeDb) SelectNodeForJobWithTxn(txn *memdb.Txn, jctx *schedulercon if it, err := txn.Get("nodes", "id", nodeId); err != nil { return nil, errors.WithStack(err) } else { - priority, ok := nodeDb.GetScheduledAtPriority(jctx.JobId) - if !ok { - // We only get here if the node ID label was set by the user - // (instead of the scheduler); home-away preemption is ignored - // in that case. 
- priority = req.Priority - } - pctx.ScheduledAtPriority = priority if node, err := nodeDb.selectNodeForPodWithItAtPriority(it, jctx, priority, true); err != nil { return nil, err } else { @@ -609,7 +619,7 @@ func (nodeDb *NodeDb) SelectNodeForJobWithTxn(txn *memdb.Txn, jctx *schedulercon } } - node, err := nodeDb.selectNodeForJobWithTxnAtPriority(txn, jctx, req.Priority) + node, err := nodeDb.selectNodeForJobWithTxnAtPriority(txn, jctx) if err != nil { return nil, err } @@ -659,25 +669,22 @@ func (nodeDb *NodeDb) selectNodeForJobWithTxnAndAwayNodeType( jctx.AdditionalTolerations = append(jctx.AdditionalTolerations, v1.Toleration{Key: taint.Key, Value: taint.Value, Effect: taint.Effect}) } - node, err = nodeDb.selectNodeForJobWithTxnAtPriority(txn, jctx, awayNodeType.Priority) + jctx.PodSchedulingContext.ScheduledAtPriority = awayNodeType.Priority + node, err = nodeDb.selectNodeForJobWithTxnAtPriority(txn, jctx) return } func (nodeDb *NodeDb) selectNodeForJobWithTxnAtPriority( txn *memdb.Txn, jctx *schedulercontext.JobSchedulingContext, - priority int32, ) (*Node, error) { - req := jctx.PodRequirements + pctx := jctx.PodSchedulingContext matchingNodeTypeIds, numExcludedNodesByReason, err := nodeDb.NodeTypesMatchingJob(jctx) if err != nil { return nil, err } - pctx := jctx.PodSchedulingContext - pctx.ScheduledAtPriority = priority - // Try scheduling at evictedPriority. If this succeeds, no preemption is necessary. pctx.NumExcludedNodesByReason = maps.Clone(numExcludedNodesByReason) if node, err := nodeDb.selectNodeForPodAtPriority(txn, jctx, matchingNodeTypeIds, evictedPriority); err != nil { @@ -691,7 +698,7 @@ func (nodeDb *NodeDb) selectNodeForJobWithTxnAtPriority( // Try scheduling at the job priority. If this fails, scheduling is impossible and we return. // This is an optimisation to avoid looking for preemption targets for unschedulable jobs. pctx.NumExcludedNodesByReason = maps.Clone(numExcludedNodesByReason) - if node, err := nodeDb.selectNodeForPodAtPriority(txn, jctx, matchingNodeTypeIds, req.Priority); err != nil { + if node, err := nodeDb.selectNodeForPodAtPriority(txn, jctx, matchingNodeTypeIds, pctx.ScheduledAtPriority); err != nil { return nil, err } else if err := assertPodSchedulingContextNode(pctx, node); err != nil { return nil, err @@ -746,7 +753,6 @@ func (nodeDb *NodeDb) selectNodeForJobWithUrgencyPreemption( jctx *schedulercontext.JobSchedulingContext, matchingNodeTypeIds []uint64, ) (*Node, error) { - req := jctx.PodRequirements pctx := jctx.PodSchedulingContext numExcludedNodesByReason := pctx.NumExcludedNodesByReason for _, priority := range nodeDb.nodeDbPriorities { @@ -755,7 +761,9 @@ func (nodeDb *NodeDb) selectNodeForJobWithUrgencyPreemption( continue } - if priority > req.Priority { + // Using pctx.ScheduledAtPriority instead of jctx.PodRequirements.Priority, + // since the pctx.ScheduledAtPriority may differ, e.g., in case of home-away scheduling. 
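+	// For example (illustrative numbers): a job submitted at priority 30000 but
+	// scheduled away at priority 900 should only try scheduling at priorities up
+	// to 900; trying priorities up to 30000 would let it displace jobs an away
+	// job is not entitled to preempt.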
+ if priority > pctx.ScheduledAtPriority { break } @@ -791,11 +799,16 @@ func (nodeDb *NodeDb) selectNodeForPodAtPriority( if !ok { return nil, errors.Errorf("no index for priority %d; must be in %v", priority, nodeDb.indexNameByPriority) } + keyIndex, ok := nodeDb.keyIndexByPriority[priority] + if !ok { + return nil, errors.Errorf("no key index for priority %d; must be in %v", priority, nodeDb.keyIndexByPriority) + } it, err := NewNodeTypesIterator( txn, matchingNodeTypeIds, indexName, priority, + keyIndex, nodeDb.indexedResources, indexResourceRequests, nodeDb.indexedResourceResolutionMillis, @@ -905,13 +918,21 @@ func (nodeDb *NodeDb) selectNodeForJobWithFairPreemption(txn *memdb.Txn, jctx *s } nodesById[nodeId] = node evictedJobSchedulingContextsByNodeId[nodeId] = append(evictedJobSchedulingContextsByNodeId[nodeId], evictedJobSchedulingContext) - if priority := evictedJctx.PodRequirements.Priority; priority > maxPriority { + + priority, ok := nodeDb.GetScheduledAtPriority(evictedJctx.JobId) + if !ok { + priority = evictedJctx.PodRequirements.Priority + } + if priority > maxPriority { maxPriority = priority } matches, _, reason, err := JobRequirementsMet( node.Taints, node.Labels, node.TotalResources, + // At this point, we've unbound the jobs running on the node. + // Hence, we should check if the job is schedulable at evictedPriority, + // since that indicates the job can be scheduled without causing further preemptions. node.AllocatableByPriority[evictedPriority], jctx, ) @@ -1158,7 +1179,7 @@ func (nodeDb *NodeDb) Upsert(node *Node) error { func (nodeDb *NodeDb) UpsertWithTxn(txn *memdb.Txn, node *Node) error { keys := make([][]byte, len(nodeDb.nodeDbPriorities)) for i, p := range nodeDb.nodeDbPriorities { - keys[i] = nodeDb.nodeDbKey(keys[i], node.NodeTypeId, node.AllocatableByPriority[p]) + keys[i] = nodeDb.nodeDbKey(keys[i], node.NodeTypeId, node.AllocatableByPriority[p], node.Index) } node.Keys = keys @@ -1204,18 +1225,18 @@ func (nodeDb *NodeDb) AddEvictedJobSchedulingContextWithTxn(txn *memdb.Txn, inde return nil } -func nodeDbSchema(priorities []int32, resources []string) (*memdb.DBSchema, map[int32]string) { - nodesTable, indexNameByPriority := nodesTableSchema(priorities, resources) +func nodeDbSchema(priorities []int32, resources []string) (*memdb.DBSchema, map[int32]string, map[int32]int) { + nodesTable, indexNameByPriority, keyIndexByPriority := nodesTableSchema(priorities, resources) evictionsTable := evictionsTableSchema() return &memdb.DBSchema{ Tables: map[string]*memdb.TableSchema{ nodesTable.Name: nodesTable, evictionsTable.Name: evictionsTable, }, - }, indexNameByPriority + }, indexNameByPriority, keyIndexByPriority } -func nodesTableSchema(priorities []int32, resources []string) (*memdb.TableSchema, map[int32]string) { +func nodesTableSchema(priorities []int32, resources []string) (*memdb.TableSchema, map[int32]string, map[int32]int) { indexes := make(map[string]*memdb.IndexSchema, len(priorities)+1) indexes["id"] = &memdb.IndexSchema{ Name: "id", @@ -1223,19 +1244,21 @@ func nodesTableSchema(priorities []int32, resources []string) (*memdb.TableSchem Indexer: &memdb.StringFieldIndex{Field: "Id"}, } indexNameByPriority := make(map[int32]string, len(priorities)) + keyIndexByPriority := make(map[int32]int, len(priorities)) for i, priority := range priorities { name := nodeIndexName(i) indexNameByPriority[priority] = name + keyIndexByPriority[priority] = i indexes[name] = &memdb.IndexSchema{ Name: name, - Unique: false, + Unique: true, Indexer: &NodeIndex{KeyIndex: 
i}, } } return &memdb.TableSchema{ Name: "nodes", Indexes: indexes, - }, indexNameByPriority + }, indexNameByPriority, keyIndexByPriority } func evictionsTableSchema() *memdb.TableSchema { @@ -1278,12 +1301,13 @@ func (nodeDb *NodeDb) stringFromPodRequirementsNotMetReason(reason PodRequiremen // nodeDbKey returns the index key for a particular node. // Allocatable resources are rounded down to the closest multiple of nodeDb.indexedResourceResolutionMillis. // This improves efficiency by reducing the number of distinct values in the index. -func (nodeDb *NodeDb) nodeDbKey(out []byte, nodeTypeId uint64, allocatable schedulerobjects.ResourceList) []byte { +func (nodeDb *NodeDb) nodeDbKey(out []byte, nodeTypeId uint64, allocatable schedulerobjects.ResourceList, nodeIndex uint64) []byte { return RoundedNodeIndexKeyFromResourceList( out, nodeTypeId, nodeDb.indexedResources, nodeDb.indexedResourceResolutionMillis, allocatable, + nodeIndex, ) } diff --git a/internal/scheduler/nodedb/nodedb_test.go b/internal/scheduler/nodedb/nodedb_test.go index 6b12c581115..4fda0e6814e 100644 --- a/internal/scheduler/nodedb/nodedb_test.go +++ b/internal/scheduler/nodedb/nodedb_test.go @@ -23,10 +23,83 @@ import ( ) func TestNodeDbSchema(t *testing.T) { - schema, _ := nodeDbSchema(testfixtures.TestPriorities, testfixtures.TestResourceNames) + schema, _, _ := nodeDbSchema(testfixtures.TestPriorities, testfixtures.TestResourceNames) assert.NoError(t, schema.Validate()) } +func TestNodeUnsafeCopy(t *testing.T) { + node := &Node{ + Id: "id", + Index: 1, + Executor: "executor", + Name: "name", + Taints: []v1.Taint{ + { + Key: "foo", + Value: "bar", + }, + }, + Labels: map[string]string{ + "key": "value", + }, + TotalResources: schedulerobjects.ResourceList{ + Resources: map[string]resource.Quantity{ + "cpu": resource.MustParse("16"), + "memory": resource.MustParse("32Gi"), + }, + }, + Keys: [][]byte{ + { + 0, 1, 255, + }, + }, + NodeTypeId: 123, + AllocatableByPriority: schedulerobjects.AllocatableByPriorityAndResourceType{ + 1: { + Resources: map[string]resource.Quantity{ + "cpu": resource.MustParse("0"), + "memory": resource.MustParse("0Gi"), + }, + }, + 2: { + Resources: map[string]resource.Quantity{ + "cpu": resource.MustParse("8"), + "memory": resource.MustParse("16Gi"), + }, + }, + 3: { + Resources: map[string]resource.Quantity{ + "cpu": resource.MustParse("16"), + "memory": resource.MustParse("32Gi"), + }, + }, + }, + AllocatedByQueue: map[string]schedulerobjects.ResourceList{ + "queue": { + Resources: map[string]resource.Quantity{ + "cpu": resource.MustParse("8"), + "memory": resource.MustParse("16Gi"), + }, + }, + }, + AllocatedByJobId: map[string]schedulerobjects.ResourceList{ + "jobId": { + Resources: map[string]resource.Quantity{ + "cpu": resource.MustParse("8"), + "memory": resource.MustParse("16Gi"), + }, + }, + }, + EvictedJobRunIds: map[string]bool{ + "jobId": false, + "evictedJobId": true, + }, + } + nodeCopy := node.UnsafeCopy() + // TODO(albin): Add more tests here. + assert.Equal(t, node.Id, nodeCopy.Id) +} + // Test the accounting of total resources across all nodes. 
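The NodeTypeIterator changes in nodeiteration.go further below are built on go-memdb's lower-bound scans. As a self-contained illustration of that primitive (assuming hashicorp/go-memdb, which the nodeDb already uses, and a plain StringFieldIndex standing in for the custom NodeIndex indexer), the following sketch seeks to the smallest key at or above a bound and then scans upward in key order:

package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

type node struct {
	Id string
}

func main() {
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"nodes": {
				Name: "nodes",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:    "id",
						Unique:  true,
						Indexer: &memdb.StringFieldIndex{Field: "Id"},
					},
				},
			},
		},
	}
	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	txn := db.Txn(true)
	for _, id := range []string{"node-1", "node-2", "node-3"} {
		if err := txn.Insert("nodes", &node{Id: id}); err != nil {
			panic(err)
		}
	}
	txn.Commit()

	// Seek to the first entry with id >= "node-2", then scan in key order.
	readTxn := db.Txn(false)
	it, err := readTxn.LowerBound("nodes", "id", "node-2")
	if err != nil {
		panic(err)
	}
	for v := it.Next(); v != nil; v = it.Next() {
		fmt.Println(v.(*node).Id) // prints node-2, then node-3
	}
}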
func TestTotalResources(t *testing.T) { nodeDb, err := newNodeDbWithNodes([]*schedulerobjects.Node{}) diff --git a/internal/scheduler/nodedb/nodeiteration.go b/internal/scheduler/nodedb/nodeiteration.go index fb2715c6676..304dc27a28e 100644 --- a/internal/scheduler/nodedb/nodeiteration.go +++ b/internal/scheduler/nodedb/nodeiteration.go @@ -3,10 +3,10 @@ package nodedb import ( "bytes" "container/heap" - "fmt" "github.com/hashicorp/go-memdb" "github.com/pkg/errors" + log "github.com/sirupsen/logrus" "golang.org/x/exp/slices" "k8s.io/apimachinery/pkg/api/resource" ) @@ -157,6 +157,7 @@ func NewNodeTypesIterator( nodeTypeIds []uint64, indexName string, priority int32, + keyIndex int, indexedResources []string, indexedResourceRequests []resource.Quantity, indexedResourceResolutionMillis []int64, @@ -172,6 +173,7 @@ func NewNodeTypesIterator( nodeTypeId, indexName, priority, + keyIndex, indexedResources, indexedResourceRequests, indexedResourceResolutionMillis, @@ -291,6 +293,10 @@ type NodeTypeIterator struct { nodeTypeId uint64 // Priority at which to consider allocatable resources on the node. priority int32 + // Used to index into node.keys to assert that keys are always increasing. + // This to detect if the iterator gets stuck. + // TODO(albin): With better testing we should be able to remove this. + keyIndex int // Name of the memdb index used for node iteration. // Should correspond to the priority set for this iterator. indexName string @@ -303,13 +309,19 @@ type NodeTypeIterator struct { // Current lower bound on node allocatable resources looked for. // Updated in-place as the iterator makes progress. lowerBound []resource.Quantity + // Tentative lower-bound. + newLowerBound []resource.Quantity // memdb key computed from nodeTypeId and lowerBound. // Stored here to avoid dynamic allocs. key []byte + // Key for newLowerBound. + newKey []byte // Current iterator into the underlying memdb. // Updated in-place whenever lowerBound changes. - memdbIterator memdb.ResultIterator - previousNodeId string + memdbIterator memdb.ResultIterator + // Used to detect if the iterator gets stuck in a loop. + previousKey []byte + previousNode *Node } func NewNodeTypeIterator( @@ -317,6 +329,7 @@ func NewNodeTypeIterator( nodeTypeId uint64, indexName string, priority int32, + keyIndex int, indexedResources []string, indexedResourceRequests []resource.Quantity, indexedResourceResolutionMillis []int64, @@ -327,15 +340,20 @@ func NewNodeTypeIterator( if len(indexedResources) != len(indexedResourceResolutionMillis) { return nil, errors.Errorf("indexedResources and indexedResourceResolutionMillis are not of equal length") } + if keyIndex < 0 { + return nil, errors.Errorf("keyIndex is negative: %d", keyIndex) + } it := &NodeTypeIterator{ txn: txn, nodeTypeId: nodeTypeId, priority: priority, + keyIndex: keyIndex, indexName: indexName, indexedResources: indexedResources, indexedResourceRequests: indexedResourceRequests, indexedResourceResolutionMillis: indexedResourceResolutionMillis, lowerBound: slices.Clone(indexedResourceRequests), + newLowerBound: slices.Clone(indexedResourceRequests), } memdbIt, err := it.newNodeTypeIterator() if err != nil { @@ -346,6 +364,7 @@ func NewNodeTypeIterator( } func (it *NodeTypeIterator) newNodeTypeIterator() (memdb.ResultIterator, error) { + // TODO(albin): We're re-computing the key unnecessarily here. 
it.key = NodeIndexKey(it.key[0:0], it.nodeTypeId, it.lowerBound) memdbIt, err := it.txn.LowerBound( "nodes", @@ -377,10 +396,18 @@ func (it *NodeTypeIterator) NextNode() (*Node, error) { return nil, nil } node := v.(*Node) - if node.Id == it.previousNodeId { - panic(fmt.Sprintf("iterator received the same node twice consecutively: %s", node.Id)) + if it.keyIndex >= len(node.Keys) { + return nil, errors.Errorf("keyIndex is %d, but node %s has only %d keys", it.keyIndex, node.Id, len(node.Keys)) + } + nodeKey := node.Keys[it.keyIndex] + if it.previousKey != nil && bytes.Compare(it.previousKey, nodeKey) != -1 { + return nil, errors.Errorf( + "iteration loop detected: key %x of node %#v is not greater than key %x of node %#v", + nodeKey, node, it.previousKey, it.previousNode, + ) } - it.previousNodeId = node.Id + it.previousKey = nodeKey + it.previousNode = node if node.NodeTypeId != it.nodeTypeId { // There are no more nodes of this nodeType. return nil, nil @@ -390,16 +417,31 @@ func (it *NodeTypeIterator) NextNode() (*Node, error) { return nil, errors.Errorf("node %s has no resources registered at priority %d: %v", node.Id, it.priority, node.AllocatableByPriority) } for i, t := range it.indexedResources { - nodeQuantity := allocatableByPriority.Get(t) - requestQuantity := it.indexedResourceRequests[i] - it.lowerBound[i] = roundQuantityToResolution(nodeQuantity, it.indexedResourceResolutionMillis[i]) + nodeQuantity := allocatableByPriority.Get(t).DeepCopy() + requestQuantity := it.indexedResourceRequests[i].DeepCopy() + it.newLowerBound[i] = roundQuantityToResolution(nodeQuantity, it.indexedResourceResolutionMillis[i]) // If nodeQuantity < requestQuantity, replace the iterator using the lowerBound. // If nodeQuantity >= requestQuantity for all resources, return the node. if nodeQuantity.Cmp(requestQuantity) == -1 { for j := i; j < len(it.indexedResources); j++ { - it.lowerBound[j] = it.indexedResourceRequests[j] + it.newLowerBound[j] = it.indexedResourceRequests[j] } + + it.newKey = NodeIndexKey(it.newKey[0:0], it.nodeTypeId, it.newLowerBound) + if bytes.Compare(it.key, it.newKey) == -1 { + // TODO(albin): Temporary workaround. Shouldn't be necessary. 
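+	// Swapping lowerBound and newLowerBound (rather than copying) reuses the
+	// slice that held the old bound as scratch space for the next tentative
+	// bound, avoiding an allocation per seek.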
+ lowerBound := it.lowerBound + it.lowerBound = it.newLowerBound + it.newLowerBound = lowerBound + } else { + log.Warnf( + "new lower-bound %x is not greater than current bound %x", + it.newKey, it.key, + ) + break + } + memdbIterator, err := it.newNodeTypeIterator() if err != nil { return nil, err diff --git a/internal/scheduler/nodedb/nodeiteration_test.go b/internal/scheduler/nodedb/nodeiteration_test.go index 5c23f158a88..c07425953d4 100644 --- a/internal/scheduler/nodedb/nodeiteration_test.go +++ b/internal/scheduler/nodedb/nodeiteration_test.go @@ -451,6 +451,7 @@ func TestNodeTypeIterator(t *testing.T) { tc.nodeTypeId, nodeIndexName(keyIndex), tc.priority, + keyIndex, testfixtures.TestResourceNames, indexedResourceRequests, testfixtures.TestIndexedResourceResolutionMillis, @@ -834,6 +835,7 @@ func TestNodeTypesIterator(t *testing.T) { tc.nodeTypeIds, nodeDb.indexNameByPriority[tc.priority], tc.priority, + nodeDb.keyIndexByPriority[tc.priority], testfixtures.TestResourceNames, indexedResourceRequests, testfixtures.TestIndexedResourceResolutionMillis, @@ -895,7 +897,16 @@ func BenchmarkNodeTypeIterator(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - it, err := NewNodeTypeIterator(txn, nodeTypeId, nodeDb.indexNameByPriority[priority], priority, nodeDb.indexedResources, indexedResourceRequests, testfixtures.TestIndexedResourceResolutionMillis) + it, err := NewNodeTypeIterator( + txn, + nodeTypeId, + nodeDb.indexNameByPriority[priority], + priority, + nodeDb.keyIndexByPriority[priority], + nodeDb.indexedResources, + indexedResourceRequests, + testfixtures.TestIndexedResourceResolutionMillis, + ) require.NoError(b, err) for { node, err := it.NextNode() diff --git a/internal/scheduler/preempting_queue_scheduler.go b/internal/scheduler/preempting_queue_scheduler.go index 2f1c0dadbaf..ee83023cbb0 100644 --- a/internal/scheduler/preempting_queue_scheduler.go +++ b/internal/scheduler/preempting_queue_scheduler.go @@ -9,6 +9,7 @@ import ( "github.com/pkg/errors" "golang.org/x/exp/maps" "golang.org/x/exp/slices" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "github.com/armadaproject/armada/internal/common/armadacontext" @@ -852,12 +853,20 @@ func (evi *Evictor) Evict(ctx *armadacontext.Context, nodeDbTxn *memdb.Txn) (*Ev } for _, job := range evictedJobs { - // Create a scheduling context for when re-scheduling this job. - // Mark as evicted and add a node selector to ensure the job is re-scheduled onto the node it was evicted from. + // Create a scheduling context for the attempt to re-schedule the job, and: + // 1. Mark the job as evicted. This ensures total scheduled resources is calculated correctly. + // 2. Add a node selector ensuring the job can only be re-scheduled onto the node it was evicted from. + // 3. Add tolerations for all taints on the node. This to ensure that: + // - Adding taints to a node doesn't cause jobs already running on the node to be preempted. + // - Jobs scheduled as away jobs have the necessary tolerations to be re-scheduled. + // TODO(albin): We can remove the checkOnlyDynamicRequirements flag in the nodeDb now that we've added the tolerations. 
jctx := schedulercontext.JobSchedulingContextFromJob(evi.priorityClasses, job) jctx.IsEvicted = true jctx.AddNodeSelector(schedulerconfig.NodeIdLabel, node.Id) evictedJctxsByJobId[job.GetId()] = jctx + for _, taint := range node.Taints { + jctx.AdditionalTolerations = append(jctx.AdditionalTolerations, v1.Toleration{Key: taint.Key, Value: taint.Value, Effect: taint.Effect}) + } nodeIdByJobId[job.GetId()] = node.Id } diff --git a/internal/scheduler/preempting_queue_scheduler_test.go b/internal/scheduler/preempting_queue_scheduler_test.go index 9b4d622d7e2..915fa6268ca 100644 --- a/internal/scheduler/preempting_queue_scheduler_test.go +++ b/internal/scheduler/preempting_queue_scheduler_test.go @@ -1866,11 +1866,12 @@ func TestPreemptingQueueScheduler(t *testing.T) { ) require.NoError(t, err) } - constraints := schedulerconstraints.SchedulingConstraintsFromSchedulingConfig( + constraints := schedulerconstraints.NewSchedulingConstraints( "pool", tc.TotalResources, schedulerobjects.ResourceList{Resources: tc.MinimumJobSize}, tc.SchedulingConfig, + nil, ) sch := NewPreemptingQueueScheduler( sctx, @@ -1937,6 +1938,16 @@ func TestPreemptingQueueScheduler(t *testing.T) { assert.True(t, ok) assert.NotEmpty(t, nodeId) + node, err := nodeDb.GetNode(nodeId) + require.NoError(t, err) + assert.NotEmpty(t, node) + + // Check that the job can actually go onto this node. + matches, reason, err := nodedb.StaticJobRequirementsMet(node.Taints, node.Labels, node.TotalResources, jctx) + require.NoError(t, err) + assert.Empty(t, reason) + assert.True(t, matches) + // Check that scheduled jobs are consistently assigned to the same node. // (We don't allow moving jobs between nodes.) if expectedNodeId, ok := nodeIdByJobId[job.GetId()]; ok { @@ -2212,11 +2223,12 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { err := sctx.AddQueueSchedulingContext(queue, weight, make(schedulerobjects.QuantityByTAndResourceType[string]), limiterByQueue[queue]) require.NoError(b, err) } - constraints := schedulerconstraints.SchedulingConstraintsFromSchedulingConfig( + constraints := schedulerconstraints.NewSchedulingConstraints( "pool", nodeDb.TotalResources(), schedulerobjects.ResourceList{Resources: tc.MinimumJobSize}, tc.SchedulingConfig, + nil, ) sch := NewPreemptingQueueScheduler( sctx, diff --git a/internal/scheduler/queue_scheduler_test.go b/internal/scheduler/queue_scheduler_test.go index 739cd7b6e83..412ed6dc176 100644 --- a/internal/scheduler/queue_scheduler_test.go +++ b/internal/scheduler/queue_scheduler_test.go @@ -4,6 +4,9 @@ import ( "fmt" "testing" + "github.com/armadaproject/armada/pkg/api" + "github.com/armadaproject/armada/pkg/client/queue" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" @@ -34,8 +37,8 @@ func TestQueueScheduler(t *testing.T) { // Total resources across all clusters. // Set to the total resources across all nodes if not provided. TotalResources schedulerobjects.ResourceList - // Map from queue to the priority factor associated with that queue. - PriorityFactorByQueue map[string]float64 + // Queues + Queues []queue.Queue // Initial resource usage for all queues. InitialAllocatedByQueueAndPriorityClass map[string]schedulerobjects.QuantityByTAndResourceType[string] // Nodes to be considered by the scheduler. 
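The test refactor in the hunks below replaces the PriorityFactorByQueue map with Queues []queue.Queue. The diff shows only call sites of testfixtures.SingleQueuePriorityOne; a plausible reconstruction of that helper (hypothetical, since its definition is not part of this diff) is:

// Hypothetical sketch of the fixture; only its call sites appear in this diff.
func SingleQueuePriorityOne(name string) []queue.Queue {
	return []queue.Queue{{Name: name, PriorityFactor: 1.0}}
}

This matches the inline literals used for the multi-queue cases, e.g. []queue.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 1.0}}. Note also the new "per queue, resource class, and pool cpu limit" case: with one 32-cpu node and a cpu fraction of 0.5 for PriorityClass0 in pool "pool", only 16 of the 32 one-cpu jobs should schedule, hence the expected indices 0 through 15.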
@@ -49,21 +52,21 @@ func TestQueueScheduler(t *testing.T) { }{ "simple success": { SchedulingConfig: testfixtures.TestSchedulingConfig(), - PriorityFactorByQueue: map[string]float64{"A": 1.0}, + Queues: testfixtures.SingleQueuePriorityOne("A"), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32), ExpectedScheduledIndices: testfixtures.IntRange(0, 31), }, "simple failure": { SchedulingConfig: testfixtures.TestSchedulingConfig(), - PriorityFactorByQueue: map[string]float64{"A": 1.0}, + Queues: testfixtures.SingleQueuePriorityOne("A"), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 33), ExpectedScheduledIndices: testfixtures.IntRange(0, 31), }, "multiple nodes": { SchedulingConfig: testfixtures.TestSchedulingConfig(), - PriorityFactorByQueue: map[string]float64{"A": 1.0}, + Queues: testfixtures.SingleQueuePriorityOne("A"), Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), Jobs: testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 64), ExpectedScheduledIndices: testfixtures.IntRange(0, 63), @@ -72,21 +75,21 @@ func TestQueueScheduler(t *testing.T) { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass1, 1)), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: testfixtures.IntRange(0, 1), }, "no preemption of higher-priority jobs": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate(testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass1, 1), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: testfixtures.IntRange(0, 0), }, "unschedulable jobs do not block schedulable jobs": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 10), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1)), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{0, 11}, }, "MaximumSchedulingBurst": { @@ -97,7 +100,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 10), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 4), ), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{0, 11}, }, "MaximumPerQueueSchedulingBurst": { @@ -109,7 +112,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 3), testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 1), ), - PriorityFactorByQueue: map[string]float64{"A": 1, "B": 1}, + Queues: []queue.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 1.0}}, ExpectedScheduledIndices: []int{0, 11, 14}, }, "MaximumSchedulingBurst 
is not exceeded by gangs": { @@ -123,7 +126,7 @@ func TestQueueScheduler(t *testing.T) { ), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 2), ), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{0, 4}, }, "MaximumPerQueueSchedulingBurst is not exceeded by gangs": { @@ -137,7 +140,7 @@ func TestQueueScheduler(t *testing.T) { ), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 2), ), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{0, 4}, }, "MaximumResourceFractionToSchedule": { @@ -145,7 +148,7 @@ func TestQueueScheduler(t *testing.T) { map[string]float64{"cpu": 0.5}, testfixtures.TestSchedulingConfig(), ), - PriorityFactorByQueue: map[string]float64{"A": 1.0}, + Queues: testfixtures.SingleQueuePriorityOne("A"), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32), ExpectedScheduledIndices: testfixtures.IntRange(0, 16), @@ -161,8 +164,8 @@ func TestQueueScheduler(t *testing.T) { }, testfixtures.TestSchedulingConfig(), ), - PriorityFactorByQueue: map[string]float64{"A": 1.0}, - Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Queues: testfixtures.SingleQueuePriorityOne("A"), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate( testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 2), @@ -181,11 +184,32 @@ func TestQueueScheduler(t *testing.T) { testfixtures.IntRange(14, 17), ), }, + "per queue, resource class, and pool cpu limit": { + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Queues: []queue.Queue{ + { + Name: "A", + PriorityFactor: 1.0, + ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{ + testfixtures.PriorityClass0: { + MaximumResourceFractionByPool: map[string]api.PriorityClassPoolResourceLimits{ + "pool": { + MaximumResourceFraction: map[string]float64{"cpu": 0.5, "memory": 1.0}, + }, + }, + }, + }, + }, + }, + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Jobs: testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32), + ExpectedScheduledIndices: testfixtures.IntRange(0, 15), + }, "fairness two queues": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32), testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 32)), - PriorityFactorByQueue: map[string]float64{"A": 1, "B": 1}, + Queues: []queue.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 1.0}}, ExpectedScheduledIndices: armadaslices.Concatenate(testfixtures.IntRange(0, 15), testfixtures.IntRange(32, 47)), }, "fairness three queues": { @@ -196,11 +220,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 32), testfixtures.N1Cpu4GiJobs("C", testfixtures.PriorityClass0, 32), ), - PriorityFactorByQueue: map[string]float64{ - "A": 1, - "B": 1, - "C": 1, - }, + Queues: []queue.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 1.0}, {Name: "C", PriorityFactor: 1.0}}, ExpectedScheduledIndices: armadaslices.Concatenate( testfixtures.IntRange(0, 10), testfixtures.IntRange(32, 42), @@ -214,10 +234,7 @@ func 
TestQueueScheduler(t *testing.T) { testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 96), testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 96), ), - PriorityFactorByQueue: map[string]float64{ - "A": 1, - "B": 2, - }, + Queues: []queue.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 2.0}}, ExpectedScheduledIndices: armadaslices.Concatenate( testfixtures.IntRange(0, 63), testfixtures.IntRange(96, 127), @@ -231,11 +248,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 96), testfixtures.N1Cpu4GiJobs("C", testfixtures.PriorityClass0, 96), ), - PriorityFactorByQueue: map[string]float64{ - "A": 1, - "B": 2, - "C": 10, - }, + Queues: []queue.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 2.0}, {Name: "C", PriorityFactor: 10.0}}, ExpectedScheduledIndices: armadaslices.Concatenate( testfixtures.IntRange(0, 59), testfixtures.IntRange(96, 125), @@ -249,10 +262,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 32), testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 32), ), - PriorityFactorByQueue: map[string]float64{ - "A": 1, - "B": 1, - }, + Queues: []queue.Queue{{Name: "A", PriorityFactor: 1.0}, {Name: "B", PriorityFactor: 1.0}}, InitialAllocatedByQueueAndPriorityClass: map[string]schedulerobjects.QuantityByTAndResourceType[string]{ "A": { testfixtures.PriorityClass0: schedulerobjects.ResourceList{ @@ -276,7 +286,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), ), Jobs: testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: nil, }, "node with some available capacity": { @@ -291,7 +301,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), ), Jobs: testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 2), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{0}, }, "preempt used resources of lower-priority jobs": { @@ -306,21 +316,21 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), ), Jobs: testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass1, 1), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{0}, }, "respect taints": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.NTainted32CpuNodes(1, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{1}, }, "minimum job size": { - SchedulingConfig: testfixtures.TestSchedulingConfig(), - Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), - Jobs: armadaslices.Concatenate(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), - PriorityFactorByQueue: map[string]float64{"A": 1}, + SchedulingConfig: testfixtures.TestSchedulingConfig(), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Jobs: 
armadaslices.Concatenate(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), + Queues: testfixtures.SingleQueuePriorityOne("A"), MinimumJobSize: map[string]resource.Quantity{ "cpu": resource.MustParse("2"), }, @@ -334,7 +344,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.N1GpuJobs("A", testfixtures.PriorityClass0, 1), ), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), MinimumJobSize: map[string]resource.Quantity{ "gpu": resource.MustParse("1"), }, @@ -348,7 +358,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.N1GpuJobs("A", testfixtures.PriorityClass0, 1), ), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), MinimumJobSize: map[string]resource.Quantity{ "gpu": resource.MustParse("2"), }, @@ -358,7 +368,7 @@ func TestQueueScheduler(t *testing.T) { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.NTainted32CpuNodes(1, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{1}, }, "Node selector": { @@ -368,14 +378,14 @@ func TestQueueScheduler(t *testing.T) { testfixtures.WithLabelsNodes(map[string]string{"foo": "foo"}, testfixtures.N32CpuNodes(1, testfixtures.TestPriorities)), ), Jobs: testfixtures.WithNodeSelectorJobs(map[string]string{"foo": "foo"}, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 2)), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{0}, }, "taints and tolerations (indexed)": { SchedulingConfig: testfixtures.WithIndexedTaintsConfig([]string{"largeJobsOnly"}, testfixtures.TestSchedulingConfig()), Nodes: testfixtures.NTainted32CpuNodes(1, testfixtures.TestPriorities), Jobs: armadaslices.Concatenate(testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{1}, }, "Node selector (indexed)": { @@ -385,7 +395,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.WithLabelsNodes(map[string]string{"foo": "foo"}, testfixtures.N32CpuNodes(1, testfixtures.TestPriorities))..., ), Jobs: testfixtures.WithNodeSelectorJobs(map[string]string{"foo": "foo"}, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 2)), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{0}, }, "MaxQueueLookback": { @@ -396,7 +406,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 3), testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass0, 1), ), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{0}, ExpectedNeverAttemptedIndices: []int{3, 4}, }, @@ -404,7 +414,7 @@ func TestQueueScheduler(t *testing.T) { SchedulingConfig: 
testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), Jobs: testfixtures.WithGangAnnotationsJobs(testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 2)), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{0, 1}, }, "non-consecutive gang success": { @@ -425,14 +435,14 @@ func TestQueueScheduler(t *testing.T) { }, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), ), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{0, 1, 2}, }, "gang failure": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(2, testfixtures.TestPriorities), Jobs: testfixtures.WithGangAnnotationsJobs(testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 3)), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: nil, }, "non-consecutive gang failure": { @@ -453,7 +463,7 @@ func TestQueueScheduler(t *testing.T) { }, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), ), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{1}, }, "job priority": { @@ -464,7 +474,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.WithPriorityJobs(1, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), testfixtures.WithPriorityJobs(20, testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 1)), ), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{1}, }, "nodeAffinity node notIn": { @@ -500,7 +510,7 @@ func TestQueueScheduler(t *testing.T) { testfixtures.N32Cpu256GiJobs("A", testfixtures.PriorityClass0, 3), ), ), - PriorityFactorByQueue: map[string]float64{"A": 1}, + Queues: testfixtures.SingleQueuePriorityOne("A"), ExpectedScheduledIndices: []int{0, 1}, }, } @@ -518,10 +528,16 @@ func TestQueueScheduler(t *testing.T) { // Default to NodeDb total. 
tc.TotalResources = nodeDb.TotalResources() } + + queueNameToQueue := map[string]*queue.Queue{} + for i := range tc.Queues { + queueNameToQueue[tc.Queues[i].Name] = &tc.Queues[i] + } + indexByJobId := make(map[string]int) for i, job := range tc.Jobs { - if _, ok := tc.PriorityFactorByQueue[job.GetQueue()]; !ok { - panic(fmt.Sprintf("no priority factor for queue %s", job.Queue())) + if _, ok := queueNameToQueue[job.GetQueue()]; !ok { + panic(fmt.Sprintf("queue %s does not exist", job.Queue())) } indexByJobId[job.GetId()] = i } @@ -554,11 +570,11 @@ func TestQueueScheduler(t *testing.T) { ), tc.TotalResources, ) - for queue, priorityFactor := range tc.PriorityFactorByQueue { - weight := 1 / priorityFactor + for _, q := range tc.Queues { + weight := 1.0 / float64(q.PriorityFactor) err := sctx.AddQueueSchedulingContext( - queue, weight, - tc.InitialAllocatedByQueueAndPriorityClass[queue], + q.Name, weight, + tc.InitialAllocatedByQueueAndPriorityClass[q.Name], rate.NewLimiter( rate.Limit(tc.SchedulingConfig.MaximumPerQueueSchedulingRate), tc.SchedulingConfig.MaximumPerQueueSchedulingBurst, @@ -566,16 +582,17 @@ func TestQueueScheduler(t *testing.T) { ) require.NoError(t, err) } - constraints := schedulerconstraints.SchedulingConstraintsFromSchedulingConfig( + constraints := schedulerconstraints.NewSchedulingConstraints( "pool", tc.TotalResources, schedulerobjects.ResourceList{Resources: tc.MinimumJobSize}, tc.SchedulingConfig, + tc.Queues, ) jobIteratorByQueue := make(map[string]JobIterator) - for queue := range tc.PriorityFactorByQueue { - it := jobRepo.GetJobIterator(queue) - jobIteratorByQueue[queue] = it + for _, q := range tc.Queues { + it := jobRepo.GetJobIterator(q.Name) + jobIteratorByQueue[q.Name] = it } sch, err := NewQueueScheduler(sctx, constraints, nodeDb, jobIteratorByQueue) require.NoError(t, err) @@ -623,7 +640,7 @@ func TestQueueScheduler(t *testing.T) { queues := armadaslices.Unique(append( maps.Keys(sctx.QueueSchedulingContexts), - maps.Keys(tc.PriorityFactorByQueue)..., + maps.Keys(queueNameToQueue)..., )) for _, queue := range queues { qctx := sctx.QueueSchedulingContexts[queue] @@ -673,6 +690,16 @@ func TestQueueScheduler(t *testing.T) { nodeId, ok := result.NodeIdByJobId[jctx.JobId] assert.True(t, ok) assert.NotEmpty(t, nodeId) + + node, err := nodeDb.GetNode(nodeId) + require.NoError(t, err) + assert.NotEmpty(t, node) + + // Check that the job can actually go onto this node.
+ matches, reason, err := nodedb.StaticJobRequirementsMet(node.Taints, node.Labels, node.TotalResources, jctx) + require.NoError(t, err) + assert.Empty(t, reason) + assert.True(t, matches) } // For jobs that could not be scheduled, diff --git a/internal/scheduler/scheduler.go b/internal/scheduler/scheduler.go index 8598c1a8d60..0191fd4aab7 100644 --- a/internal/scheduler/scheduler.go +++ b/internal/scheduler/scheduler.go @@ -289,10 +289,10 @@ func (s *Scheduler) cycle(ctx *armadacontext.Context, updateAll bool, leaderToke continue } if jst.Failed { - s.failureEstimator.Push(run.NodeName(), jst.Job.GetQueue(), false) + s.failureEstimator.Push(run.NodeName(), jst.Job.GetQueue(), run.Executor(), false) } if jst.Succeeded { - s.failureEstimator.Push(run.NodeName(), jst.Job.GetQueue(), true) + s.failureEstimator.Push(run.NodeName(), jst.Job.GetQueue(), run.Executor(), true) } } s.failureEstimator.Update() diff --git a/internal/scheduler/schedulerapp.go b/internal/scheduler/schedulerapp.go index bc408ef156e..9d7f6f35c71 100644 --- a/internal/scheduler/schedulerapp.go +++ b/internal/scheduler/schedulerapp.go @@ -17,6 +17,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "github.com/armadaproject/armada/internal/armada/repository" "github.com/armadaproject/armada/internal/common" "github.com/armadaproject/armada/internal/common/app" "github.com/armadaproject/armada/internal/common/armadacontext" @@ -91,7 +92,7 @@ func Run(config schedulerconfig.Configuration) error { Warnf("Redis client didn't close down cleanly") } }() - queueRepository := database.NewLegacyQueueRepository(redisClient) + queueRepository := repository.NewRedisQueueRepository(redisClient) legacyExecutorRepository := database.NewRedisExecutorRepository(redisClient, "pulsar") // //////////////////////////////////////////////////////////////////////// diff --git a/internal/scheduler/schedulerobjects/schedulerobjects.pb.go b/internal/scheduler/schedulerobjects/schedulerobjects.pb.go index 8319120e3de..bbbdfe705a8 100644 --- a/internal/scheduler/schedulerobjects/schedulerobjects.pb.go +++ b/internal/scheduler/schedulerobjects/schedulerobjects.pb.go @@ -835,7 +835,10 @@ type PodRequirements struct { Tolerations []v1.Toleration `protobuf:"bytes,3,rep,name=tolerations,proto3" json:"tolerations"` // Kubernetes annotations. Included here since we use annotations with special meaning. Annotations map[string]string `protobuf:"bytes,7,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Pod priority. Should be mapped from the priority class name of the submitted pod. + // Priority class priority of the pod as submitted. Should be mapped from the priority class name of the submitted pod. + // + // During scheduling, the priority stored on the podSchedulingContext should be used instead, + // since a pod may be scheduled at a priority different from the priority it was submitted with. Priority int32 `protobuf:"varint,4,opt,name=priority,proto3" json:"priority,omitempty"` // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. 
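The reworded Priority comment above (repeated in the .proto file below) draws a distinction between the priority a pod was submitted with and the priority recorded on the podSchedulingContext during scheduling. A minimal sketch of that fallback rule, assuming a hypothetical podSchedulingContext type and field name rather than the scheduler's real ones:

package main

import "fmt"

// Hypothetical stand-in for the scheduler's podSchedulingContext.
type podSchedulingContext struct {
	scheduledAtPriority int32
}

// effectivePriority returns the priority a pod should be treated as having
// during scheduling: the priority recorded on the scheduling context if one
// exists, otherwise the priority the pod was submitted with.
func effectivePriority(submittedPriority int32, pctx *podSchedulingContext) int32 {
	if pctx != nil {
		return pctx.scheduledAtPriority
	}
	return submittedPriority
}

func main() {
	fmt.Println(effectivePriority(10, nil))                                           // 10: falls back to the submitted priority
	fmt.Println(effectivePriority(10, &podSchedulingContext{scheduledAtPriority: 5})) // 5: the scheduled-at priority wins
}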
diff --git a/internal/scheduler/schedulerobjects/schedulerobjects.proto b/internal/scheduler/schedulerobjects/schedulerobjects.proto index db8fbf64da2..ebfb8c2149d 100644 --- a/internal/scheduler/schedulerobjects/schedulerobjects.proto +++ b/internal/scheduler/schedulerobjects/schedulerobjects.proto @@ -159,7 +159,10 @@ message PodRequirements { repeated k8s.io.api.core.v1.Toleration tolerations = 3 [(gogoproto.nullable) = false]; // Kubernetes annotations. Included here since we use annotations with special meaning. map annotations = 7; - // Pod priority. Should be mapped from the priority class name of the submitted pod. + // Priority class priority of the pod as submitted. Should be mapped from the priority class name of the submitted pod. + // + // During scheduling, the priority stored on the podSchedulingContext should be used instead, + // since a pod may be scheduled at a priority different from the priority it was submitted with. int32 priority = 4; // One of Never, PreemptLowerPriority. // Defaults to PreemptLowerPriority if unset. diff --git a/internal/scheduler/scheduling_algo.go b/internal/scheduler/scheduling_algo.go index 34643c0aff9..0e37a9b466f 100644 --- a/internal/scheduler/scheduling_algo.go +++ b/internal/scheduler/scheduling_algo.go @@ -15,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "github.com/armadaproject/armada/internal/armada/configuration" + "github.com/armadaproject/armada/internal/armada/repository" "github.com/armadaproject/armada/internal/common/armadacontext" "github.com/armadaproject/armada/internal/common/logging" armadaslices "github.com/armadaproject/armada/internal/common/slices" @@ -27,6 +28,7 @@ import ( "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/nodedb" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" + "github.com/armadaproject/armada/pkg/client/queue" ) // SchedulingAlgo is the interface between the Pulsar-backed scheduler and the @@ -41,7 +43,7 @@ type SchedulingAlgo interface { type FairSchedulingAlgo struct { schedulingConfig configuration.SchedulingConfig executorRepository database.ExecutorRepository - queueRepository database.QueueRepository + queueRepository repository.QueueRepository schedulingContextRepository *SchedulingContextRepository // Global job scheduling rate-limiter. 
limiter *rate.Limiter @@ -63,7 +65,7 @@ func NewFairSchedulingAlgo( config configuration.SchedulingConfig, maxSchedulingDuration time.Duration, executorRepository database.ExecutorRepository, - queueRepository database.QueueRepository, + queueRepository repository.QueueRepository, schedulingContextRepository *SchedulingContextRepository, ) (*FairSchedulingAlgo, error) { if _, ok := config.Preemption.PriorityClasses[config.Preemption.DefaultPriorityClass]; !ok { @@ -221,6 +223,7 @@ func (it *JobQueueIteratorAdapter) Next() (interfaces.LegacySchedulerJob, error) } type fairSchedulingAlgoContext struct { + queues []queue.Queue priorityFactorByQueue map[string]float64 isActiveByQueueName map[string]bool totalCapacityByPool schedulerobjects.QuantityByTAndResourceType[string] @@ -246,7 +249,11 @@ func (l *FairSchedulingAlgo) newFairSchedulingAlgoContext(ctx *armadacontext.Con } priorityFactorByQueue := make(map[string]float64) for _, queue := range queues { - priorityFactorByQueue[queue.Name] = queue.Weight + weight := 0.0 + if queue.PriorityFactor != 0 { + weight = 1 / float64(queue.PriorityFactor) + } + priorityFactorByQueue[queue.Name] = weight } // Get the total capacity available across executors. @@ -308,6 +315,7 @@ func (l *FairSchedulingAlgo) newFairSchedulingAlgoContext(ctx *armadacontext.Con executors = l.filterLaggingExecutors(ctx, executors, jobsByExecutorId) return &fairSchedulingAlgoContext{ + queues: queues, priorityFactorByQueue: priorityFactorByQueue, isActiveByQueueName: isActiveByQueueName, totalCapacityByPool: totalCapacityByPool, @@ -399,11 +407,12 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( return nil, nil, err } } - constraints := schedulerconstraints.SchedulingConstraintsFromSchedulingConfig( + constraints := schedulerconstraints.NewSchedulingConstraints( pool, fsctx.totalCapacityByPool[pool], minimumJobSize, l.schedulingConfig, + fsctx.queues, ) scheduler := NewPreemptingQueueScheduler( sctx, diff --git a/internal/scheduler/scheduling_algo_test.go b/internal/scheduler/scheduling_algo_test.go index 922cf94f333..53167a3b83b 100644 --- a/internal/scheduler/scheduling_algo_test.go +++ b/internal/scheduler/scheduling_algo_test.go @@ -6,6 +6,8 @@ import ( "testing" "time" + "github.com/armadaproject/armada/pkg/client/queue" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -15,7 +17,6 @@ import ( "github.com/armadaproject/armada/internal/armada/configuration" "github.com/armadaproject/armada/internal/common/armadacontext" armadaslices "github.com/armadaproject/armada/internal/common/slices" - "github.com/armadaproject/armada/internal/scheduler/database" "github.com/armadaproject/armada/internal/scheduler/jobdb" schedulermocks "github.com/armadaproject/armada/internal/scheduler/mocks" "github.com/armadaproject/armada/internal/scheduler/nodedb" @@ -32,7 +33,7 @@ func TestSchedule(t *testing.T) { schedulingConfig configuration.SchedulingConfig executors []*schedulerobjects.Executor - queues []*database.Queue + queues []queue.Queue queuedJobs []*jobdb.Job // Already scheduled jobs. 
Specifically, @@ -56,7 +57,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []*database.Queue{testfixtures.TestDbQueue()}, + queues: []queue.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 10), expectedScheduledIndices: []int{0, 1, 2, 3}, }, @@ -66,7 +67,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.WithLastUpdateTimeExecutor(testfixtures.BaseTime.Add(-1*time.Hour), testfixtures.Test1Node32CoreExecutor("executor2")), }, - queues: []*database.Queue{testfixtures.TestDbQueue()}, + queues: []queue.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 10), expectedScheduledIndices: []int{0, 1}, }, @@ -76,7 +77,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []*database.Queue{testfixtures.TestDbQueue()}, + queues: []queue.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N1Cpu4GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 48), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -94,7 +95,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []*database.Queue{testfixtures.TestDbQueue()}, + queues: []queue.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N1Cpu4GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 48), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -112,7 +113,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []*database.Queue{testfixtures.TestDbQueue()}, + queues: []queue.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 10), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -135,7 +136,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []*database.Queue{testfixtures.TestDbQueue()}, + queues: []queue.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 10), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -157,7 +158,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []*database.Queue{testfixtures.TestDbQueue()}, + queues: []queue.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 10), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -175,12 +176,12 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []*database.Queue{testfixtures.TestDbQueue()}, + queues: []queue.Queue{testfixtures.MakeTestQueue()}, }, "no executors": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{}, - queues: 
[]*database.Queue{testfixtures.TestDbQueue()}, + queues: []queue.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass3, 10), }, "computation of allocated resources does not confuse priority class with per-queue priority": { @@ -191,7 +192,7 @@ func TestSchedule(t *testing.T) { testfixtures.TestSchedulingConfig(), ), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []*database.Queue{testfixtures.TestDbQueue()}, + queues: []queue.Queue{testfixtures.MakeTestQueue()}, queuedJobs: []*jobdb.Job{ // Submit the next job with a per-queue priority number (i.e., 1) that is larger // than the per-queue priority of the already-running job (i.e., 0), but smaller @@ -212,7 +213,7 @@ func TestSchedule(t *testing.T) { "urgency-based preemption within a single queue": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []*database.Queue{{Name: "A"}}, + queues: []queue.Queue{{Name: "A"}}, queuedJobs: testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass1, 2), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -232,7 +233,7 @@ func TestSchedule(t *testing.T) { "urgency-based preemption between queues": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []*database.Queue{{Name: "A"}, {Name: "B"}}, + queues: []queue.Queue{{Name: "A"}, {Name: "B"}}, queuedJobs: testfixtures.N16Cpu128GiJobs("B", testfixtures.PriorityClass1, 2), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -252,7 +253,7 @@ func TestSchedule(t *testing.T) { "preemption to fair share": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []*database.Queue{{Name: "A", Weight: 100}, {Name: "B", Weight: 100}}, + queues: []queue.Queue{{Name: "A", PriorityFactor: 0.01}, {Name: "B", PriorityFactor: 0.01}}, queuedJobs: testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 2), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -272,14 +273,14 @@ func TestSchedule(t *testing.T) { "gang scheduling successful": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []*database.Queue{{Name: "A", Weight: 100}}, + queues: []queue.Queue{{Name: "A", PriorityFactor: 0.01}}, queuedJobs: testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 2)), expectedScheduledIndices: []int{0, 1}, }, "gang scheduling successful with some jobs failing to schedule above min cardinality": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []*database.Queue{{Name: "A", Weight: 100}}, + queues: []queue.Queue{{Name: "A", PriorityFactor: 0.01}}, queuedJobs: testfixtures.WithGangAnnotationsAndMinCardinalityJobs( 2, testfixtures.N16Cpu128GiJobs("A", testfixtures.PriorityClass0, 10), @@ -293,7 +294,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []*database.Queue{{Name: "A", Weight: 
100}}, + queues: []queue.Queue{{Name: "A", PriorityFactor: 0.01}}, queuedJobs: testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs("queue1", testfixtures.PriorityClass0, 3)), }, "urgency-based gang preemption": { @@ -301,7 +302,7 @@ func TestSchedule(t *testing.T) { executors: []*schedulerobjects.Executor{ testfixtures.Test1Node32CoreExecutor("executor1"), }, - queues: []*database.Queue{{Name: "queue1", Weight: 100}, {Name: "queue2", Weight: 100}}, + queues: []queue.Queue{{Name: "queue1", PriorityFactor: 0.01}, {Name: "queue2", PriorityFactor: 0.01}}, queuedJobs: testfixtures.N16Cpu128GiJobs("queue2", testfixtures.PriorityClass1, 1), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -321,7 +322,7 @@ func TestSchedule(t *testing.T) { "preemption to fair share evicting a gang": { schedulingConfig: testfixtures.TestSchedulingConfig(), executors: []*schedulerobjects.Executor{testfixtures.Test1Node32CoreExecutor("executor1")}, - queues: []*database.Queue{{Name: "queue1", Weight: 100}, {Name: "queue2", Weight: 100}}, + queues: []queue.Queue{{Name: "queue1", PriorityFactor: 0.01}, {Name: "queue2", PriorityFactor: 0.01}}, queuedJobs: testfixtures.N16Cpu128GiJobs("queue2", testfixtures.PriorityClass0, 1), scheduledJobsByExecutorIndexAndNodeIndex: map[int]map[int]scheduledJobs{ 0: { @@ -344,7 +345,7 @@ func TestSchedule(t *testing.T) { testfixtures.Test1Node32CoreExecutor("executor1"), testfixtures.Test1Node32CoreExecutor("executor2"), }, - queues: []*database.Queue{testfixtures.TestDbQueue()}, + queues: []queue.Queue{testfixtures.MakeTestQueue()}, queuedJobs: testfixtures.WithGangAnnotationsJobs(testfixtures.N16Cpu128GiJobs(testfixtures.TestQueue, testfixtures.PriorityClass0, 4)), expectedScheduledIndices: []int{0, 1, 2, 3}, }, diff --git a/internal/scheduler/simulator/simulator.go b/internal/scheduler/simulator/simulator.go index b270b54bfd6..3973e82708e 100644 --- a/internal/scheduler/simulator/simulator.go +++ b/internal/scheduler/simulator/simulator.go @@ -461,12 +461,13 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { return err } } - constraints := schedulerconstraints.SchedulingConstraintsFromSchedulingConfig( + constraints := schedulerconstraints.NewSchedulingConstraints( pool.Name, totalResources, // Minimum job size not used for simulation; use taints/tolerations instead. 
schedulerobjects.ResourceList{}, s.schedulingConfig, + nil, ) sch := scheduler.NewPreemptingQueueScheduler( sctx, diff --git a/internal/scheduler/testfixtures/testfixtures.go b/internal/scheduler/testfixtures/testfixtures.go index 70b6bfb05ce..36fea49946d 100644 --- a/internal/scheduler/testfixtures/testfixtures.go +++ b/internal/scheduler/testfixtures/testfixtures.go @@ -10,6 +10,7 @@ import ( "time" "github.com/armadaproject/armada/pkg/api" + "github.com/armadaproject/armada/pkg/client/queue" "github.com/google/uuid" "github.com/oklog/ulid" @@ -21,7 +22,6 @@ import ( "github.com/armadaproject/armada/internal/common/types" "github.com/armadaproject/armada/internal/common/util" schedulerconfiguration "github.com/armadaproject/armada/internal/scheduler/configuration" - "github.com/armadaproject/armada/internal/scheduler/database" "github.com/armadaproject/armada/internal/scheduler/jobdb" "github.com/armadaproject/armada/internal/scheduler/schedulerobjects" ) @@ -711,6 +711,10 @@ func N8GpuNodes(n int, priorities []int32) []*schedulerobjects.Node { return rv } +func SingleQueuePriorityOne(name string) []queue.Queue { + return []queue.Queue{{Name: name, PriorityFactor: 1.0}} +} + func TestNode(priorities []int32, resources map[string]resource.Quantity) *schedulerobjects.Node { id := uuid.NewString() return &schedulerobjects.Node{ @@ -783,10 +787,10 @@ func Test1Node32CoreExecutor(executorId string) *schedulerobjects.Executor { } } -func TestDbQueue() *database.Queue { - return &database.Queue{ - Name: TestQueue, - Weight: 100, +func MakeTestQueue() queue.Queue { + return queue.Queue{ + Name: TestQueue, + PriorityFactor: 0.01, } } diff --git a/pkg/api/api.swagger.go b/pkg/api/api.swagger.go index 0f338497a6b..89923640b0d 100644 --- a/pkg/api/api.swagger.go +++ b/pkg/api/api.swagger.go @@ -1615,6 +1615,38 @@ func SwaggerJsonTemplate() string { " }\n" + " }\n" + " },\n" + + " \"apiPriorityClassPoolResourceLimits\": {\n" + + " \"type\": \"object\",\n" + + " \"properties\": {\n" + + " \"maximumResourceFraction\": {\n" + + " \"type\": \"object\",\n" + + " \"additionalProperties\": {\n" + + " \"type\": \"number\",\n" + + " \"format\": \"double\"\n" + + " }\n" + + " }\n" + + " }\n" + + " },\n" + + " \"apiPriorityClassResourceLimits\": {\n" + + " \"type\": \"object\",\n" + + " \"properties\": {\n" + + " \"maximumResourceFraction\": {\n" + + " \"description\": \"Limits resources assigned to jobs of this priority class.\\nSpecifically, jobs of this priority class are only scheduled if doing so does not exceed this limit.\",\n" + + " \"type\": \"object\",\n" + + " \"additionalProperties\": {\n" + + " \"type\": \"number\",\n" + + " \"format\": \"double\"\n" + + " }\n" + + " },\n" + + " \"maximumResourceFractionByPool\": {\n" + + " \"description\": \"Per-pool override of maximum_resource_fraction.\\nIf missing for a particular pool, maximum_resource_fraction is used instead for that pool.\",\n" + + " \"type\": \"object\",\n" + + " \"additionalProperties\": {\n" + + " \"$ref\": \"#/definitions/apiPriorityClassPoolResourceLimits\"\n" + + " }\n" + + " }\n" + + " }\n" + + " },\n" + " \"apiQueue\": {\n" + " \"type\": \"object\",\n" + " \"title\": \"swagger:model\",\n" + @@ -1645,6 +1677,13 @@ func SwaggerJsonTemplate() string { " \"format\": \"double\"\n" + " }\n" + " },\n" + + " \"resourceLimitsByPriorityClassName\": {\n" + + " \"description\": \"Map from priority class name to resource limit overrides for this queue and priority class.\\nIf provided for a priority class, global limits for that priority class 
do not apply to this queue.\",\n" + + " \"type\": \"object\",\n" + + " \"additionalProperties\": {\n" + + " \"$ref\": \"#/definitions/apiPriorityClassResourceLimits\"\n" + + " }\n" + + " },\n" + " \"userOwners\": {\n" + " \"type\": \"array\",\n" + " \"items\": {\n" + diff --git a/pkg/api/api.swagger.json b/pkg/api/api.swagger.json index 963593816f5..66eb44db81b 100644 --- a/pkg/api/api.swagger.json +++ b/pkg/api/api.swagger.json @@ -1604,6 +1604,38 @@ } } }, + "apiPriorityClassPoolResourceLimits": { + "type": "object", + "properties": { + "maximumResourceFraction": { + "type": "object", + "additionalProperties": { + "type": "number", + "format": "double" + } + } + } + }, + "apiPriorityClassResourceLimits": { + "type": "object", + "properties": { + "maximumResourceFraction": { + "description": "Limits resources assigned to jobs of this priority class.\nSpecifically, jobs of this priority class are only scheduled if doing so does not exceed this limit.", + "type": "object", + "additionalProperties": { + "type": "number", + "format": "double" + } + }, + "maximumResourceFractionByPool": { + "description": "Per-pool override of maximum_resource_fraction.\nIf missing for a particular pool, maximum_resource_fraction is used instead for that pool.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/apiPriorityClassPoolResourceLimits" + } + } + } + }, "apiQueue": { "type": "object", "title": "swagger:model", @@ -1634,6 +1666,13 @@ "format": "double" } }, + "resourceLimitsByPriorityClassName": { + "description": "Map from priority class name to resource limit overrides for this queue and priority class.\nIf provided for a priority class, global limits for that priority class do not apply to this queue.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/apiPriorityClassResourceLimits" + } + }, "userOwners": { "type": "array", "items": { diff --git a/pkg/api/submit.pb.go b/pkg/api/submit.pb.go index 3af7b788f02..477367cba77 100644 --- a/pkg/api/submit.pb.go +++ b/pkg/api/submit.pb.go @@ -1097,12 +1097,15 @@ func (m *JobSubmitResponse) GetJobResponseItems() []*JobSubmitResponseItem { // swagger:model type Queue struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - PriorityFactor float64 `protobuf:"fixed64,2,opt,name=priority_factor,json=priorityFactor,proto3" json:"priorityFactor,omitempty"` - UserOwners []string `protobuf:"bytes,3,rep,name=user_owners,json=userOwners,proto3" json:"userOwners,omitempty"` - GroupOwners []string `protobuf:"bytes,4,rep,name=group_owners,json=groupOwners,proto3" json:"groupOwners,omitempty"` - ResourceLimits map[string]float64 `protobuf:"bytes,5,rep,name=resource_limits,json=resourceLimits,proto3" json:"resourceLimits,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` - Permissions []*Queue_Permissions `protobuf:"bytes,6,rep,name=permissions,proto3" json:"permissions,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + PriorityFactor float64 `protobuf:"fixed64,2,opt,name=priority_factor,json=priorityFactor,proto3" json:"priorityFactor,omitempty"` + UserOwners []string `protobuf:"bytes,3,rep,name=user_owners,json=userOwners,proto3" json:"userOwners,omitempty"` + GroupOwners []string `protobuf:"bytes,4,rep,name=group_owners,json=groupOwners,proto3" json:"groupOwners,omitempty"` + ResourceLimits map[string]float64 `protobuf:"bytes,5,rep,name=resource_limits,json=resourceLimits,proto3" 
json:"resourceLimits,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` // Deprecated: Do not use. + // Map from priority class name to resource limit overrides for this queue and priority class. + // If provided for a priority class, global limits for that priority class do not apply to this queue. + ResourceLimitsByPriorityClassName map[string]PriorityClassResourceLimits `protobuf:"bytes,7,rep,name=resource_limits_by_priority_class_name,json=resourceLimitsByPriorityClassName,proto3" json:"resourceLimitsByPriorityClassName" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Permissions []*Queue_Permissions `protobuf:"bytes,6,rep,name=permissions,proto3" json:"permissions,omitempty"` } func (m *Queue) Reset() { *m = Queue{} } @@ -1165,6 +1168,7 @@ func (m *Queue) GetGroupOwners() []string { return nil } +// Deprecated: Do not use. func (m *Queue) GetResourceLimits() map[string]float64 { if m != nil { return m.ResourceLimits @@ -1172,6 +1176,13 @@ func (m *Queue) GetResourceLimits() map[string]float64 { return nil } +func (m *Queue) GetResourceLimitsByPriorityClassName() map[string]PriorityClassResourceLimits { + if m != nil { + return m.ResourceLimitsByPriorityClassName + } + return nil +} + func (m *Queue) GetPermissions() []*Queue_Permissions { if m != nil { return m.Permissions @@ -1281,6 +1292,104 @@ func (m *Queue_Permissions_Subject) GetName() string { return "" } +type PriorityClassResourceLimits struct { + // Limits resources assigned to jobs of this priority class. + // Specifically, jobs of this priority class are only scheduled if doing so does not exceed this limit. + MaximumResourceFraction map[string]float64 `protobuf:"bytes,1,rep,name=maximum_resource_fraction,json=maximumResourceFraction,proto3" json:"maximumResourceFraction" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + // Per-pool override of maximum_resource_fraction. + // If missing for a particular pool, maximum_resource_fraction is used instead for that pool. 
+ MaximumResourceFractionByPool map[string]PriorityClassPoolResourceLimits `protobuf:"bytes,2,rep,name=maximum_resource_fraction_by_pool,json=maximumResourceFractionByPool,proto3" json:"maximumResourceFractionByPool" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *PriorityClassResourceLimits) Reset() { *m = PriorityClassResourceLimits{} } +func (*PriorityClassResourceLimits) ProtoMessage() {} +func (*PriorityClassResourceLimits) Descriptor() ([]byte, []int) { + return fileDescriptor_e998bacb27df16c1, []int{13} +} +func (m *PriorityClassResourceLimits) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassResourceLimits) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PriorityClassResourceLimits.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PriorityClassResourceLimits) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassResourceLimits.Merge(m, src) +} +func (m *PriorityClassResourceLimits) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassResourceLimits) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassResourceLimits.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClassResourceLimits proto.InternalMessageInfo + +func (m *PriorityClassResourceLimits) GetMaximumResourceFraction() map[string]float64 { + if m != nil { + return m.MaximumResourceFraction + } + return nil +} + +func (m *PriorityClassResourceLimits) GetMaximumResourceFractionByPool() map[string]PriorityClassPoolResourceLimits { + if m != nil { + return m.MaximumResourceFractionByPool + } + return nil +} + +type PriorityClassPoolResourceLimits struct { + MaximumResourceFraction map[string]float64 `protobuf:"bytes,1,rep,name=maximum_resource_fraction,json=maximumResourceFraction,proto3" json:"maximumResourceFraction" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` +} + +func (m *PriorityClassPoolResourceLimits) Reset() { *m = PriorityClassPoolResourceLimits{} } +func (*PriorityClassPoolResourceLimits) ProtoMessage() {} +func (*PriorityClassPoolResourceLimits) Descriptor() ([]byte, []int) { + return fileDescriptor_e998bacb27df16c1, []int{14} +} +func (m *PriorityClassPoolResourceLimits) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassPoolResourceLimits) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PriorityClassPoolResourceLimits.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PriorityClassPoolResourceLimits) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassPoolResourceLimits.Merge(m, src) +} +func (m *PriorityClassPoolResourceLimits) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassPoolResourceLimits) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassPoolResourceLimits.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClassPoolResourceLimits proto.InternalMessageInfo + +func (m *PriorityClassPoolResourceLimits) GetMaximumResourceFraction() map[string]float64 { + if m != nil { + return m.MaximumResourceFraction + } + return nil +} + // swagger:model type QueueList struct { Queues []*Queue `protobuf:"bytes,1,rep,name=queues,proto3" json:"queues,omitempty"` 
@@ -1289,7 +1398,7 @@ type QueueList struct { func (m *QueueList) Reset() { *m = QueueList{} } func (*QueueList) ProtoMessage() {} func (*QueueList) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{13} + return fileDescriptor_e998bacb27df16c1, []int{15} } func (m *QueueList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1333,7 +1442,7 @@ type CancellationResult struct { func (m *CancellationResult) Reset() { *m = CancellationResult{} } func (*CancellationResult) ProtoMessage() {} func (*CancellationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{14} + return fileDescriptor_e998bacb27df16c1, []int{16} } func (m *CancellationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1377,7 +1486,7 @@ type QueueGetRequest struct { func (m *QueueGetRequest) Reset() { *m = QueueGetRequest{} } func (*QueueGetRequest) ProtoMessage() {} func (*QueueGetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{15} + return fileDescriptor_e998bacb27df16c1, []int{17} } func (m *QueueGetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1421,7 +1530,7 @@ type StreamingQueueGetRequest struct { func (m *StreamingQueueGetRequest) Reset() { *m = StreamingQueueGetRequest{} } func (*StreamingQueueGetRequest) ProtoMessage() {} func (*StreamingQueueGetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{16} + return fileDescriptor_e998bacb27df16c1, []int{18} } func (m *StreamingQueueGetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1465,7 +1574,7 @@ type QueueDeleteRequest struct { func (m *QueueDeleteRequest) Reset() { *m = QueueDeleteRequest{} } func (*QueueDeleteRequest) ProtoMessage() {} func (*QueueDeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{17} + return fileDescriptor_e998bacb27df16c1, []int{19} } func (m *QueueDeleteRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1510,7 +1619,7 @@ type JobSetInfo struct { func (m *JobSetInfo) Reset() { *m = JobSetInfo{} } func (*JobSetInfo) ProtoMessage() {} func (*JobSetInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{18} + return fileDescriptor_e998bacb27df16c1, []int{20} } func (m *JobSetInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1568,7 +1677,7 @@ type QueueUpdateResponse struct { func (m *QueueUpdateResponse) Reset() { *m = QueueUpdateResponse{} } func (*QueueUpdateResponse) ProtoMessage() {} func (*QueueUpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{19} + return fileDescriptor_e998bacb27df16c1, []int{21} } func (m *QueueUpdateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1618,7 +1727,7 @@ type BatchQueueUpdateResponse struct { func (m *BatchQueueUpdateResponse) Reset() { *m = BatchQueueUpdateResponse{} } func (*BatchQueueUpdateResponse) ProtoMessage() {} func (*BatchQueueUpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{20} + return fileDescriptor_e998bacb27df16c1, []int{22} } func (m *BatchQueueUpdateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1662,7 +1771,7 @@ type QueueCreateResponse struct { func (m *QueueCreateResponse) Reset() { *m = QueueCreateResponse{} } func (*QueueCreateResponse) ProtoMessage() {} func (*QueueCreateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{21} + 
return fileDescriptor_e998bacb27df16c1, []int{23} } func (m *QueueCreateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1712,7 +1821,7 @@ type BatchQueueCreateResponse struct { func (m *BatchQueueCreateResponse) Reset() { *m = BatchQueueCreateResponse{} } func (*BatchQueueCreateResponse) ProtoMessage() {} func (*BatchQueueCreateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{22} + return fileDescriptor_e998bacb27df16c1, []int{24} } func (m *BatchQueueCreateResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1755,7 +1864,7 @@ type EndMarker struct { func (m *EndMarker) Reset() { *m = EndMarker{} } func (*EndMarker) ProtoMessage() {} func (*EndMarker) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{23} + return fileDescriptor_e998bacb27df16c1, []int{25} } func (m *EndMarker) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1794,7 +1903,7 @@ type StreamingQueueMessage struct { func (m *StreamingQueueMessage) Reset() { *m = StreamingQueueMessage{} } func (*StreamingQueueMessage) ProtoMessage() {} func (*StreamingQueueMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_e998bacb27df16c1, []int{24} + return fileDescriptor_e998bacb27df16c1, []int{26} } func (m *StreamingQueueMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1893,9 +2002,15 @@ func init() { proto.RegisterType((*JobSubmitResponseItem)(nil), "api.JobSubmitResponseItem") proto.RegisterType((*JobSubmitResponse)(nil), "api.JobSubmitResponse") proto.RegisterType((*Queue)(nil), "api.Queue") + proto.RegisterMapType((map[string]PriorityClassResourceLimits)(nil), "api.Queue.ResourceLimitsByPriorityClassNameEntry") proto.RegisterMapType((map[string]float64)(nil), "api.Queue.ResourceLimitsEntry") proto.RegisterType((*Queue_Permissions)(nil), "api.Queue.Permissions") proto.RegisterType((*Queue_Permissions_Subject)(nil), "api.Queue.Permissions.Subject") + proto.RegisterType((*PriorityClassResourceLimits)(nil), "api.PriorityClassResourceLimits") + proto.RegisterMapType((map[string]PriorityClassPoolResourceLimits)(nil), "api.PriorityClassResourceLimits.MaximumResourceFractionByPoolEntry") + proto.RegisterMapType((map[string]float64)(nil), "api.PriorityClassResourceLimits.MaximumResourceFractionEntry") + proto.RegisterType((*PriorityClassPoolResourceLimits)(nil), "api.PriorityClassPoolResourceLimits") + proto.RegisterMapType((map[string]float64)(nil), "api.PriorityClassPoolResourceLimits.MaximumResourceFractionEntry") proto.RegisterType((*QueueList)(nil), "api.QueueList") proto.RegisterType((*CancellationResult)(nil), "api.CancellationResult") proto.RegisterType((*QueueGetRequest)(nil), "api.QueueGetRequest") @@ -1913,174 +2028,187 @@ func init() { func init() { proto.RegisterFile("pkg/api/submit.proto", fileDescriptor_e998bacb27df16c1) } var fileDescriptor_e998bacb27df16c1 = []byte{ - // 2658 bytes of a gzipped FileDescriptorProto - (2658 bytes of gzipped descriptor data omitted) + // 2878 bytes of a gzipped FileDescriptorProto + (2878 bytes of gzipped descriptor data omitted)
0xe0, 0x27, 0x66, 0x15, 0xaf, 0x51, 0x73, 0xb9, 0xef, 0x89, 0x8d, 0x9b, 0xfe, 0xc5, 0xda, + 0x4b, 0x40, 0xb6, 0xd8, 0xc1, 0x5c, 0xe4, 0xbc, 0xfc, 0xc6, 0x7d, 0xb9, 0xd5, 0xcc, 0x5c, 0xb1, + 0x7b, 0x53, 0x08, 0xd6, 0x2e, 0xf5, 0x21, 0x49, 0xd9, 0x70, 0xf9, 0x2c, 0xf9, 0x17, 0x92, 0x16, + 0x26, 0x2c, 0x08, 0x43, 0x07, 0xf7, 0x12, 0xdf, 0x17, 0xcf, 0x33, 0x30, 0x5c, 0x83, 0x31, 0x6a, + 0xdb, 0x96, 0x2d, 0xea, 0x44, 0x40, 0x24, 0x45, 0x40, 0xf9, 0x10, 0x66, 0xbb, 0xf4, 0x91, 0x43, + 0x20, 0x7c, 0x2e, 0xe2, 0x6b, 0x6f, 0x30, 0xe2, 0xdf, 0x23, 0xd5, 0x39, 0x18, 0x05, 0x36, 0xe6, + 0xd2, 0xad, 0x66, 0x26, 0x85, 0xe3, 0x4f, 0x00, 0x8a, 0x91, 0x4e, 0x76, 0xee, 0x29, 0x7f, 0x9e, + 0x80, 0x31, 0xbc, 0xe0, 0xc9, 0x55, 0x18, 0xc5, 0x81, 0x9a, 0x7b, 0x87, 0x43, 0xa5, 0x19, 0x1e, + 0xa6, 0x71, 0x9f, 0xe4, 0x61, 0xc6, 0x4f, 0xc4, 0xd2, 0x81, 0x5a, 0x71, 0x3d, 0x2f, 0xa5, 0xdc, + 0xe5, 0x56, 0x33, 0x23, 0xfb, 0x5b, 0x77, 0x70, 0x47, 0x60, 0x9e, 0x0e, 0xef, 0xb0, 0xf9, 0x1f, + 0xfb, 0x14, 0xde, 0xb6, 0xe0, 0xd0, 0x17, 0xe7, 0x55, 0x97, 0xc1, 0xbc, 0xdd, 0x10, 0xab, 0x6e, + 0x80, 0xb2, 0xe3, 0x80, 0xdd, 0x8d, 0xcf, 0xcb, 0x47, 0x26, 0x3c, 0x0e, 0x88, 0x77, 0x31, 0x27, + 0x04, 0x98, 0x54, 0x61, 0xa6, 0x7d, 0xa5, 0xd7, 0x74, 0x43, 0x77, 0xfd, 0x67, 0xd3, 0x34, 0x06, + 0x16, 0x83, 0xd1, 0xbe, 0xc3, 0x1f, 0x20, 0x01, 0xcf, 0x66, 0x16, 0x5c, 0xd9, 0x0e, 0x6d, 0x84, + 0x5a, 0x92, 0xe9, 0xf0, 0x1e, 0xf9, 0x9d, 0x04, 0x57, 0x3b, 0x34, 0x95, 0xca, 0xa7, 0xed, 0x53, + 0x5c, 0xaa, 0xd4, 0x54, 0xc7, 0xe1, 0x8f, 0x18, 0xe3, 0xc2, 0x23, 0x6a, 0x2f, 0x03, 0x72, 0xa7, + 0xfe, 0x69, 0xde, 0x62, 0x4c, 0x05, 0xd5, 0xa0, 0xdc, 0xa6, 0x6b, 0xde, 0x0d, 0x7f, 0xc5, 0x1e, + 0x44, 0x5f, 0x1c, 0x4c, 0x42, 0xf6, 0x20, 0x51, 0xa7, 0xb6, 0xa1, 0x3b, 0x0e, 0x76, 0xee, 0xfc, + 0x69, 0x77, 0x51, 0xb0, 0x6a, 0x37, 0xd8, 0xe5, 0xf1, 0x16, 0xc8, 0xc5, 0x78, 0x0b, 0x70, 0xea, + 0x5f, 0x12, 0x24, 0x04, 0x3e, 0x52, 0x84, 0x09, 0xa7, 0x51, 0x3e, 0xa2, 0x95, 0x76, 0x85, 0x49, + 0xf7, 0xd6, 0x90, 0xdd, 0xe3, 0x64, 0x5e, 0xf7, 0xe0, 0xf1, 0x84, 0xba, 0x07, 0x0f, 0xc3, 0x33, + 0x4e, 0xed, 0x32, 0x7f, 0xe8, 0xf0, 0xcf, 0x38, 0x03, 0x42, 0x67, 0x9c, 0x01, 0xa9, 0xf7, 0x60, + 0xdc, 0x93, 0xcb, 0x32, 0xfe, 0x58, 0x37, 0x35, 0x31, 0xe3, 0xd9, 0x5a, 0xcc, 0x78, 0xb6, 0x6e, + 0x9f, 0x8c, 0xc8, 0xd9, 0x27, 0x23, 0xa5, 0xc3, 0x5c, 0x8f, 0xbc, 0x79, 0x86, 0x2a, 0x25, 0x0d, + 0xbc, 0x2b, 0x3f, 0x96, 0xe0, 0xea, 0x70, 0x29, 0x32, 0x9c, 0xfa, 0xb7, 0x44, 0xf5, 0xfe, 0x30, + 0x15, 0x12, 0xd8, 0xa1, 0x6d, 0x50, 0x19, 0xfd, 0xf9, 0x18, 0x5c, 0x3a, 0x83, 0x9f, 0x35, 0xd9, + 0xcb, 0x86, 0xfa, 0x43, 0xdd, 0x68, 0x18, 0x41, 0x87, 0x7d, 0x60, 0xab, 0x15, 0x56, 0xe4, 0xbd, + 0xbc, 0xf8, 0xce, 0x20, 0x2b, 0xb2, 0x0f, 0xb9, 0x04, 0x1f, 0xbd, 0xe3, 0xf1, 0xf3, 0xb3, 0x91, + 0xf1, 0xce, 0xc6, 0x92, 0xd1, 0x9b, 0xaa, 0xd8, 0x6f, 0x83, 0xfc, 0x41, 0x82, 0x2b, 0x7d, 0x8d, + 0xc3, 0x33, 0x6c, 0x59, 0x35, 0xcc, 0xb5, 0xc4, 0xc6, 0xd6, 0xb3, 0x1a, 0x99, 0x3b, 0xdd, 0xb5, + 0xac, 0x9a, 0x77, 0x51, 0x7a, 0xa6, 0xbe, 0x60, 0x9c, 0x45, 0x5b, 0x3c, 0x7b, 0x9b, 0x5d, 0x97, + 0x67, 0x05, 0xe4, 0xa2, 0x12, 0x51, 0x19, 0xec, 0xe0, 0x70, 0xaa, 0x1f, 0x85, 0x93, 0xf0, 0xa5, + 0xee, 0xc8, 0x62, 0x14, 0xce, 0x97, 0x88, 0x7f, 0x8c, 0x40, 0x66, 0x80, 0x0c, 0xf2, 0xf1, 0x10, + 0xc9, 0xb8, 0x39, 0x8c, 0x35, 0x17, 0x94, 0x90, 0xdf, 0xc4, 0x97, 0x55, 0xf2, 0x10, 0xc7, 0x92, + 0xfc, 0x40, 0x77, 0x5c, 0x72, 0x03, 0x62, 0xd8, 0x8a, 0xfa, 0x25, 0x1b, 0x82, 0x92, 0xcd, 0x9b, + 0x63, 0xbe, 0x2b, 0x36, 0xc7, 0x1c, 0x51, 0xf6, 0x81, 0xf0, 0x67, 0xc7, 0x9a, 0xd0, 0xbf, 0x91, + 0xdb, 0x30, 0x55, 0xe1, 0x28, 
0xd5, 0x84, 0x3e, 0x1b, 0x5f, 0xe3, 0xdb, 0x1b, 0xe1, 0x6e, 0x7b, + 0x52, 0xc4, 0x95, 0x9b, 0x30, 0x83, 0xda, 0xef, 0xd2, 0xf6, 0x6b, 0xf5, 0x90, 0x0d, 0x8c, 0x72, + 0x1b, 0xe4, 0x3d, 0xd7, 0xa6, 0xaa, 0xa1, 0x9b, 0xd5, 0x4e, 0x19, 0x2f, 0x42, 0xd4, 0x6c, 0x18, + 0x28, 0x62, 0x8a, 0x07, 0xd2, 0x6c, 0x18, 0x62, 0x20, 0xcd, 0x86, 0xa1, 0xbc, 0x09, 0x04, 0xf9, + 0xb6, 0x69, 0x8d, 0xba, 0xf4, 0xbc, 0xea, 0x3f, 0x91, 0x00, 0xf8, 0x3b, 0xe5, 0x8e, 0x79, 0x60, + 0x0d, 0xdd, 0x76, 0xdd, 0x84, 0x04, 0x46, 0x54, 0x2b, 0x1d, 0x59, 0x78, 0xd1, 0x49, 0xab, 0x63, + 0xbc, 0x5f, 0xe2, 0xf0, 0x7d, 0x2b, 0x74, 0xdb, 0x41, 0x80, 0x32, 0xd6, 0x1a, 0x55, 0x1d, 0x9f, + 0x35, 0x1a, 0xb0, 0x72, 0xb8, 0x93, 0x35, 0x40, 0x95, 0x27, 0x30, 0x87, 0xae, 0xee, 0xd7, 0x35, + 0xd5, 0x0d, 0xc6, 0x86, 0x37, 0xc4, 0x1f, 0x06, 0xc2, 0xd9, 0x70, 0xd6, 0x1c, 0x73, 0x8e, 0xb6, + 0xb8, 0x01, 0x72, 0x4e, 0x75, 0x2b, 0x87, 0xbd, 0xb4, 0xbf, 0x07, 0x53, 0x07, 0xaa, 0x5e, 0xf3, + 0x5f, 0xbe, 0xfc, 0x9c, 0x94, 0x03, 0x2b, 0xc2, 0x0c, 0x3c, 0xad, 0x38, 0xcb, 0xdb, 0x9d, 0x79, + 0x3a, 0x29, 0xe2, 0x6d, 0x7f, 0xb7, 0xf0, 0x8d, 0xe4, 0x9b, 0xf2, 0xb7, 0x43, 0xfb, 0x60, 0x7f, + 0xc3, 0x0c, 0xe7, 0xf0, 0x37, 0x01, 0xf1, 0xbc, 0xa9, 0x3d, 0x54, 0xed, 0x63, 0x6a, 0x2b, 0x1f, + 0x49, 0xb0, 0x10, 0x3e, 0x19, 0x0f, 0xa9, 0xe3, 0xa8, 0x55, 0x4a, 0xbe, 0x75, 0x3e, 0xff, 0xef, + 0x8d, 0x04, 0xcf, 0xd0, 0x51, 0x6a, 0x6a, 0x5e, 0x41, 0x9f, 0x46, 0xb6, 0xb6, 0x3e, 0x7e, 0xbe, + 0xa8, 0xd8, 0x70, 0xdd, 0x1b, 0x29, 0x32, 0xfa, 0xdc, 0x38, 0x8c, 0xd1, 0x13, 0x6a, 0xba, 0x6b, + 0x29, 0x48, 0x08, 0xbf, 0x6e, 0x92, 0x04, 0x8c, 0x7b, 0xcb, 0xe4, 0xc8, 0xda, 0x35, 0x48, 0x08, + 0x3f, 0x83, 0x91, 0x49, 0x98, 0x28, 0x58, 0x1a, 0xdd, 0xb5, 0x6c, 0x37, 0x39, 0xc2, 0x56, 0xf7, + 0xa8, 0xaa, 0xd5, 0x18, 0xa9, 0xb4, 0xf6, 0x4b, 0x09, 0x26, 0xfc, 0x87, 0x7f, 0x02, 0x10, 0x7b, + 0x7b, 0x3f, 0xbf, 0x9f, 0xdf, 0x4e, 0x8e, 0x30, 0x81, 0xbb, 0xf9, 0xc2, 0xf6, 0x4e, 0xe1, 0x6e, + 0x52, 0x62, 0x8b, 0xe2, 0x7e, 0xa1, 0xc0, 0x16, 0x11, 0x32, 0x05, 0xf1, 0xbd, 0xfd, 0xad, 0xad, + 0x7c, 0x7e, 0x3b, 0xbf, 0x9d, 0x8c, 0x32, 0xa6, 0x3b, 0x9b, 0x3b, 0x0f, 0xf2, 0xdb, 0xc9, 0x51, + 0x46, 0xb7, 0x5f, 0x78, 0xab, 0xf0, 0xe8, 0xdd, 0x42, 0x72, 0x8c, 0xd3, 0xe5, 0x1e, 0xee, 0x3c, + 0x7e, 0x9c, 0xdf, 0x4e, 0xc6, 0x18, 0xdd, 0x83, 0xfc, 0xe6, 0x5e, 0x7e, 0x3b, 0x39, 0xce, 0xb6, + 0x76, 0x8b, 0xf9, 0xfc, 0xc3, 0x5d, 0xb6, 0x35, 0xc1, 0x96, 0x5b, 0x9b, 0x85, 0xad, 0xfc, 0x03, + 0x26, 0x25, 0xbe, 0xf1, 0xfb, 0x09, 0x88, 0xf1, 0x11, 0x8e, 0xbc, 0x03, 0xc0, 0xff, 0xc2, 0xe3, + 0xba, 0xd0, 0xf3, 0x97, 0xaf, 0xd4, 0x62, 0xef, 0xb9, 0x4f, 0x59, 0xfe, 0xe9, 0x9f, 0xfe, 0xfe, + 0xab, 0xc8, 0x9c, 0x32, 0xbd, 0x7e, 0x72, 0x7d, 0xfd, 0xc8, 0x2a, 0x7b, 0xff, 0x22, 0x74, 0x4b, + 0x5a, 0x23, 0xef, 0x02, 0xf0, 0xda, 0x1b, 0x96, 0x1b, 0xfa, 0x19, 0x28, 0xb5, 0x84, 0x70, 0x77, + 0x8d, 0xee, 0x16, 0xcc, 0x0b, 0x30, 0x13, 0xfc, 0x03, 0x98, 0x6c, 0x0b, 0xde, 0xa3, 0x2e, 0x91, + 0x85, 0x5f, 0x76, 0xc2, 0xd2, 0x17, 0xbb, 0x9e, 0x27, 0xf3, 0xec, 0x43, 0x2b, 0x97, 0x51, 0xf8, + 0xa2, 0x32, 0xeb, 0x09, 0x77, 0xa8, 0x2b, 0xc8, 0x37, 0x21, 0x29, 0xbe, 0x36, 0xa0, 0xf9, 0x97, + 0x7a, 0xbf, 0x43, 0x70, 0x35, 0x97, 0xcf, 0x7a, 0xa4, 0x50, 0x32, 0xa8, 0x6c, 0x59, 0x99, 0xf7, + 0x3d, 0x11, 0x1e, 0x1c, 0x28, 0xd3, 0x77, 0x17, 0x12, 0xfc, 0x08, 0xf1, 0x51, 0x58, 0xc8, 0xef, + 0xbe, 0x0e, 0xcc, 0xa3, 0xcc, 0x69, 0x25, 0xce, 0x64, 0x62, 0xb2, 0x33, 0x41, 0x15, 0x98, 0x14, + 0x04, 0x39, 0x64, 0x3a, 0x90, 0xc4, 0xee, 0xd1, 0xd4, 0x0b, 0xb8, 0xee, 0x77, 0xd2, 0x95, 0x97, + 0x50, 0x68, 0x5a, 0x59, 0x66, 0x42, 0xcb, 0x8c, 0x8a, 
0x6a, 0xeb, 0xfc, 0xbd, 0xd6, 0x3b, 0xfb, + 0x4c, 0x49, 0x01, 0x12, 0xbc, 0xc0, 0x0d, 0x6f, 0xed, 0x25, 0x14, 0xbc, 0x90, 0x4a, 0xb6, 0xad, + 0x5d, 0xff, 0x31, 0xbb, 0x56, 0x3e, 0xf4, 0x8c, 0x16, 0xe4, 0x0d, 0x36, 0x3a, 0x5c, 0x5d, 0x7d, + 0xa3, 0x53, 0x21, 0xa3, 0x1b, 0x48, 0x23, 0x18, 0xfd, 0x5d, 0x48, 0xf0, 0xfb, 0x92, 0x1b, 0xbd, + 0x14, 0xe8, 0x08, 0x5d, 0xa3, 0x7d, 0x3d, 0x90, 0x51, 0x0b, 0x59, 0xeb, 0xf2, 0x80, 0xdc, 0x81, + 0x89, 0xbb, 0xd4, 0xe5, 0x62, 0xe7, 0x03, 0xb1, 0xc1, 0xad, 0x9e, 0x12, 0x22, 0xe4, 0xcb, 0x21, + 0xdd, 0x72, 0x34, 0x88, 0xfb, 0x72, 0x1c, 0xc2, 0x7d, 0xee, 0xd7, 0x27, 0xa4, 0x52, 0x3d, 0xb6, + 0xbd, 0x62, 0xa9, 0xa4, 0x50, 0xc3, 0x3c, 0x21, 0x62, 0x3c, 0x78, 0x20, 0x5e, 0x93, 0xc8, 0x2d, + 0x88, 0xdd, 0xc3, 0xff, 0x67, 0x23, 0x7d, 0x3c, 0x4d, 0xf1, 0xc3, 0xc4, 0x89, 0xb6, 0x0e, 0x69, + 0xe5, 0xb8, 0x5d, 0xd7, 0xdf, 0xff, 0xfc, 0x6f, 0xe9, 0x91, 0x9f, 0x3c, 0x4d, 0x4b, 0x9f, 0x3e, + 0x4d, 0x4b, 0x9f, 0x3d, 0x4d, 0x4b, 0x7f, 0x7d, 0x9a, 0x96, 0x3e, 0xfa, 0x32, 0x3d, 0xf2, 0xd9, + 0x97, 0xe9, 0x91, 0xcf, 0xbf, 0x4c, 0x8f, 0x7c, 0xef, 0xff, 0x84, 0x7f, 0xb1, 0x53, 0x6d, 0x43, + 0xd5, 0xd4, 0xba, 0x6d, 0xb1, 0x61, 0xd7, 0x5b, 0xad, 0x7b, 0xff, 0x53, 0xf7, 0x49, 0x64, 0x7e, + 0x13, 0x81, 0x5d, 0xbe, 0x9d, 0xdd, 0xb1, 0xb2, 0x9b, 0x75, 0xbd, 0x1c, 0x43, 0x5b, 0x5e, 0xff, + 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe2, 0xc5, 0xd2, 0xf1, 0x70, 0x28, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -3557,6 +3685,30 @@ func (m *Queue) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ResourceLimitsByPriorityClassName) > 0 { + for k := range m.ResourceLimitsByPriorityClassName { + v := m.ResourceLimitsByPriorityClassName[k] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSubmit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintSubmit(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintSubmit(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } if len(m.Permissions) > 0 { for iNdEx := len(m.Permissions) - 1; iNdEx >= 0; iNdEx-- { { @@ -3706,6 +3858,112 @@ func (m *Queue_Permissions_Subject) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } +func (m *PriorityClassResourceLimits) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClassResourceLimits) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassResourceLimits) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MaximumResourceFractionByPool) > 0 { + for k := range m.MaximumResourceFractionByPool { + v := m.MaximumResourceFractionByPool[k] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintSubmit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintSubmit(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintSubmit(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.MaximumResourceFraction) > 0 { + for k := range m.MaximumResourceFraction { + v := m.MaximumResourceFraction[k] + baseI := i + i -= 8 + 
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(v)))) + i-- + dAtA[i] = 0x11 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintSubmit(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintSubmit(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PriorityClassPoolResourceLimits) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClassPoolResourceLimits) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassPoolResourceLimits) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MaximumResourceFraction) > 0 { + for k := range m.MaximumResourceFraction { + v := m.MaximumResourceFraction[k] + baseI := i + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(v)))) + i-- + dAtA[i] = 0x11 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintSubmit(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintSubmit(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *QueueList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4622,6 +4880,15 @@ func (m *Queue) Size() (n int) { n += 1 + l + sovSubmit(uint64(l)) } } + if len(m.ResourceLimitsByPriorityClassName) > 0 { + for k, v := range m.ResourceLimitsByPriorityClassName { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovSubmit(uint64(len(k))) + 1 + l + sovSubmit(uint64(l)) + n += mapEntrySize + 1 + sovSubmit(uint64(mapEntrySize)) + } + } return n } @@ -4663,6 +4930,49 @@ func (m *Queue_Permissions_Subject) Size() (n int) { return n } +func (m *PriorityClassResourceLimits) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MaximumResourceFraction) > 0 { + for k, v := range m.MaximumResourceFraction { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovSubmit(uint64(len(k))) + 1 + 8 + n += mapEntrySize + 1 + sovSubmit(uint64(mapEntrySize)) + } + } + if len(m.MaximumResourceFractionByPool) > 0 { + for k, v := range m.MaximumResourceFractionByPool { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovSubmit(uint64(len(k))) + 1 + l + sovSubmit(uint64(l)) + n += mapEntrySize + 1 + sovSubmit(uint64(mapEntrySize)) + } + } + return n +} + +func (m *PriorityClassPoolResourceLimits) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MaximumResourceFraction) > 0 { + for k, v := range m.MaximumResourceFraction { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovSubmit(uint64(len(k))) + 1 + 8 + n += mapEntrySize + 1 + sovSubmit(uint64(mapEntrySize)) + } + } + return n +} + func (m *QueueList) Size() (n int) { if m == nil { return 0 @@ -5186,6 +5496,16 @@ func (this *Queue) String() string { mapStringForResourceLimits += fmt.Sprintf("%v: %v,", k, this.ResourceLimits[k]) } mapStringForResourceLimits += "}" + keysForResourceLimitsByPriorityClassName := make([]string, 0, len(this.ResourceLimitsByPriorityClassName)) + for k, _ := range this.ResourceLimitsByPriorityClassName { + keysForResourceLimitsByPriorityClassName = append(keysForResourceLimitsByPriorityClassName, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForResourceLimitsByPriorityClassName) + 
mapStringForResourceLimitsByPriorityClassName := "map[string]PriorityClassResourceLimits{" + for _, k := range keysForResourceLimitsByPriorityClassName { + mapStringForResourceLimitsByPriorityClassName += fmt.Sprintf("%v: %v,", k, this.ResourceLimitsByPriorityClassName[k]) + } + mapStringForResourceLimitsByPriorityClassName += "}" s := strings.Join([]string{`&Queue{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `PriorityFactor:` + fmt.Sprintf("%v", this.PriorityFactor) + `,`, @@ -5193,6 +5513,7 @@ func (this *Queue) String() string { `GroupOwners:` + fmt.Sprintf("%v", this.GroupOwners) + `,`, `ResourceLimits:` + mapStringForResourceLimits + `,`, `Permissions:` + repeatedStringForPermissions + `,`, + `ResourceLimitsByPriorityClassName:` + mapStringForResourceLimitsByPriorityClassName + `,`, `}`, }, "") return s @@ -5224,6 +5545,57 @@ func (this *Queue_Permissions_Subject) String() string { }, "") return s } +func (this *PriorityClassResourceLimits) String() string { + if this == nil { + return "nil" + } + keysForMaximumResourceFraction := make([]string, 0, len(this.MaximumResourceFraction)) + for k, _ := range this.MaximumResourceFraction { + keysForMaximumResourceFraction = append(keysForMaximumResourceFraction, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaximumResourceFraction) + mapStringForMaximumResourceFraction := "map[string]float64{" + for _, k := range keysForMaximumResourceFraction { + mapStringForMaximumResourceFraction += fmt.Sprintf("%v: %v,", k, this.MaximumResourceFraction[k]) + } + mapStringForMaximumResourceFraction += "}" + keysForMaximumResourceFractionByPool := make([]string, 0, len(this.MaximumResourceFractionByPool)) + for k, _ := range this.MaximumResourceFractionByPool { + keysForMaximumResourceFractionByPool = append(keysForMaximumResourceFractionByPool, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaximumResourceFractionByPool) + mapStringForMaximumResourceFractionByPool := "map[string]PriorityClassPoolResourceLimits{" + for _, k := range keysForMaximumResourceFractionByPool { + mapStringForMaximumResourceFractionByPool += fmt.Sprintf("%v: %v,", k, this.MaximumResourceFractionByPool[k]) + } + mapStringForMaximumResourceFractionByPool += "}" + s := strings.Join([]string{`&PriorityClassResourceLimits{`, + `MaximumResourceFraction:` + mapStringForMaximumResourceFraction + `,`, + `MaximumResourceFractionByPool:` + mapStringForMaximumResourceFractionByPool + `,`, + `}`, + }, "") + return s +} +func (this *PriorityClassPoolResourceLimits) String() string { + if this == nil { + return "nil" + } + keysForMaximumResourceFraction := make([]string, 0, len(this.MaximumResourceFraction)) + for k, _ := range this.MaximumResourceFraction { + keysForMaximumResourceFraction = append(keysForMaximumResourceFraction, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaximumResourceFraction) + mapStringForMaximumResourceFraction := "map[string]float64{" + for _, k := range keysForMaximumResourceFraction { + mapStringForMaximumResourceFraction += fmt.Sprintf("%v: %v,", k, this.MaximumResourceFraction[k]) + } + mapStringForMaximumResourceFraction += "}" + s := strings.Join([]string{`&PriorityClassPoolResourceLimits{`, + `MaximumResourceFraction:` + mapStringForMaximumResourceFraction + `,`, + `}`, + }, "") + return s +} func (this *QueueList) String() string { if this == nil { return "nil" @@ -9064,9 +9436,138 @@ func (m *Queue) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipSubmit(dAtA[iNdEx:]) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceLimitsByPriorityClassName", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSubmit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSubmit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceLimitsByPriorityClassName == nil { + m.ResourceLimitsByPriorityClassName = make(map[string]PriorityClassResourceLimits) + } + var mapkey string + mapvalue := &PriorityClassResourceLimits{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthSubmit + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthSubmit + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthSubmit + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthSubmit + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &PriorityClassResourceLimits{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipSubmit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSubmit + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ResourceLimitsByPriorityClassName[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSubmit(dAtA[iNdEx:]) if err != nil { return err } @@ -9315,6 +9816,447 @@ func (m *Queue_Permissions_Subject) Unmarshal(dAtA []byte) error { } return nil } +func (m *PriorityClassResourceLimits) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: PriorityClassResourceLimits: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClassResourceLimits: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaximumResourceFraction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSubmit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSubmit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaximumResourceFraction == nil { + m.MaximumResourceFraction = make(map[string]float64) + } + var mapkey string + var mapvalue float64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthSubmit + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthSubmit + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + mapvaluetemp = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + mapvalue = math.Float64frombits(mapvaluetemp) + } else { + iNdEx = entryPreIndex + skippy, err := skipSubmit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSubmit + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MaximumResourceFraction[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaximumResourceFractionByPool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSubmit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSubmit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaximumResourceFractionByPool == nil { + m.MaximumResourceFractionByPool = make(map[string]PriorityClassPoolResourceLimits) + } + var mapkey string + mapvalue := &PriorityClassPoolResourceLimits{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthSubmit + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthSubmit + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthSubmit + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthSubmit + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &PriorityClassPoolResourceLimits{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipSubmit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSubmit + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MaximumResourceFractionByPool[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSubmit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthSubmit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityClassPoolResourceLimits) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClassPoolResourceLimits: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClassPoolResourceLimits: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaximumResourceFraction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSubmit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSubmit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSubmit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaximumResourceFraction == nil { + 
				m.MaximumResourceFraction = make(map[string]float64)
+			}
+			var mapkey string
+			var mapvalue float64
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSubmit
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSubmit
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthSubmit
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthSubmit
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapvaluetemp uint64
+					if (iNdEx + 8) > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvaluetemp = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+					iNdEx += 8
+					mapvalue = math.Float64frombits(mapvaluetemp)
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipSubmit(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthSubmit
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.MaximumResourceFraction[mapkey] = mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSubmit(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthSubmit
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func (m *QueueList) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
diff --git a/pkg/api/submit.proto b/pkg/api/submit.proto
index 160cafda737..c68038745bb 100644
--- a/pkg/api/submit.proto
+++ b/pkg/api/submit.proto
@@ -183,10 +183,26 @@ message Queue {
     double priority_factor = 2;
     repeated string user_owners = 3;
     repeated string group_owners = 4;
-    map<string, double> resource_limits = 5;
+    map<string, double> resource_limits = 5 [deprecated = true];
+    // Map from priority class name to resource limit overrides for this queue and priority class.
+    // If provided for a priority class, global limits for that priority class do not apply to this queue.
+    map<string, PriorityClassResourceLimits> resource_limits_by_priority_class_name = 7 [(gogoproto.nullable) = false];
     repeated Permissions permissions = 6;
 }
+message PriorityClassResourceLimits {
+    // Limits resources assigned to jobs of this priority class.
+    // Specifically, jobs of this priority class are only scheduled if doing so does not exceed this limit.
+    map<string, double> maximum_resource_fraction = 1 [(gogoproto.nullable) = false];
+    // Per-pool override of maximum_resource_fraction.
+    // If missing for a particular pool, maximum_resource_fraction is used instead for that pool.
+    map<string, PriorityClassPoolResourceLimits> maximum_resource_fraction_by_pool = 2 [(gogoproto.nullable) = false];
+}
+
+message PriorityClassPoolResourceLimits {
+    map<string, double> maximum_resource_fraction = 1 [(gogoproto.nullable) = false];
+}
+
 // swagger:model
 message QueueList {
     repeated Queue queues = 1;
diff --git a/pkg/armadaevents/events.pb.go b/pkg/armadaevents/events.pb.go
index a38940932b6..6185d70f957 100644
--- a/pkg/armadaevents/events.pb.go
+++ b/pkg/armadaevents/events.pb.go
@@ -3526,233 +3526,233 @@ func init() {
 func init() { proto.RegisterFile("pkg/armadaevents/events.proto", fileDescriptor_6aab92ca59e015f8) }
 var fileDescriptor_6aab92ca59e015f8 = []byte{
-	// 3615 bytes of a gzipped FileDescriptorProto
+	// 3604 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5b, 0x4b, 0x6c, 0x1b, 0xd7,
-	0x7a, 0xf6, 0x90, 0x12, 0x1f, 0x3f, 0xf5, 0xa0, 0x8f, 0x25, 0x85, 0x56, 0x6c, 0x51, 0x1e, 0xa7,
-	0x8d, 0x13, 0x24, 0x54, 0xe2, 0x3c, 0x90, 0x47, 0x91, 0x40, 0xb4, 0x15, 0xdb, 0x8a, 0x65, 0x2b,
-	0x94, 0x95, 0xba, 0x41, 0x0a, 0x66, 0xc8, 0x39, 0xa2, 0xc6, 0x1a, 0xce, 0x4c, 0xe6, 0x21, 0x4b,
-	0x40, 0x16, 0x4d, 0xd1, 0xa6, 0xbb, 0xd4, 0x40, 0xb3, 0x28, 0xd0, 0x45, 0xba, 0x6d, 0x80, 0xae,
-	0xbb, 0xee, 0xaa, 0x59, 0x14, 0x45, 0xba, 0xeb, 0x8a, 0xbd, 0x48, 0x70, 0x17, 0x97, 0x8b, 0xbb,
-	0xbe, 0xf7, 0x6e, 0xee, 0xc5, 0x79, 0xcd, 0x9c, 0x33, 0x1c, 0xda, 0xf2, 0xeb, 0x3a, 0x17, 0x5e,
-	0x49, 0xf3, 0xfd, 0xcf, 0xf3, 0xfa, 0xe7, 0xff, 0xff, 0x39, 0x84, 0xd3, 0xde, 0x5e, 0x6f, 0xc5,
-	0xf0, 0xfb, 0x86, 0x69, 0xe0, 0x7d, 0xec, 0x84, 0xc1, 0x0a, 0xfb, 0xd3, 0xf0, 0x7c, 0x37, 0x74,
-	0xd1, 0x94, 0x4c, 0x5a, 0xd4, 0xf7, 0xde, 0x0a, 0x1a, 0x96, 0xbb, 0x62, 0x78, 0xd6, 0x4a, 0xd7,
-	0xf5, 0xf1, 0xca, 0xfe, 0xab, 0x2b, 0x3d, 0xec, 0x60, 0xdf, 0x08, 0xb1, 0xc9, 0x24, 0x16, 0xcf,
-	0x49, 0x3c, 0x0e, 0x0e, 0x6f, 0xbb, 0xfe, 0x9e, 0xe5, 0xf4, 0xb2, 0x38, 0xeb, 0x3d, 0xd7, 0xed,
-	0xd9, 0x78, 0x85, 0x3e, 0x75, 0xa2, 0x9d, 0x95, 0xd0, 0xea, 0xe3, 0x20, 0x34, 0xfa, 0x1e, 0x67,
-	0x58, 0x4a, 0x33, 0xdc, 0xf6, 0x0d, 0xcf, 0xc3, 0x3e, 0x77, 0x6e, 0xf1, 0xf5, 0xc4, 0x54, 0xdf,
-	0xe8, 0xee, 0x5a, 0x0e, 0xf6, 0x0f, 0x57, 0xe8, 0x78, 0x3c, 0x6b, 0xc5, 0xc7, 0x81, 0x1b, 0xf9,
-	0x5d, 0x3c, 0x62, 0xf6, 0xe5, 0x9e, 0x15, 0xee, 0x46, 0x9d, 0x46, 0xd7, 0xed, 0xaf, 0xf4, 0xdc,
-	0x9e, 0x9b, 0xa8, 0x27, 0x4f, 0xf4, 0x81, 0xfe, 0xc7, 0xd9, 0xdf, 0xb1, 0x9c, 0x10, 0xfb, 0x8e,
-	0x61, 0xaf, 0x04, 0xdd, 0x5d, 0x6c, 0x46, 0x36, 0xf6, 0x93, 0xff, 0xdc, 0xce, 0x2d, 0xdc, 0x0d,
-	0x83, 0x11, 0x80, 0xc9, 0xea, 0x77, 0xe6, 0x60, 0x7a, 0x8d, 0x4c, 0xdd, 0x16, 0xfe, 0x3c, 0xc2,
-	0x4e, 0x17, 0xa3, 0x17, 0x60, 0xf2, 0xf3, 0x08, 0x47, 0xb8, 0xa6, 0x2d, 0x6b, 0xe7, 0xca, 0xcd,
-	0x13, 0xc3, 0x41, 0x7d, 0x96, 0x02, 0x2f, 0xb9, 0x7d, 0x2b, 0xc4, 0x7d, 0x2f, 0x3c, 0x6c, 0x31,
-	0x0e, 0xf4, 0x0e, 0x4c, 0xdd, 0x72, 0x3b, 0xed, 0x00, 0x87, 0x6d, 0xc7, 0xe8, 0xe3, 0x5a, 0x8e,
-	0x4a, 0xd4, 0x86, 0x83, 0xfa, 0xdc, 0x2d, 0xb7, 0xb3, 0x85, 0xc3, 0x6b, 0x46, 0x5f, 0x16, 0x83,
-	0x04, 0x45, 0x2f, 0x43, 0x31, 0x0a, 0xb0, 0xdf, 0xb6, 0xcc, 0x5a, 0x9e, 0x8a, 0xcd, 0x0d, 0x07,
-	0xf5, 0x2a, 0x81, 0xae, 0x98, 0x92, 0x48, 0x81, 0x21, 0xe8, 0x25, 0x28, 0xf4, 0x7c, 0x37, 0xf2,
-	0x82, 0xda, 0xc4, 0x72, 0x5e, 0x70, 0x33, 0x44, 0xe6, 0x66, 0x08, 0xba, 0x0e, 0x05, 0xb6, 0x1f,
-	0x6a, 0x93, 0xcb, 0xf9, 0x73, 0x95, 0xf3, 0x67, 0x1a, 0xf2, 0x26, 0x69, 0x28, 0x03, 0x66, 0x4f,
-	0x4c, 0x21, 0xa3, 0xcb, 0x0a, 0xf9, 0xb6, 0xfa, 0xd5, 0x71, 0x98, 0xa4, 0x7c, 0xe8, 0x3a, 0x14,
-	0xbb, 0x3e, 0x26, 0x8b, 0x55, 0x43, 0xcb, 0xda, 0xb9, 0xca, 0xf9, 0xc5, 0x06,
0xdb, 0x03, 0x0d, - 0xb1, 0x48, 0x8d, 0x1b, 0x62, 0x93, 0x34, 0x4f, 0x0e, 0x07, 0xf5, 0xe3, 0x9c, 0x3d, 0xd1, 0x7a, - 0xe7, 0xff, 0xeb, 0x5a, 0x4b, 0x68, 0x41, 0x9b, 0x50, 0x0e, 0xa2, 0x4e, 0xdf, 0x0a, 0xd7, 0xdd, - 0x0e, 0x9d, 0xf3, 0xca, 0xf9, 0x67, 0x54, 0x77, 0xb7, 0x04, 0xb9, 0xf9, 0xcc, 0x70, 0x50, 0x3f, - 0x11, 0x73, 0x27, 0x1a, 0x2f, 0x1f, 0x6b, 0x25, 0x4a, 0xd0, 0x2e, 0xcc, 0xfa, 0xd8, 0xf3, 0x2d, - 0xd7, 0xb7, 0x42, 0x2b, 0xc0, 0x44, 0x6f, 0x8e, 0xea, 0x3d, 0xad, 0xea, 0x6d, 0xa9, 0x4c, 0xcd, - 0xd3, 0xc3, 0x41, 0xfd, 0x64, 0x4a, 0x52, 0xb1, 0x91, 0x56, 0x8b, 0x42, 0x40, 0x29, 0x68, 0x0b, - 0x87, 0x74, 0x3d, 0x2b, 0xe7, 0x97, 0xef, 0x6a, 0x6c, 0x0b, 0x87, 0xcd, 0xe5, 0xe1, 0xa0, 0x7e, - 0x6a, 0x54, 0x5e, 0x31, 0x99, 0xa1, 0x1f, 0xd9, 0x50, 0x95, 0x51, 0x93, 0x0c, 0x70, 0x82, 0xda, - 0x5c, 0x1a, 0x6f, 0x93, 0x70, 0x35, 0x97, 0x86, 0x83, 0xfa, 0x62, 0x5a, 0x56, 0xb1, 0x37, 0xa2, - 0x99, 0xac, 0x4f, 0xd7, 0x70, 0xba, 0xd8, 0x26, 0x66, 0x26, 0xb3, 0xd6, 0xe7, 0x82, 0x20, 0xb3, - 0xf5, 0x89, 0xb9, 0xd5, 0xf5, 0x89, 0x61, 0xf4, 0x29, 0x4c, 0xc5, 0x0f, 0x64, 0xbe, 0x0a, 0x7c, - 0x1f, 0x65, 0x2b, 0x25, 0x33, 0xb5, 0x38, 0x1c, 0xd4, 0x17, 0x64, 0x19, 0x45, 0xb5, 0xa2, 0x2d, - 0xd1, 0x6e, 0xb3, 0x99, 0x29, 0x8e, 0xd7, 0xce, 0x38, 0x64, 0xed, 0xf6, 0xe8, 0x8c, 0x28, 0xda, - 0x88, 0x76, 0x72, 0x88, 0xa3, 0x6e, 0x17, 0x63, 0x13, 0x9b, 0xb5, 0x52, 0x96, 0xf6, 0x75, 0x89, - 0x83, 0x69, 0x97, 0x65, 0x54, 0xed, 0x32, 0x85, 0xcc, 0xf5, 0x2d, 0xb7, 0xb3, 0xe6, 0xfb, 0xae, - 0x1f, 0xd4, 0xca, 0x59, 0x73, 0xbd, 0x2e, 0xc8, 0x6c, 0xae, 0x63, 0x6e, 0x75, 0xae, 0x63, 0x98, - 0xfb, 0xdb, 0x8a, 0x9c, 0xab, 0xd8, 0x08, 0xb0, 0x59, 0x83, 0x31, 0xfe, 0xc6, 0x1c, 0xb1, 0xbf, - 0x31, 0x32, 0xe2, 0x6f, 0x4c, 0x41, 0x26, 0xcc, 0xb0, 0xe7, 0xd5, 0x20, 0xb0, 0x7a, 0x0e, 0x36, - 0x6b, 0x15, 0xaa, 0xff, 0x54, 0x96, 0x7e, 0xc1, 0xd3, 0x3c, 0x35, 0x1c, 0xd4, 0x6b, 0xaa, 0x9c, - 0x62, 0x23, 0xa5, 0x13, 0x7d, 0x06, 0xd3, 0x0c, 0x69, 0x45, 0x8e, 0x63, 0x39, 0xbd, 0xda, 0x14, - 0x35, 0xf2, 0x6c, 0x96, 0x11, 0xce, 0xd2, 0x7c, 0x76, 0x38, 0xa8, 0x3f, 0xa3, 0x48, 0x29, 0x26, - 0x54, 0x85, 0x24, 0x62, 0x30, 0x20, 0x59, 0xd8, 0xe9, 0xac, 0x88, 0xb1, 0xae, 0x32, 0xb1, 0x88, - 0x91, 0x92, 0x54, 0x23, 0x46, 0x8a, 0x98, 0xac, 0x07, 0x5f, 0xe4, 0x99, 0xf1, 0xeb, 0xc1, 0xd7, - 0x59, 0x5a, 0x8f, 0x8c, 0xa5, 0x56, 0xb4, 0xa1, 0x2f, 0x80, 0xbc, 0x78, 0x2e, 0x46, 0x9e, 0x6d, - 0x75, 0x8d, 0x10, 0x5f, 0xc4, 0x21, 0xee, 0x92, 0x48, 0x3d, 0x4b, 0xad, 0xe8, 0x23, 0x56, 0x46, - 0x38, 0x9b, 0xfa, 0x70, 0x50, 0x5f, 0xca, 0xd2, 0xa1, 0x58, 0xcd, 0xb4, 0x82, 0xfe, 0x46, 0x83, - 0xf9, 0x20, 0x34, 0x1c, 0xd3, 0xb0, 0x5d, 0x07, 0x5f, 0x71, 0x7a, 0x3e, 0x0e, 0x82, 0x2b, 0xce, - 0x8e, 0x5b, 0xab, 0x52, 0xfb, 0x67, 0x53, 0x61, 0x3d, 0x8b, 0xb5, 0x79, 0x76, 0x38, 0xa8, 0xd7, - 0x33, 0xb5, 0x28, 0x1e, 0x64, 0x1b, 0x42, 0x07, 0x70, 0x42, 0x64, 0x15, 0xdb, 0xa1, 0x65, 0x5b, - 0x81, 0x11, 0x5a, 0xae, 0x53, 0x3b, 0x4e, 0xed, 0x9f, 0x49, 0x47, 0xc7, 0x11, 0xc6, 0xe6, 0x99, - 0xe1, 0xa0, 0x7e, 0x3a, 0x43, 0x83, 0x62, 0x3b, 0xcb, 0x44, 0xb2, 0x85, 0x36, 0x7d, 0x4c, 0x18, - 0xb1, 0x59, 0x3b, 0x31, 0x7e, 0x0b, 0xc5, 0x4c, 0xf2, 0x16, 0x8a, 0xc1, 0xac, 0x2d, 0x14, 0x13, - 0x89, 0x25, 0xcf, 0xf0, 0x43, 0x8b, 0x98, 0xdd, 0x30, 0xfc, 0x3d, 0xec, 0xd7, 0xe6, 0xb2, 0x2c, - 0x6d, 0xaa, 0x4c, 0xcc, 0x52, 0x4a, 0x52, 0xb5, 0x94, 0x22, 0xa2, 0x3b, 0x1a, 0xa8, 0xae, 0x59, - 0xae, 0xd3, 0x22, 0x69, 0x43, 0x40, 0x86, 0x37, 0x4f, 0x8d, 0x3e, 0x7f, 0x97, 0xe1, 0xc9, 0xec, - 0xcd, 0xe7, 0x87, 0x83, 0xfa, 0xd9, 0xb1, 0xda, 0x14, 0x47, 0xc6, 0x1b, 0x45, 0x37, 0xa1, 0x42, - 0x88, 
0x98, 0x26, 0x60, 0x66, 0x6d, 0x81, 0xfa, 0x70, 0x72, 0xd4, 0x07, 0xce, 0x40, 0x33, 0x90, - 0x79, 0x49, 0x42, 0xb1, 0x23, 0xab, 0x6a, 0x16, 0x61, 0x92, 0xca, 0xeb, 0xc3, 0x02, 0x9c, 0xc8, - 0xd8, 0x1b, 0xe8, 0x3d, 0x28, 0xf8, 0x91, 0x43, 0x12, 0x36, 0x96, 0xa5, 0x20, 0xd5, 0xea, 0x76, - 0x64, 0x99, 0x2c, 0x5b, 0xf4, 0x23, 0x47, 0xc9, 0xe1, 0x26, 0x29, 0x40, 0xe4, 0x49, 0xb6, 0x68, - 0x99, 0x3c, 0x1b, 0x19, 0x2b, 0x7f, 0xcb, 0xed, 0xa8, 0xf2, 0x14, 0x40, 0x18, 0xa6, 0xc5, 0xc6, - 0x6b, 0x5b, 0xe4, 0x54, 0xb1, 0x3c, 0xe3, 0x39, 0x55, 0xcd, 0x87, 0x51, 0x07, 0xfb, 0x0e, 0x0e, - 0x71, 0x20, 0xc6, 0x40, 0x8f, 0x15, 0x8d, 0x22, 0xbe, 0x84, 0x48, 0xfa, 0xa7, 0x64, 0x1c, 0x7d, - 0xa3, 0x41, 0xad, 0x6f, 0x1c, 0xb4, 0x05, 0x18, 0xb4, 0x77, 0x5c, 0xbf, 0xed, 0x61, 0xdf, 0x72, - 0x4d, 0x9a, 0x7c, 0x56, 0xce, 0xff, 0xc5, 0x3d, 0x0f, 0x52, 0x63, 0xc3, 0x38, 0x10, 0x70, 0xf0, - 0x81, 0xeb, 0x6f, 0x52, 0xf1, 0x35, 0x27, 0xf4, 0x0f, 0x9b, 0xa7, 0xbf, 0x1f, 0xd4, 0x8f, 0x91, - 0x65, 0xe9, 0x67, 0xf1, 0xb4, 0xb2, 0x61, 0xf4, 0x8f, 0x1a, 0x2c, 0x84, 0x6e, 0x68, 0xd8, 0xed, - 0x6e, 0xd4, 0x8f, 0x6c, 0x23, 0xb4, 0xf6, 0x71, 0x3b, 0x0a, 0x8c, 0x1e, 0xe6, 0x39, 0xee, 0xbb, - 0xf7, 0x76, 0xea, 0x06, 0x91, 0xbf, 0x10, 0x8b, 0x6f, 0x13, 0x69, 0xe6, 0xd3, 0x29, 0xee, 0xd3, - 0x5c, 0x98, 0xc1, 0xd2, 0xca, 0x44, 0x17, 0xff, 0x55, 0x83, 0xc5, 0xf1, 0xc3, 0x44, 0x67, 0x21, - 0xbf, 0x87, 0x0f, 0x79, 0x15, 0x71, 0x7c, 0x38, 0xa8, 0x4f, 0xef, 0xe1, 0x43, 0x69, 0xd6, 0x09, - 0x15, 0xfd, 0x15, 0x4c, 0xee, 0x1b, 0x76, 0x84, 0xf9, 0x96, 0x68, 0x34, 0x58, 0xbd, 0xd4, 0x90, - 0xeb, 0xa5, 0x86, 0xb7, 0xd7, 0x23, 0x40, 0x43, 0xac, 0x48, 0xe3, 0xa3, 0xc8, 0x70, 0x42, 0x2b, - 0x3c, 0x64, 0xdb, 0x85, 0x2a, 0x90, 0xb7, 0x0b, 0x05, 0xde, 0xc9, 0xbd, 0xa5, 0x2d, 0x7e, 0xab, - 0xc1, 0xc9, 0xb1, 0x83, 0xfe, 0x39, 0x78, 0xa8, 0xb7, 0x61, 0x82, 0x6c, 0x7c, 0x52, 0xdf, 0xec, - 0x5a, 0xbd, 0xdd, 0x37, 0x5f, 0xa7, 0xee, 0x14, 0x58, 0x39, 0xc2, 0x10, 0xb9, 0x1c, 0x61, 0x08, - 0xa9, 0xd1, 0x6c, 0xf7, 0xf6, 0x9b, 0xaf, 0x53, 0xa7, 0x0a, 0xcc, 0x08, 0x05, 0x64, 0x23, 0x14, - 0xd0, 0x7f, 0x5f, 0x80, 0x72, 0x5c, 0x40, 0x48, 0x67, 0x50, 0x7b, 0xa0, 0x33, 0x78, 0x19, 0xaa, - 0x26, 0x36, 0xf9, 0x9b, 0xcf, 0x72, 0x1d, 0x71, 0x9a, 0xcb, 0x2c, 0xba, 0x2a, 0x34, 0x45, 0x7e, - 0x36, 0x45, 0x42, 0xe7, 0xa1, 0xc4, 0x13, 0xed, 0x43, 0x7a, 0x90, 0xa7, 0x9b, 0x0b, 0xc3, 0x41, - 0x1d, 0x09, 0x4c, 0x12, 0x8d, 0xf9, 0x50, 0x0b, 0x80, 0x55, 0xaf, 0x1b, 0x38, 0x34, 0x78, 0xca, - 0x5f, 0x53, 0x47, 0x70, 0x3d, 0xa6, 0xb3, 0x3a, 0x34, 0xe1, 0x97, 0xeb, 0xd0, 0x04, 0x45, 0x9f, - 0x02, 0xf4, 0x0d, 0xcb, 0x61, 0x72, 0x3c, 0xbf, 0xd7, 0xc7, 0x85, 0x94, 0x8d, 0x98, 0x93, 0x69, - 0x4f, 0x24, 0x65, 0xed, 0x09, 0x4a, 0xaa, 0x45, 0x5e, 0x6f, 0xd7, 0x0a, 0xf4, 0x94, 0x2e, 0x8d, - 0x53, 0xcd, 0xd5, 0xce, 0x93, 0x8a, 0x91, 0x8b, 0x48, 0x3a, 0x85, 0x16, 0x32, 0x6d, 0xb6, 0xb5, - 0x83, 0x43, 0xab, 0x8f, 0x69, 0x66, 0xcf, 0xa7, 0x4d, 0x60, 0xf2, 0xb4, 0x09, 0x0c, 0xbd, 0x05, - 0x60, 0x84, 0x1b, 0x6e, 0x10, 0x5e, 0x77, 0xba, 0x98, 0x66, 0xec, 0x25, 0xe6, 0x7e, 0x82, 0xca, - 0xee, 0x27, 0x28, 0x7a, 0x17, 0x2a, 0x1e, 0x7f, 0x09, 0x75, 0x6c, 0x4c, 0x33, 0xf2, 0x12, 0x7b, - 0xa5, 0x48, 0xb0, 0x24, 0x2b, 0x73, 0xa3, 0x4b, 0x30, 0xdb, 0x75, 0x9d, 0x6e, 0xe4, 0xfb, 0xd8, - 0xe9, 0x1e, 0x6e, 0x19, 0x3b, 0x98, 0x66, 0xdf, 0x25, 0xb6, 0x55, 0x52, 0x24, 0x79, 0xab, 0xa4, - 0x48, 0xe8, 0x0d, 0x28, 0xc7, 0xdd, 0x0b, 0x9a, 0x60, 0x97, 0x79, 0x21, 0x2c, 0x40, 0x49, 0x38, - 0xe1, 0x24, 0xce, 0x5b, 0x41, 0x9c, 0xa5, 0xd1, 0xa4, 0x99, 0x3b, 0x2f, 0xc1, 0xb2, 0xf3, 0x12, - 0x8c, 0xae, 0xc0, 0x71, 0xfa, 
0x5e, 0x6c, 0x87, 0xa1, 0xdd, 0x0e, 0x70, 0xd7, 0x75, 0xcc, 0x80, - 0xe6, 0xc4, 0x79, 0xe6, 0x3e, 0x25, 0xde, 0x08, 0xed, 0x2d, 0x46, 0x92, 0xdd, 0x4f, 0x91, 0xf4, - 0xff, 0xd6, 0x60, 0x2e, 0x6b, 0x0b, 0xa5, 0xb6, 0xb3, 0xf6, 0x48, 0xb6, 0xf3, 0xc7, 0x50, 0xf2, - 0x5c, 0xb3, 0x1d, 0x78, 0xb8, 0xcb, 0x23, 0x56, 0x6a, 0x33, 0x6f, 0xba, 0xe6, 0x96, 0x87, 0xbb, - 0x7f, 0x69, 0x85, 0xbb, 0xab, 0xfb, 0xae, 0x65, 0x5e, 0xb5, 0x02, 0xbe, 0xeb, 0x3c, 0x46, 0x51, - 0x32, 0x84, 0x22, 0x07, 0x9b, 0x25, 0x28, 0x30, 0x2b, 0xfa, 0xff, 0xe4, 0xa1, 0x9a, 0xde, 0xb6, - 0x7f, 0x4a, 0x43, 0x41, 0x37, 0xa1, 0x68, 0xb1, 0x94, 0x99, 0x67, 0x10, 0x7f, 0x26, 0xc5, 0xf4, - 0x46, 0xd2, 0x10, 0x6c, 0xec, 0xbf, 0xda, 0xe0, 0xb9, 0x35, 0x9d, 0x02, 0xaa, 0x99, 0x4b, 0xaa, - 0x9a, 0x39, 0x88, 0x5a, 0x50, 0x0c, 0xb0, 0xbf, 0x6f, 0x75, 0x31, 0x0f, 0x4e, 0x75, 0x59, 0x73, - 0xd7, 0xf5, 0x31, 0xd1, 0xb9, 0xc5, 0x58, 0x12, 0x9d, 0x5c, 0x46, 0xd5, 0xc9, 0x41, 0xf4, 0x31, - 0x94, 0xbb, 0xae, 0xb3, 0x63, 0xf5, 0x36, 0x0c, 0x8f, 0x87, 0xa7, 0xd3, 0x59, 0x5a, 0x2f, 0x08, - 0x26, 0xde, 0x84, 0x10, 0x8f, 0xa9, 0x26, 0x44, 0xcc, 0x95, 0x2c, 0xe8, 0xaf, 0x27, 0x00, 0x92, - 0xc5, 0x41, 0x6f, 0x43, 0x05, 0x1f, 0xe0, 0x6e, 0x14, 0xba, 0xbe, 0x78, 0x4f, 0xf0, 0x9e, 0x9e, - 0x80, 0x95, 0xc0, 0x0e, 0x09, 0x4a, 0x0e, 0xaa, 0x63, 0xf4, 0x71, 0xe0, 0x19, 0x5d, 0xd1, 0x0c, - 0xa4, 0xce, 0xc4, 0xa0, 0x7c, 0x50, 0x63, 0x10, 0xfd, 0x39, 0x4c, 0xd0, 0xf6, 0x21, 0xeb, 0x03, - 0xa2, 0xe1, 0xa0, 0x3e, 0xe3, 0xa8, 0x8d, 0x43, 0x4a, 0x47, 0xef, 0xc3, 0xf4, 0x5e, 0xbc, 0xf1, - 0x88, 0x6f, 0x13, 0x54, 0x80, 0xa6, 0x76, 0x09, 0x41, 0xf1, 0x6e, 0x4a, 0xc6, 0xd1, 0x0e, 0x54, - 0x0c, 0xc7, 0x71, 0x43, 0xfa, 0x0e, 0x12, 0xbd, 0xc1, 0x17, 0xc6, 0x6d, 0xd3, 0xc6, 0x6a, 0xc2, - 0xcb, 0xb2, 0x24, 0x1a, 0x3c, 0x24, 0x0d, 0x72, 0xf0, 0x90, 0x60, 0xd4, 0x82, 0x82, 0x6d, 0x74, - 0xb0, 0x2d, 0x82, 0xfe, 0x73, 0x63, 0x4d, 0x5c, 0xa5, 0x6c, 0x4c, 0x3b, 0x7d, 0xe5, 0x33, 0x39, - 0xf9, 0x95, 0xcf, 0x90, 0xc5, 0x1d, 0xa8, 0xa6, 0xfd, 0x39, 0x5a, 0x02, 0xf3, 0x82, 0x9c, 0xc0, - 0x94, 0xef, 0x99, 0x32, 0x19, 0x50, 0x91, 0x9c, 0x7a, 0x1c, 0x26, 0xf4, 0x7f, 0xd3, 0x60, 0x2e, - 0xeb, 0xec, 0xa2, 0x0d, 0xe9, 0xc4, 0x6b, 0xbc, 0xc7, 0x91, 0xb1, 0xd5, 0xb9, 0xec, 0x98, 0xa3, - 0x9e, 0x1c, 0xf4, 0x26, 0xcc, 0x38, 0xae, 0x89, 0xdb, 0x06, 0x31, 0x60, 0x5b, 0x41, 0x58, 0xcb, - 0xd1, 0xde, 0x31, 0xed, 0x8d, 0x10, 0xca, 0xaa, 0x20, 0x48, 0xd2, 0xd3, 0x0a, 0x41, 0xff, 0x7b, - 0x0d, 0x66, 0x53, 0xad, 0xcb, 0x87, 0x4e, 0xa2, 0xe4, 0xd4, 0x27, 0x77, 0xb4, 0xd4, 0x47, 0xff, - 0xa7, 0x1c, 0x54, 0xa4, 0xba, 0xee, 0xa1, 0x7d, 0xb8, 0x05, 0xb3, 0xfc, 0x4d, 0x69, 0x39, 0x3d, - 0x56, 0x4e, 0xe5, 0x78, 0x93, 0x62, 0xe4, 0x4b, 0xc1, 0xba, 0xdb, 0xd9, 0x8a, 0x79, 0x69, 0x35, - 0x45, 0x3b, 0x58, 0x81, 0x82, 0x49, 0x26, 0x66, 0x54, 0x0a, 0xba, 0x09, 0x0b, 0x91, 0x67, 0x1a, - 0x21, 0x6e, 0x07, 0xbc, 0xe7, 0xde, 0x76, 0xa2, 0x7e, 0x07, 0xfb, 0xf4, 0xc4, 0x4f, 0xb2, 0x9e, - 0x0b, 0xe3, 0x10, 0x4d, 0xf9, 0x6b, 0x94, 0x2e, 0xe9, 0x9c, 0xcb, 0xa2, 0xeb, 0x97, 0x01, 0x8d, - 0xf6, 0x95, 0x95, 0xf9, 0xd5, 0x8e, 0x38, 0xbf, 0x5f, 0x69, 0x50, 0x4d, 0xb7, 0x8b, 0x9f, 0xc8, - 0x42, 0x1f, 0x42, 0x39, 0x6e, 0xfd, 0x3e, 0xb4, 0x03, 0x2f, 0x41, 0xc1, 0xc7, 0x46, 0xe0, 0x3a, - 0xfc, 0x64, 0xd2, 0x10, 0xc3, 0x10, 0x39, 0xc4, 0x30, 0x44, 0xbf, 0x01, 0x53, 0x6c, 0x06, 0x3f, - 0xb0, 0xec, 0x10, 0xfb, 0xe8, 0x22, 0x14, 0x82, 0xd0, 0x08, 0x71, 0x50, 0xd3, 0x96, 0xf3, 0xe7, - 0x66, 0xce, 0x2f, 0x8c, 0x76, 0x79, 0x09, 0x99, 0x69, 0x65, 0x9c, 0xb2, 0x56, 0x86, 0xe8, 0x7f, - 0xab, 0xc1, 0x94, 0xdc, 0xcc, 0x7e, 0x34, 0x6a, 0xef, 
0x73, 0x68, 0x5f, 0x08, 0x1f, 0xec, 0x47, - 0xb3, 0xb2, 0xf7, 0x67, 0xfd, 0x3f, 0x34, 0x36, 0xb3, 0x71, 0x17, 0xf4, 0x61, 0xcd, 0xf7, 0x92, - 0x56, 0x08, 0x39, 0x61, 0x01, 0x0d, 0x6c, 0x47, 0x6d, 0x85, 0xd0, 0xf0, 0xa7, 0x88, 0xcb, 0xe1, - 0x4f, 0x21, 0xe8, 0xdf, 0x4c, 0x52, 0xcf, 0x93, 0x8e, 0xf7, 0x93, 0x6e, 0x02, 0xa5, 0xb2, 0x93, - 0xfc, 0x7d, 0x64, 0x27, 0x2f, 0x43, 0x91, 0xbe, 0x0e, 0xe2, 0xc4, 0x81, 0x2e, 0x1a, 0x81, 0xd4, - 0x2f, 0x8e, 0x0c, 0xb9, 0x4b, 0xd4, 0x9a, 0x7c, 0xb8, 0xa8, 0x85, 0xda, 0x70, 0x72, 0xd7, 0x08, - 0xda, 0x22, 0xce, 0x9a, 0x6d, 0x23, 0x6c, 0xc7, 0x71, 0xa2, 0x40, 0xcb, 0x94, 0xe7, 0x86, 0x83, - 0xfa, 0xf2, 0xae, 0x11, 0x6c, 0x09, 0x9e, 0xd5, 0x70, 0x73, 0x34, 0x6a, 0x2c, 0x64, 0x73, 0xa0, - 0x6d, 0x98, 0xcf, 0x56, 0x5e, 0xa4, 0x9e, 0xd3, 0x26, 0x6f, 0x70, 0x57, 0xcd, 0x27, 0x32, 0xc8, - 0xe8, 0x4b, 0x0d, 0x6a, 0xe4, 0xfd, 0xec, 0xe3, 0xcf, 0x23, 0xcb, 0xc7, 0x7d, 0xb2, 0x62, 0x6d, - 0x77, 0x1f, 0xfb, 0xb6, 0x71, 0xc8, 0xbf, 0xd6, 0x9c, 0x19, 0x7d, 0x7b, 0x6c, 0xba, 0x66, 0x4b, - 0x12, 0x60, 0x43, 0xf3, 0x54, 0xf0, 0x3a, 0x53, 0x22, 0x0f, 0x2d, 0x9b, 0x63, 0x7d, 0xa2, 0x54, - 0xaa, 0x96, 0xf5, 0xdf, 0x6a, 0x30, 0xa3, 0x7e, 0x54, 0x79, 0xe2, 0x1b, 0x73, 0xe4, 0x48, 0xe6, - 0x1f, 0xd3, 0x91, 0xfc, 0x8d, 0x06, 0xd3, 0xca, 0xb7, 0x9e, 0xa7, 0x67, 0xe8, 0xff, 0x9c, 0x83, - 0x85, 0x6c, 0x35, 0x8f, 0xa5, 0x00, 0xbd, 0x0c, 0x24, 0x95, 0xbc, 0x92, 0xe4, 0x46, 0xf3, 0x23, - 0xf5, 0x27, 0x1d, 0x82, 0xc8, 0x43, 0x47, 0x3e, 0xd2, 0x08, 0x71, 0x74, 0x13, 0x2a, 0x96, 0xf4, - 0x39, 0x28, 0x9f, 0xd5, 0xb5, 0x97, 0x3f, 0x02, 0xb1, 0x2e, 0xc5, 0x98, 0x4f, 0x3f, 0xb2, 0xaa, - 0x66, 0x01, 0x26, 0x48, 0xf2, 0xa6, 0xef, 0x43, 0x91, 0xbb, 0x83, 0x5e, 0x83, 0x32, 0x8d, 0x73, - 0xb4, 0xa6, 0x62, 0x89, 0x3b, 0x4d, 0x3b, 0x08, 0x98, 0xba, 0x90, 0x51, 0x12, 0x18, 0x7a, 0x13, - 0x80, 0x1c, 0x6d, 0x1e, 0xe1, 0x72, 0x34, 0x4e, 0xd0, 0xda, 0xcd, 0x73, 0xcd, 0x91, 0xb0, 0x56, - 0x8e, 0x41, 0xfd, 0xdf, 0x73, 0x50, 0x91, 0x3f, 0x40, 0x3d, 0x90, 0xf1, 0x2f, 0x40, 0xd4, 0xd5, - 0x6d, 0xc3, 0x34, 0xc9, 0x5f, 0x2c, 0x5e, 0x69, 0x2b, 0x63, 0x27, 0x49, 0xfc, 0xbf, 0x2a, 0x24, - 0x58, 0x15, 0x45, 0x3f, 0xf1, 0x5b, 0x29, 0x92, 0x64, 0xb5, 0x9a, 0xa6, 0x2d, 0xee, 0xc1, 0x7c, - 0xa6, 0x2a, 0xb9, 0xf6, 0x99, 0x7c, 0x54, 0xb5, 0xcf, 0x7f, 0x4e, 0xc2, 0x7c, 0xe6, 0x87, 0xbf, - 0x27, 0x7e, 0x8a, 0xd5, 0x13, 0x94, 0x7f, 0x24, 0x27, 0xe8, 0x2b, 0x2d, 0x6b, 0x65, 0xd9, 0x47, - 0x94, 0xb7, 0x8f, 0xf0, 0x35, 0xf4, 0x51, 0xad, 0xb1, 0xba, 0x2d, 0x27, 0x1f, 0xe8, 0x4c, 0x14, - 0x8e, 0x7a, 0x26, 0xd0, 0x2b, 0xac, 0x8c, 0xa5, 0xb6, 0x8a, 0xd4, 0x96, 0x88, 0x10, 0x29, 0x53, - 0x45, 0x0e, 0xa1, 0xf7, 0x61, 0x5a, 0x48, 0xb0, 0xe6, 0x49, 0x29, 0xe9, 0x6c, 0x70, 0x9e, 0x74, - 0xff, 0x64, 0x4a, 0xc6, 0xff, 0xb8, 0x7b, 0xf8, 0x77, 0x1a, 0xcc, 0xa6, 0x6e, 0x02, 0x3c, 0x3d, - 0xef, 0xa0, 0xaf, 0x35, 0x28, 0xc7, 0x97, 0x50, 0x1e, 0x3a, 0x91, 0x5f, 0x85, 0x02, 0x66, 0x17, - 0x21, 0x58, 0xb8, 0x3b, 0x91, 0xba, 0xa8, 0x46, 0x68, 0xfc, 0x6a, 0x5a, 0xea, 0xee, 0x43, 0x8b, - 0x0b, 0xea, 0xff, 0xab, 0x89, 0x14, 0x3d, 0xf1, 0xe9, 0x89, 0x2e, 0x45, 0x32, 0xa6, 0xfc, 0x83, - 0x8e, 0xe9, 0xbf, 0xca, 0x30, 0x49, 0xf9, 0x48, 0x09, 0x1d, 0x62, 0xbf, 0x6f, 0x39, 0x86, 0x4d, - 0x87, 0x53, 0x62, 0xe7, 0x56, 0x60, 0xf2, 0xb9, 0x15, 0x18, 0xda, 0x85, 0xd9, 0xa4, 0xed, 0x47, - 0xd5, 0x64, 0xdf, 0x7f, 0xfb, 0x50, 0x65, 0x62, 0x8d, 0xfd, 0x94, 0xa4, 0x7a, 0x41, 0x20, 0x45, - 0x44, 0x26, 0xcc, 0x74, 0x5d, 0x27, 0x34, 0x2c, 0x07, 0xfb, 0xcc, 0x50, 0x3e, 0xeb, 0xfe, 0xcf, - 0x05, 0x85, 0x87, 0x75, 0x4f, 0x54, 0x39, 0xf5, 0xfe, 0x8f, 0x4a, 0x43, 0x9f, 
-	/* ... remaining pre-change fileDescriptor bytes elided ... */
+	/* ... remaining regenerated fileDescriptor bytes elided: this blob is
+	   machine-generated compiler output that changes wholesale whenever
+	   events.proto changes, so it is not meaningful to review by hand ... */
 }

 func (m *EventSequence) Marshal() (dAtA []byte, err error) {
diff --git a/pkg/armadaevents/events.proto b/pkg/armadaevents/events.proto
index f78e8bb5839..d68759a6d75 100644
--- a/pkg/armadaevents/events.proto
+++ b/pkg/armadaevents/events.proto
@@ -6,10 +6,10 @@ option csharp_namespace = "ArmadaProject.Io.ArmadaEvents";
 import "k8s.io/api/core/v1/generated.proto";
 import "k8s.io/api/networking/v1/generated.proto";
 import "google/protobuf/timestamp.proto";
-import "google/protobuf/wrappers.proto";
 import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
 import "internal/scheduler/schedulerobjects/schedulerobjects.proto";
+
 // Armada state transition model
 //
 // The central concepts are:
diff --git a/pkg/client/queue/permission_subject_kind.go b/pkg/client/queue/permission_subject_kind.go
index 02f2f7a5386..4c674b60104 100644
--- a/pkg/client/queue/permission_subject_kind.go
+++ b/pkg/client/queue/permission_subject_kind.go
@@ -26,6 +26,8 @@ func NewPermissionSubjectKind(in string) (PermissionSubjectKind, error) {
 }

 // UnmarshalJSON is an implementation of the https://pkg.go.dev/encoding/json#Unmarshaler interface.
+//
+// TODO: Unused.
 func (kind *PermissionSubjectKind) UnmarshalJSON(data []byte) error {
 	subjectKind := ""
 	if err := json.Unmarshal(data, &subjectKind); err != nil {
 		return err
 	}
@@ -42,7 +44,9 @@ func (kind *PermissionSubjectKind) UnmarshalJSON(data []byte) error {
 }

 // Generate is an implementation of the https://pkg.go.dev/testing/quick#Generator interface.
-// This method is used for writing tests using the https://pkg.go.dev/testing/quick package
+// This method is used for writing tests using the https://pkg.go.dev/testing/quick package.
+//
+// TODO: Unused.
 func (kind PermissionSubjectKind) Generate(rand *rand.Rand, size int) reflect.Value {
 	values := []PermissionSubjectKind{
 		PermissionSubjectKindUser,
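Aside on the two TODOs just added: PermissionSubjectKind is a validated string type, and UnmarshalJSON is what makes that validation happen at decode time. A minimal sketch of the behaviour, assuming the armadaproject module path and the usual "User"/"Group" kind strings (neither is shown in this diff):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/armadaproject/armada/pkg/client/queue"
)

func main() {
	var kind queue.PermissionSubjectKind
	// A known kind decodes cleanly.
	if err := json.Unmarshal([]byte(`"Group"`), &kind); err != nil {
		panic(err)
	}
	fmt.Println(kind) // Group
	// An unknown kind is rejected at decode time, via the same check
	// that NewPermissionSubjectKind performs.
	err := json.Unmarshal([]byte(`"NotAKind"`), &kind)
	fmt.Println(err != nil) // true
}

If the methods really are unused, as the TODOs suggest, deleting them would drop this decode-time validation, and callers would then need to validate kinds explicitly.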
diff --git a/pkg/client/queue/queue.go b/pkg/client/queue/queue.go
index 54450e803dd..4e3689029a1 100644
--- a/pkg/client/queue/queue.go
+++ b/pkg/client/queue/queue.go
@@ -7,13 +7,13 @@ import (
 )

 type Queue struct {
-	Name           string         `json:"name"`
-	Permissions    []Permissions  `json:"permissions"`
-	PriorityFactor PriorityFactor `json:"priorityFactor"`
-	ResourceLimits ResourceLimits `json:"resourceLimits"`
+	Name                              string         `json:"name"`
+	Permissions                       []Permissions  `json:"permissions"`
+	PriorityFactor                    PriorityFactor `json:"priorityFactor"`
+	ResourceLimitsByPriorityClassName map[string]api.PriorityClassResourceLimits
 }

-// NewQueue returnes new Queue using the in parameter. Error is returned if
+// NewQueue returns a new Queue using the in parameter. An error is returned if
 // any of the queue fields has a corresponding value in in that is invalid.
 func NewQueue(in *api.Queue) (Queue, error) {
 	if in == nil {
@@ -25,11 +25,6 @@ func NewQueue(in *api.Queue) (Queue, error) {
 		return Queue{}, fmt.Errorf("failed to map priority factor. %s", err)
 	}

-	resourceLimits, err := NewResourceLimits(in.ResourceLimits)
-	if err != nil {
-		return Queue{}, fmt.Errorf("failed to map resource limits: %v. %s", in.ResourceLimits, err)
-	}
-
 	permissions := []Permissions{}
 	if len(in.GroupOwners) != 0 || len(in.UserOwners) != 0 {
 		permissions = append(permissions, NewPermissionsFromOwners(in.UserOwners, in.GroupOwners))
 	}
@@ -44,32 +39,24 @@ func NewQueue(in *api.Queue) (Queue, error) {
 	}

 	return Queue{
-		Name: in.Name,
-		// Kind: "Queue",
-		PriorityFactor: priorityFactor,
-		ResourceLimits: resourceLimits,
-		Permissions:    permissions,
+		Name:                              in.Name,
+		PriorityFactor:                    priorityFactor,
+		Permissions:                       permissions,
+		ResourceLimitsByPriorityClassName: in.ResourceLimitsByPriorityClassName,
 	}, nil
 }

 // ToAPI transforms Queue to *api.Queue structure
 func (q Queue) ToAPI() *api.Queue {
-	result := &api.Queue{
-		Name: q.Name,
-		// Kind: q.Kind,
-		PriorityFactor: float64(q.PriorityFactor),
-		ResourceLimits: map[string]float64{},
-	}
-
-	for resourceName, resourceLimit := range q.ResourceLimits {
-		result.ResourceLimits[string(resourceName)] = float64(resourceLimit)
+	rv := &api.Queue{
+		Name:                              q.Name,
+		PriorityFactor:                    float64(q.PriorityFactor),
+		ResourceLimitsByPriorityClassName: q.ResourceLimitsByPriorityClassName,
 	}
-
 	for _, permission := range q.Permissions {
-		result.Permissions = append(result.Permissions, permission.ToAPI())
+		rv.Permissions = append(rv.Permissions, permission.ToAPI())
 	}
-
-	return result
+	return rv
 }

 // HasPermission returns true if the inputSubject is allowed to perform a queue operation
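For reviewers tracking the API change across these files: per-queue resource limits are no longer a flat resource-to-fraction map but hang off priority classes. A hedged sketch of constructing a queue under the new shape (import paths as in this repo; the priority class name "armada-default" is purely illustrative, and the concrete fields of api.PriorityClassResourceLimits live in the generated code elsewhere in this PR):

import (
	"github.com/armadaproject/armada/pkg/api"
	"github.com/armadaproject/armada/pkg/client/queue"
)

func newExampleQueue() (queue.Queue, error) {
	return queue.NewQueue(&api.Queue{
		Name:           "example",
		PriorityFactor: 1,
		UserOwners:     []string{"alice"},
		ResourceLimitsByPriorityClassName: map[string]api.PriorityClassResourceLimits{
			// Hypothetical priority class name; per-queue limit overrides
			// for jobs of this class go here.
			"armada-default": {},
		},
	})
}

Note that NewQueue and ToAPI now pass ResourceLimitsByPriorityClassName through unchanged; there is no client-side validation analogous to the removed NewResourceLimits.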
%s", in.ResourceLimits, err) - } - permissions := []Permissions{} if len(in.GroupOwners) != 0 || len(in.UserOwners) != 0 { permissions = append(permissions, NewPermissionsFromOwners(in.UserOwners, in.GroupOwners)) @@ -44,32 +39,24 @@ func NewQueue(in *api.Queue) (Queue, error) { } return Queue{ - Name: in.Name, - // Kind: "Queue", - PriorityFactor: priorityFactor, - ResourceLimits: resourceLimits, - Permissions: permissions, + Name: in.Name, + PriorityFactor: priorityFactor, + Permissions: permissions, + ResourceLimitsByPriorityClassName: in.ResourceLimitsByPriorityClassName, }, nil } // ToAPI transforms Queue to *api.Queue structure func (q Queue) ToAPI() *api.Queue { - result := &api.Queue{ - Name: q.Name, - // Kind: q.Kind, - PriorityFactor: float64(q.PriorityFactor), - ResourceLimits: map[string]float64{}, - } - - for resourceName, resourceLimit := range q.ResourceLimits { - result.ResourceLimits[string(resourceName)] = float64(resourceLimit) + rv := &api.Queue{ + Name: q.Name, + PriorityFactor: float64(q.PriorityFactor), + ResourceLimitsByPriorityClassName: q.ResourceLimitsByPriorityClassName, } - for _, permission := range q.Permissions { - result.Permissions = append(result.Permissions, permission.ToAPI()) + rv.Permissions = append(rv.Permissions, permission.ToAPI()) } - - return result + return rv } // HasPermission returns true if the inputSubject is allowed to peform a queue operation diff --git a/pkg/client/resource.go b/pkg/client/resource.go index 5449dca9554..6d08f7a5900 100644 --- a/pkg/client/resource.go +++ b/pkg/client/resource.go @@ -6,8 +6,8 @@ import ( ) type Resource struct { - Version APIVersion `json:"apiVersion"` - Kind ResourceKind `json:"kind"` + Version APIVersion `json:"apiVersion" yaml:"apiVersion"` + Kind ResourceKind `json:"kind" yaml:"kind"` } type ResourceKind string