Merge pull request #125 from sp-yduck/feature/plugin-config
Feature/plugin config
  • Loading branch information
sp-yduck authored Nov 9, 2023
2 parents 3d65695 + 3693d03 commit ef8f925
Showing 9 changed files with 297 additions and 43 deletions.
4 changes: 2 additions & 2 deletions Makefile
@@ -118,7 +118,7 @@ build-e2e-image: ## Build cappx image to be used for e2e test

USE_EXISTING_CLUSTER := false
.PHONY: e2e
e2e: generate-e2e-templates build-e2e-image cleanup-e2e-artifacts ## Run e2e test
e2e: generate-e2e-templates build-e2e-image cleanup-e2e-artifacts $(KUBECTL) ## Run e2e test
go test $(E2E_DIR)/... -v \
-timeout=$(GINKGO_TIMEOUT) \
--e2e.artifacts-folder=$(E2E_DIR) \
@@ -184,7 +184,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified
.PHONY: deploy
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | kubectl diff -f -
$(KUSTOMIZE) build config/default | kubectl apply -f -

.PHONY: undeploy
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
32 changes: 30 additions & 2 deletions cloud/scheduler/README.md
@@ -8,7 +8,12 @@ Basic flow of the node selection process is `filter => score => select one node

### Filter Plugins

Filter plugins filter the node based on nodename, overcommit ratio etc.
Filter plugins filter nodes based on node name, overcommit ratio, etc., so that QEMU VMs are not scheduled onto undesired Proxmox nodes.

- [NodeName plugin](./plugins/nodename/node_name.go) (passes nodes matching the specified node name)
- [CPUOvercommit plugin](./plugins/overcommit/cpu_overcommit.go) (passes nodes with enough CPU capacity for the VMs already running on them)
- [MemoryOvercommit plugin](./plugins/overcommit/memory_overcommit.go) (passes nodes with enough memory for the VMs already running on them)
- [NodeRegex plugin](./plugins/regex/node_regex.go) (passes nodes matching the specified regex)

#### regex plugin

@@ -20,11 +25,17 @@ value(example): node[0-9]+

### Score Plugins

Score plugins score the nodes based on resource etc.
Score plugins score nodes based on resources, etc., so that QEMU VMs run on the most appropriate Proxmox node.

- [NodeResource plugin](./plugins/noderesource/node_resrouce.go) (nodes with more resources have higher scores)
- [Random plugin](./plugins/random/random.go) (disabled by default; just a reference implementation of a score plugin)

## How to specify vmid
qemu-scheduler reads the context and looks for keys registered to the scheduler. If the context contains a value for any registered key, qemu-scheduler uses the plugin that matches that key.

- [Range plugin](./plugins/idrange/idrange.go) (selects the minimum available vmid from the specified ID range)
- [VMIDRegex plugin](./plugins/regex/vmid_regex.go) (selects the minimum available vmid matching the specified regex)

### Range Plugin
You can specify a vmid range in the `(start id)-(end id)` format.
```sh
@@ -64,4 +75,21 @@ spec:
metadata:
  annotations:
    node.qemu-scheduler/regex: node[0-9]+ # this annotation will be propagated to your ProxmoxMachine via MachineSet
```

## How to configure (or disable/enable) specific Plugins

By default, all plugins are enabled. You can disable specific plugins via a plugin config file. For CAPPX, see the example ConfigMap [here](../../config/manager/manager.yaml).
```yaml
# example plugin-config.yaml

# plugin type name (scores, filters, vmids)
filters:
  CPUOvercommit:
    enable: false # disable
  MemoryOvercommit:
    enable: true # enable (the whole entry can be omitted)
vmids:
  Regex:
    enable: false # disable
```
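
The keys under `filters`, `scores`, and `vmids` are plugin names; they are matched against each plugin's `Name()` in `registry.go` below. Here is a minimal sketch (not part of this commit) of how such a file is decoded, assuming the exported `PluginConfigs` type added below and the import path used in the new test file:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"

	"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins"
)

// The example config from above, embedded as a string so the sketch is self-contained.
const exampleConfig = `
filters:
  CPUOvercommit:
    enable: false
  MemoryOvercommit:
    enable: true
vmids:
  Regex:
    enable: false
`

func main() {
	var cfg plugins.PluginConfigs
	if err := yaml.Unmarshal([]byte(exampleConfig), &cfg); err != nil {
		panic(err)
	}
	// With this config, CPUOvercommit and the Regex vmid plugin are disabled;
	// any plugin not mentioned at all stays enabled.
	for name, pc := range cfg.FilterPlugins {
		fmt.Printf("filter %q enable=%v\n", name, pc.Enable)
	}
}
```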
98 changes: 91 additions & 7 deletions cloud/scheduler/plugins/registry.go
@@ -1,6 +1,10 @@
package plugins

import (
"os"

"gopkg.in/yaml.v3"

"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/framework"
"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins/idrange"
"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins/nodename"
@@ -9,25 +13,105 @@ import (
"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins/regex"
)

func NewNodeFilterPlugins() []framework.NodeFilterPlugin {
return []framework.NodeFilterPlugin{
type PluginConfigs struct {
FilterPlugins map[string]PluginConfig `yaml:"filters,omitempty"`
ScorePlugins map[string]PluginConfig `yaml:"scores,omitempty"`
VMIDPlugins map[string]PluginConfig `yaml:"vmids,omitempty"`
}

type PluginConfig struct {
Enable bool `yaml:"enable,omitempty"`
Config map[string]interface{} `yaml:"config,omitempty"`
}

type PluginRegistry struct {
filterPlugins []framework.NodeFilterPlugin
scorePlugins []framework.NodeScorePlugin
vmidPlugins []framework.VMIDPlugin
}

func (r *PluginRegistry) FilterPlugins() []framework.NodeFilterPlugin {
return r.filterPlugins
}

func (r *PluginRegistry) ScorePlugins() []framework.NodeScorePlugin {
return r.scorePlugins
}

func (r *PluginRegistry) VMIDPlugins() []framework.VMIDPlugin {
return r.vmidPlugins
}

func NewRegistry(configs PluginConfigs) PluginRegistry {
r := PluginRegistry{
filterPlugins: NewNodeFilterPlugins(configs.FilterPlugins),
scorePlugins: NewNodeScorePlugins(configs.ScorePlugins),
vmidPlugins: NewVMIDPlugins(configs.VMIDPlugins),
}
return r
}

func NewNodeFilterPlugins(config map[string]PluginConfig) []framework.NodeFilterPlugin {
pls := []framework.NodeFilterPlugin{
&nodename.NodeName{},
&overcommit.CPUOvercommit{},
&overcommit.MemoryOvercommit{},
&regex.NodeRegex{},
}
plugins := []framework.NodeFilterPlugin{}
for _, pl := range pls {
c, ok := config[pl.Name()]
if ok && !c.Enable {
continue
}
plugins = append(plugins, pl)
}
return plugins
}

func NewNodeScorePlugins() []framework.NodeScorePlugin {
return []framework.NodeScorePlugin{
// &random.Random{},
func NewNodeScorePlugins(config map[string]PluginConfig) []framework.NodeScorePlugin {
pls := []framework.NodeScorePlugin{
&noderesource.NodeResource{},
}
plugins := []framework.NodeScorePlugin{}
for _, pl := range pls {
c, ok := config[pl.Name()]
if ok && !c.Enable {
continue
}
plugins = append(plugins, pl)
}
return plugins
}

func NewVMIDPlugins() []framework.VMIDPlugin {
return []framework.VMIDPlugin{
func NewVMIDPlugins(config map[string]PluginConfig) []framework.VMIDPlugin {
pls := []framework.VMIDPlugin{
&idrange.Range{},
&regex.Regex{},
}
plugins := []framework.VMIDPlugin{}
for _, pl := range pls {
c, ok := config[pl.Name()]
if ok && !c.Enable {
continue
}
plugins = append(plugins, pl)
}
return plugins
}

// GetPluginConfigFromFile reads the given config file and unmarshals it into the PluginConfigs type
func GetPluginConfigFromFile(path string) (PluginConfigs, error) {
var config PluginConfigs
if path == "" {
return config, nil
}
b, err := os.ReadFile(path)
if err != nil {
return config, err
}
if err := yaml.Unmarshal(b, &config); err != nil {
return config, err
}
return config, nil
}
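
A minimal usage sketch (not part of this commit) showing how the new helpers compose: a config file is read, a registry is built from it, and only the plugins left enabled are returned. The file path is hypothetical, and each plugin is assumed to expose `Name()` as used in the loops above.

```go
package main

import (
	"fmt"

	"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins"
)

func main() {
	// An empty path would return a zero-value PluginConfigs, i.e. all plugins enabled.
	cfg, err := plugins.GetPluginConfigFromFile("plugin-config.yaml") // hypothetical path
	if err != nil {
		panic(err)
	}
	registry := plugins.NewRegistry(cfg)
	for _, pl := range registry.FilterPlugins() {
		fmt.Println("enabled filter plugin:", pl.Name())
	}
}
```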
69 changes: 69 additions & 0 deletions cloud/scheduler/plugins/registry_test.go
@@ -0,0 +1,69 @@
package plugins_test

import (
"os"
"testing"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler/plugins"
)

func TestPlugins(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Plugins Suite")
}

var _ = Describe("GetPluginConfigFromFile", Label("unit", "scheduler"), func() {
path := "./test-plugin-config.yaml"
BeforeEach(func() {
content := `scores:
  Random:
    enable: false`
err := stringToFile(content, path)
Expect(err).ToNot(HaveOccurred())
})

AfterEach(func() {
err := rm(path)
Expect(err).NotTo(HaveOccurred())
})

Context("with empty file path", func() {
path := ""
It("should not error", func() {
config, err := plugins.GetPluginConfigFromFile(path)
Expect(err).NotTo(HaveOccurred())
Expect(config).To(Equal(plugins.PluginConfigs{}))
})
})

Context("with non-empty file path", func() {
It("should not error", func() {
config, err := plugins.GetPluginConfigFromFile(path)
Expect(err).NotTo(HaveOccurred())
scores := map[string]plugins.PluginConfig{}
scores["Random"] = plugins.PluginConfig{Enable: false}
Expect(config).To(Equal(plugins.PluginConfigs{ScorePlugins: scores}))
})
})

Context("with wrong file path", func() {
It("shold error", func() {
path := "./wrong-plugin-config.yaml"
config, err := plugins.GetPluginConfigFromFile(path)
Expect(err).To(HaveOccurred())
Expect(config).To(Equal(plugins.PluginConfigs{}))
})
})
})

func stringToFile(str string, path string) error {
b := []byte(str)
return os.WriteFile(path, b, 0666)
}

func rm(path string) error {
return os.Remove(path)
}
32 changes: 21 additions & 11 deletions cloud/scheduler/scheduler.go
@@ -35,9 +35,15 @@ type Manager struct {
}

// return manager with initialized scheduler-table
func NewManager(params SchedulerParams) *Manager {
func NewManager(params SchedulerParams) (*Manager, error) {
table := make(map[schedulerID]*Scheduler)
return &Manager{ctx: context.Background(), params: params, table: table}
config, err := plugins.GetPluginConfigFromFile(params.PluginConfigFile)
if err != nil {
return nil, fmt.Errorf("failed to read plugin config: %v", err)
}
params.pluginconfigs = config
params.Logger.Info(fmt.Sprintf("load plugin config: %v", config))
return &Manager{ctx: context.Background(), params: params, table: table}, nil
}

// return new/existing scheduler
@@ -70,9 +76,7 @@ func (m *Manager) NewScheduler(client *proxmox.Service, opts ...SchedulerOption)
client: client,
schedulingQueue: queue.New(),

filterPlugins: plugins.NewNodeFilterPlugins(),
scorePlugins: plugins.NewNodeScorePlugins(),
vmidPlugins: plugins.NewVMIDPlugins(),
registry: plugins.NewRegistry(m.params.PluginConfigs()),

resultMap: make(map[string]chan *framework.CycleState),
logger: m.params.Logger.WithValues("Name", "qemu-scheduler"),
@@ -122,9 +126,7 @@ type Scheduler struct {
client *proxmox.Service
schedulingQueue *queue.SchedulingQueue

filterPlugins []framework.NodeFilterPlugin
scorePlugins []framework.NodeScorePlugin
vmidPlugins []framework.VMIDPlugin
registry plugins.PluginRegistry

// to do : cache

@@ -144,6 +146,14 @@ type Scheduler struct {

type SchedulerParams struct {
Logger logr.Logger

// file path for pluginConfig
PluginConfigFile string
pluginconfigs plugins.PluginConfigs
}

func (p *SchedulerParams) PluginConfigs() plugins.PluginConfigs {
return p.pluginconfigs
}

type schedulerID struct {
@@ -320,7 +330,7 @@ func (s *Scheduler) RunFilterPlugins(ctx context.Context, state *framework.Cycle
}
for _, nodeInfo := range nodeInfos {
status := framework.NewStatus()
for _, pl := range s.filterPlugins {
for _, pl := range s.registry.FilterPlugins() {
status = pl.Filter(ctx, state, config, nodeInfo)
if !status.IsSuccess() {
status.SetFailedPlugin(pl.Name())
@@ -344,7 +354,7 @@ func (s *Scheduler) RunScorePlugins(ctx context.Context, state *framework.CycleS
return nil, status
}
for index, nodeInfo := range nodeInfos {
for _, pl := range s.scorePlugins {
for _, pl := range s.registry.ScorePlugins() {
score, status := pl.Score(ctx, state, config, nodeInfo)
if !status.IsSuccess() {
return nil, status
@@ -379,7 +389,7 @@ func selectHighestScoreNode(scoreList framework.NodeScoreList) (string, error) {
}

func (s *Scheduler) RunVMIDPlugins(ctx context.Context, state *framework.CycleState, config api.VirtualMachineCreateOptions, nextid int, usedID map[int]bool) (int, error) {
for _, pl := range s.vmidPlugins {
for _, pl := range s.registry.VMIDPlugins() {
key := pl.PluginKey()
value := ctx.Value(key)
if value != nil {
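
Finally, a minimal sketch (not part of this commit) of constructing a scheduler manager against the new `NewManager` signature, which now returns an error because the plugin config is parsed up front. The `scheduler` import path and the config file location are assumptions inferred from the repository layout.

```go
package main

import (
	"github.com/go-logr/logr"

	scheduler "github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scheduler"
)

func main() {
	params := scheduler.SchedulerParams{
		Logger:           logr.Discard(),
		PluginConfigFile: "/etc/qemu-scheduler/plugin-config.yaml", // hypothetical mount path
	}
	// NewManager fails fast if the plugin config file cannot be read or parsed.
	mgr, err := scheduler.NewManager(params)
	if err != nil {
		panic(err)
	}
	_ = mgr // schedulers are created from the manager via NewScheduler as shown above
}
```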