
Merge pull request kubernetes#14643 from liguangbo/change_Oom_to_OOM
Auto commit by PR queue bot
k8s-merge-robot committed Sep 30, 2015
2 parents bec96e6 + 1229908 commit b661cfd
Showing 16 changed files with 97 additions and 97 deletions.
14 changes: 7 additions & 7 deletions cmd/kube-proxy/app/server.go
@@ -72,7 +72,7 @@ type ProxyServer struct {
EndpointsConfig *proxyconfig.EndpointsConfig
EndpointsHandler proxyconfig.EndpointsConfigHandler
IptInterface utiliptables.Interface
- OomAdjuster *oom.OomAdjuster
+ OOMAdjuster *oom.OOMAdjuster
Proxier proxy.ProxyProvider
Recorder record.EventRecorder
ServiceConfig *proxyconfig.ServiceConfig
@@ -114,7 +114,7 @@ func NewProxyConfig() *ProxyServerConfig {
BindAddress: net.ParseIP("0.0.0.0"),
HealthzPort: 10249,
HealthzBindAddress: net.ParseIP("127.0.0.1"),
- OOMScoreAdj: qos.KubeProxyOomScoreAdj,
+ OOMScoreAdj: qos.KubeProxyOOMScoreAdj,
ResourceContainer: "/kube-proxy",
SyncPeriod: 30 * time.Second,
}
@@ -126,7 +126,7 @@ func NewProxyServer(
endpointsConfig *proxyconfig.EndpointsConfig,
endpointsHandler proxyconfig.EndpointsConfigHandler,
iptInterface utiliptables.Interface,
- oomAdjuster *oom.OomAdjuster,
+ oomAdjuster *oom.OOMAdjuster,
proxier proxy.ProxyProvider,
recorder record.EventRecorder,
serviceConfig *proxyconfig.ServiceConfig,
@@ -137,7 +137,7 @@ func NewProxyServer(
EndpointsConfig: endpointsConfig,
EndpointsHandler: endpointsHandler,
IptInterface: iptInterface,
- OomAdjuster: oomAdjuster,
+ OOMAdjuster: oomAdjuster,
Proxier: proxier,
Recorder: recorder,
ServiceConfig: serviceConfig,
@@ -162,10 +162,10 @@ func NewProxyServerDefault(config *ProxyServerConfig) (*ProxyServer, error) {
}

// TODO(vmarmol): Use container config for this.
- var oomAdjuster *oom.OomAdjuster
+ var oomAdjuster *oom.OOMAdjuster
if config.OOMScoreAdj != 0 {
- oomAdjuster := oom.NewOomAdjuster()
- if err := oomAdjuster.ApplyOomScoreAdj(0, config.OOMScoreAdj); err != nil {
+ oomAdjuster := oom.NewOOMAdjuster()
+ if err := oomAdjuster.ApplyOOMScoreAdj(0, config.OOMScoreAdj); err != nil {
glog.V(2).Info(err)
}
}
6 changes: 3 additions & 3 deletions cmd/kubelet/app/server.go
@@ -189,7 +189,7 @@ func NewKubeletServer() *KubeletServer {
NetworkPluginDir: "/usr/libexec/kubernetes/kubelet-plugins/net/exec/",
NetworkPluginName: "",
NodeStatusUpdateFrequency: 10 * time.Second,
- OOMScoreAdj: qos.KubeletOomScoreAdj,
+ OOMScoreAdj: qos.KubeletOOMScoreAdj,
PodInfraContainerImage: dockertools.PodInfraContainerImage,
Port: ports.KubeletPort,
ReadOnlyPort: ports.KubeletReadOnlyPort,
@@ -449,8 +449,8 @@ func (s *KubeletServer) Run(kcfg *KubeletConfig) error {
glog.V(2).Infof("Using root directory: %v", s.RootDirectory)

// TODO(vmarmol): Do this through container config.
- oomAdjuster := oom.NewOomAdjuster()
- if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
+ oomAdjuster := oom.NewOOMAdjuster()
+ if err := oomAdjuster.ApplyOOMScoreAdj(0, s.OOMScoreAdj); err != nil {
glog.Warning(err)
}

4 changes: 2 additions & 2 deletions contrib/mesos/pkg/executor/service/service.go
@@ -105,8 +105,8 @@ func (s *KubeletExecutorServer) syncExternalShutdownWatcher() (io.Closer, error)
func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
rand.Seed(time.Now().UTC().UnixNano())

- oomAdjuster := oom.NewOomAdjuster()
- if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
+ oomAdjuster := oom.NewOOMAdjuster()
+ if err := oomAdjuster.ApplyOOMScoreAdj(0, s.OOMScoreAdj); err != nil {
log.Info(err)
}

2 changes: 1 addition & 1 deletion docs/proposals/resource-qos.md
@@ -149,7 +149,7 @@ Container OOM score configuration
- Hack, because these critical tasks might die if they conflict with guaranteed containers. In the future, we should place all user-pods into a separate cgroup, and set a limit on the memory they can consume.

Setting OOM_SCORE_ADJ for a container
- - Refactor existing ApplyOomScoreAdj to util/oom.go
+ - Refactor existing ApplyOOMScoreAdj to util/oom.go
- To set OOM_SCORE_ADJ of a container, we loop through all processes in the container, and set OOM_SCORE_ADJ
- We keep looping until the list of processes in the container stabilizes. This is sufficient because child processes inherit OOM_SCORE_ADJ.

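The "keep looping until the list of processes stabilizes" step described above is the heart of this change. A minimal sketch of that loop follows; `listPids` and `setOOMScoreAdj` are hypothetical stand-ins for the injected helpers, and the authoritative version is `applyOOMScoreAdjContainer` in pkg/util/oom/oom_linux.go further down in this diff.

```go
package oomsketch

import "fmt"

// adjustUntilStable sketches the loop described in the proposal: write
// oom_score_adj for every PID in the container, then repeat until no new
// PIDs appear. A stable list is sufficient because child processes inherit
// oom_score_adj from their parent.
func adjustUntilStable(cgroup string, oomScoreAdj, maxTries int,
	listPids func(cgroup string) ([]int, error),
	setOOMScoreAdj func(pid, value int) error) error {
	adjusted := make(map[int]bool) // PIDs already written
	for i := 0; i < maxTries; i++ {
		pids, err := listPids(cgroup)
		if err != nil {
			return err
		}
		sawNewPid := false
		for _, pid := range pids {
			if !adjusted[pid] {
				sawNewPid = true // a process appeared since the last pass
				if err := setOOMScoreAdj(pid, oomScoreAdj); err == nil {
					adjusted[pid] = true
				}
			}
		}
		if !sawNewPid {
			return nil // the process list has stabilized
		}
	}
	return fmt.Errorf("PID list of %q did not stabilize after %d tries", cgroup, maxTries)
}
```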
6 changes: 3 additions & 3 deletions hack/verify-flags/exceptions.txt
@@ -71,9 +71,9 @@ hack/lib/logging.sh: local source_file=${BASH_SOURCE[$stack_skip]}
hack/local-up-cluster.sh: runtime_config="--runtime-config=${RUNTIME_CONFIG}"
hack/local-up-cluster.sh: runtime_config=""
pkg/cloudprovider/providers/vagrant/vagrant_test.go: testSaltMinionsResponse = []byte(`{ "return": [{"kubernetes-minion-1": {"kernel": "Linux", "domain": "", "zmqversion": "3.2.4", "kernelrelease": "3.11.10-301.fc20.x86_64", "pythonpath": ["/usr/bin", "/usr/lib64/python27.zip", "/usr/lib64/python2.7", "/usr/lib64/python2.7/plat-linux2", "/usr/lib64/python2.7/lib-tk", "/usr/lib64/python2.7/lib-old", "/usr/lib64/python2.7/lib-dynload", "/usr/lib64/python2.7/site-packages", "/usr/lib/python2.7/site-packages"], "etcd_servers": "10.245.1.2", "ip_interfaces": {"lo": ["127.0.0.1"], "docker0": ["172.17.42.1"], "enp0s8": ["10.245.2.2"], "p2p1": ["10.0.2.15"]}, "shell": "/bin/sh", "mem_total": 491, "saltversioninfo": [2014, 1, 7], "osmajorrelease": ["20"], "node_ip": "10.245.2.2", "id": "kubernetes-minion-1", "osrelease": "20", "ps": "ps -efH", "server_id": 1005530826, "num_cpus": 1, "hwaddr_interfaces": {"lo": "00:00:00:00:00:00", "docker0": "56:84:7a:fe:97:99", "enp0s8": "08:00:27:17:c5:0f", "p2p1": "08:00:27:96:96:e1"}, "virtual": "VirtualBox", "osfullname": "Fedora", "master": "kubernetes-master", "ipv4": ["10.0.2.15", "10.245.2.2", "127.0.0.1", "172.17.42.1"], "ipv6": ["::1", "fe80::a00:27ff:fe17:c50f", "fe80::a00:27ff:fe96:96e1"], "cpu_flags": ["fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "clflush", "mmx", "fxsr", "sse", "sse2", "syscall", "nx", "rdtscp", "lm", "constant_tsc", "rep_good", "nopl", "pni", "monitor", "ssse3", "lahf_lm"], "localhost": "kubernetes-minion-1", "lsb_distrib_id": "Fedora", "fqdn_ip4": ["127.0.0.1"], "fqdn_ip6": [], "nodename": "kubernetes-minion-1", "saltversion": "2014.1.7", "saltpath": "/usr/lib/python2.7/site-packages/salt", "pythonversion": [2, 7, 5, "final", 0], "host": "kubernetes-minion-1", "os_family": "RedHat", "oscodename": "Heisenbug", "defaultencoding": "UTF-8", "osfinger": "Fedora-20", "roles": ["kubernetes-pool"], "num_gpus": 1, "cpu_model": "Intel(R) Core(TM) i7-4600U CPU @ 2.10GHz", "fqdn": "kubernetes-minion-1", "osarch": "x86_64", "cpuarch": "x86_64", "gpus": [{"model": "VirtualBox Graphics Adapter", "vendor": "unknown"}], "path": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin", "os": "Fedora", "defaultlanguage": "en_US"}}]}`)
- pkg/kubelet/qos/memory_policy_test.go: t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOomScoreAdj, test.highOomScoreAdj, oomScoreAdj)
- pkg/kubelet/qos/memory_policy_test.go: highOomScoreAdj int // The min oom_score_adj score the container should be assigned.
- pkg/kubelet/qos/memory_policy_test.go: lowOomScoreAdj int // The max oom_score_adj score the container should be assigned.
+ pkg/kubelet/qos/memory_policy_test.go: t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOOMScoreAdj, test.highOOMScoreAdj, oomScoreAdj)
+ pkg/kubelet/qos/memory_policy_test.go: highOOMScoreAdj int // The min oom_score_adj score the container should be assigned.
+ pkg/kubelet/qos/memory_policy_test.go: lowOOMScoreAdj int // The max oom_score_adj score the container should be assigned.
pkg/util/oom/oom_linux.go: err = fmt.Errorf("failed to set oom_score_adj to %d: %v", oomScoreAdj, writeErr)
pkg/util/oom/oom_linux.go: return fmt.Errorf("invalid PID %d specified for oom_score_adj", pid)
pkg/util/oom/oom_linux.go: oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj")
4 changes: 2 additions & 2 deletions pkg/kubelet/container_manager_linux.go
@@ -283,8 +283,8 @@ func ensureDockerInContainer(cadvisor cadvisor.Interface, oomScoreAdj int, manag
}

// Also apply oom-score-adj to processes
- oomAdjuster := oom.NewOomAdjuster()
- if err := oomAdjuster.ApplyOomScoreAdj(pid, oomScoreAdj); err != nil {
+ oomAdjuster := oom.NewOOMAdjuster()
+ if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil {
errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d", oomScoreAdj, pid))
}
}
4 changes: 2 additions & 2 deletions pkg/kubelet/dockertools/fake_manager.go
@@ -42,11 +42,11 @@ func NewFakeDockerManager(
generator kubecontainer.RunContainerOptionsGenerator,
httpClient kubeletTypes.HttpGetter) *DockerManager {

- fakeOomAdjuster := oom.NewFakeOomAdjuster()
+ fakeOOMAdjuster := oom.NewFakeOOMAdjuster()
fakeProcFs := procfs.NewFakeProcFs()
dm := NewDockerManager(client, recorder, readinessManager, containerRefManager, machineInfo, podInfraContainerImage, qps,
burst, containerLogsDir, osInterface, networkPlugin, generator, httpClient, &NativeExecHandler{},
- fakeOomAdjuster, fakeProcFs, false)
+ fakeOOMAdjuster, fakeProcFs, false)
dm.dockerPuller = &FakeDockerPuller{}
dm.prober = prober.New(nil, readinessManager, containerRefManager, recorder)
return dm
10 changes: 5 additions & 5 deletions pkg/kubelet/dockertools/manager.go
@@ -131,7 +131,7 @@ type DockerManager struct {
execHandler ExecHandler

// Used to set OOM scores of processes.
- oomAdjuster *oom.OomAdjuster
+ oomAdjuster *oom.OOMAdjuster

// Get information from /proc mount.
procFs procfs.ProcFsInterface
@@ -155,7 +155,7 @@ func NewDockerManager(
generator kubecontainer.RunContainerOptionsGenerator,
httpClient kubeletTypes.HttpGetter,
execHandler ExecHandler,
- oomAdjuster *oom.OomAdjuster,
+ oomAdjuster *oom.OOMAdjuster,
procFs procfs.ProcFsInterface,
cpuCFSQuota bool) *DockerManager {
// Work out the location of the Docker runtime, defaulting to /var/lib/docker
@@ -1470,15 +1470,15 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
// whole pod will die.
var oomScoreAdj int
if container.Name == PodInfraContainerName {
- oomScoreAdj = qos.PodInfraOomAdj
+ oomScoreAdj = qos.PodInfraOOMAdj
} else {
- oomScoreAdj = qos.GetContainerOomScoreAdjust(container, dm.machineInfo.MemoryCapacity)
+ oomScoreAdj = qos.GetContainerOOMScoreAdjust(container, dm.machineInfo.MemoryCapacity)
}
cgroupName, err := dm.procFs.GetFullContainerName(containerInfo.State.Pid)
if err != nil {
return "", err
}
- if err = dm.oomAdjuster.ApplyOomScoreAdjContainer(cgroupName, oomScoreAdj, 5); err != nil {
+ if err = dm.oomAdjuster.ApplyOOMScoreAdjContainer(cgroupName, oomScoreAdj, 5); err != nil {
return "", err
}

2 changes: 1 addition & 1 deletion pkg/kubelet/kubelet.go
@@ -307,7 +307,7 @@ func NewMainKubelet(
return nil, err
}

- oomAdjuster := oom.NewOomAdjuster()
+ oomAdjuster := oom.NewOOMAdjuster()
procFs := procfs.NewProcFs()

// Initialize the runtime.
10 changes: 5 additions & 5 deletions pkg/kubelet/qos/memory_policy.go
@@ -21,9 +21,9 @@ import (
)

const (
- PodInfraOomAdj int = -999
- KubeletOomScoreAdj int = -999
- KubeProxyOomScoreAdj int = -999
+ PodInfraOOMAdj int = -999
+ KubeletOOMScoreAdj int = -999
+ KubeProxyOOMScoreAdj int = -999
)

// isMemoryBestEffort returns true if the container's memory requirements are best-effort.
@@ -42,12 +42,12 @@ func isMemoryGuaranteed(container *api.Container) bool {
return (*memoryRequest).Cmp(*memoryLimit) == 0 && memoryRequest.Value() != 0
}

- // GetContainerOomAdjust returns the amount by which the OOM score of all processes in the
+ // GetContainerOOMAdjust returns the amount by which the OOM score of all processes in the
// container should be adjusted. The OOM score of a process is the percentage of memory it consumes
// multiplied by 100 (barring exceptional cases) + a configurable quantity which is between -1000
// and 1000. Containers with higher OOM scores are killed if the system runs out of memory.
// See https://lwn.net/Articles/391222/ for more information.
- func GetContainerOomScoreAdjust(container *api.Container, memoryCapacity int64) int {
+ func GetContainerOOMScoreAdjust(container *api.Container, memoryCapacity int64) int {
if isMemoryGuaranteed(container) {
// Memory guaranteed containers should be the last to get killed.
return -999
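The diff elides the best-effort and burstable branches of GetContainerOOMScoreAdjust. The sketch below shows the overall mapping as a hedged reconstruction: the burstable scaling and the floor of 2 are assumptions inferred from the test expectations in the next file, not the verbatim elided code.

```go
package oomsketch

// sketchOOMScoreAdjust maps a container's memory QoS class to an
// oom_score_adj value in the style the doc comment above describes.
func sketchOOMScoreAdjust(guaranteed, bestEffort bool, memoryRequest, memoryCapacity int64) int {
	switch {
	case guaranteed:
		return -999 // killed last, like the kubelet and kube-proxy themselves
	case bestEffort:
		return 1000 // first candidates for the OOM killer
	default:
		// Burstable: the larger the share of node memory a container
		// requests, the safer (lower) its score. Requesting half the
		// node's memory yields roughly 500, consistent with the
		// 495-505 range the tests below allow.
		adj := 1000 - (1000*memoryRequest)/memoryCapacity
		if adj < 2 {
			adj = 2 // assumed floor: keep user pods above system daemons
		}
		return int(adj)
	}
}
```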
40 changes: 20 additions & 20 deletions pkg/kubelet/qos/memory_policy_test.go
@@ -128,60 +128,60 @@ func TestIsMemoryGuaranteed(t *testing.T) {
type oomTest struct {
container *api.Container
memoryCapacity int64
- lowOomScoreAdj int // The max oom_score_adj score the container should be assigned.
- highOomScoreAdj int // The min oom_score_adj score the container should be assigned.
+ lowOOMScoreAdj int // The max oom_score_adj score the container should be assigned.
+ highOOMScoreAdj int // The min oom_score_adj score the container should be assigned.
}

- func TestGetContainerOomScoreAdjust(t *testing.T) {
+ func TestGetContainerOOMScoreAdjust(t *testing.T) {

oomTests := []oomTest{
{
container: &zeroRequestMemoryBestEffort,
memoryCapacity: 4000000000,
- lowOomScoreAdj: 1000,
- highOomScoreAdj: 1000,
+ lowOOMScoreAdj: 1000,
+ highOOMScoreAdj: 1000,
},
{
container: &edgeMemoryBestEffort,
memoryCapacity: 8000000000,
- lowOomScoreAdj: 1000,
- highOomScoreAdj: 1000,
+ lowOOMScoreAdj: 1000,
+ highOOMScoreAdj: 1000,
},
{
container: &noRequestMemoryBestEffort,
memoryCapacity: 7230457451,
- lowOomScoreAdj: 1000,
- highOomScoreAdj: 1000,
+ lowOOMScoreAdj: 1000,
+ highOOMScoreAdj: 1000,
},
{
container: &noLimitMemoryBestEffort,
memoryCapacity: 4000000000,
- lowOomScoreAdj: 1000,
- highOomScoreAdj: 1000,
+ lowOOMScoreAdj: 1000,
+ highOOMScoreAdj: 1000,
},
{
container: &memoryGuaranteed,
memoryCapacity: 123456789,
- lowOomScoreAdj: -999,
- highOomScoreAdj: -999,
+ lowOOMScoreAdj: -999,
+ highOOMScoreAdj: -999,
},
{
container: &memoryBurstable,
memoryCapacity: standardMemoryAmount,
- lowOomScoreAdj: 495,
- highOomScoreAdj: 505,
+ lowOOMScoreAdj: 495,
+ highOOMScoreAdj: 505,
},
{
container: &memoryBurstableNoLimit,
memoryCapacity: standardMemoryAmount,
- lowOomScoreAdj: 2,
- highOomScoreAdj: 2,
+ lowOOMScoreAdj: 2,
+ highOOMScoreAdj: 2,
},
}
for _, test := range oomTests {
- oomScoreAdj := GetContainerOomScoreAdjust(test.container, test.memoryCapacity)
- if oomScoreAdj < test.lowOomScoreAdj || oomScoreAdj > test.highOomScoreAdj {
- t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOomScoreAdj, test.highOomScoreAdj, oomScoreAdj)
+ oomScoreAdj := GetContainerOOMScoreAdjust(test.container, test.memoryCapacity)
+ if oomScoreAdj < test.lowOOMScoreAdj || oomScoreAdj > test.highOOMScoreAdj {
+ t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOOMScoreAdj, test.highOOMScoreAdj, oomScoreAdj)
}
}
}
6 changes: 3 additions & 3 deletions pkg/util/oom/oom.go
@@ -19,8 +19,8 @@ package oom
// This is a struct instead of an interface to allow injection of process ID listers and
// applying OOM score in tests.
// TODO: make this an interface, and inject a mock ioutil struct for testing.
- type OomAdjuster struct {
+ type OOMAdjuster struct {
pidLister func(cgroupName string) ([]int, error)
- ApplyOomScoreAdj func(pid int, oomScoreAdj int) error
- ApplyOomScoreAdjContainer func(cgroupName string, oomScoreAdj, maxTries int) error
+ ApplyOOMScoreAdj func(pid int, oomScoreAdj int) error
+ ApplyOOMScoreAdjContainer func(cgroupName string, oomScoreAdj, maxTries int) error
}
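Because OOMAdjuster exposes its operations as plain function fields, a test can inject stubs without any mocking framework, which is exactly what oom_fake.go below does. A hypothetical recording variant, assuming it lives alongside the struct in package oom:

```go
// newRecordingAdjuster is a hypothetical test helper: it replaces the real
// /proc-writing implementation with a stub that records which PIDs were
// adjusted, demonstrating the injection the struct comment above describes.
func newRecordingAdjuster(calls *[]int) *OOMAdjuster {
	return &OOMAdjuster{
		ApplyOOMScoreAdj: func(pid int, oomScoreAdj int) error {
			*calls = append(*calls, pid) // record instead of touching /proc
			return nil
		},
	}
}
```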
14 changes: 7 additions & 7 deletions pkg/util/oom/oom_fake.go
@@ -16,19 +16,19 @@ limitations under the License.

package oom

- type FakeOomAdjuster struct{}
+ type FakeOOMAdjuster struct{}

- func NewFakeOomAdjuster() *OomAdjuster {
- return &OomAdjuster{
- ApplyOomScoreAdj: fakeApplyOomScoreAdj,
- ApplyOomScoreAdjContainer: fakeApplyOomScoreAdjContainer,
+ func NewFakeOOMAdjuster() *OOMAdjuster {
+ return &OOMAdjuster{
+ ApplyOOMScoreAdj: fakeApplyOOMScoreAdj,
+ ApplyOOMScoreAdjContainer: fakeApplyOOMScoreAdjContainer,
}
}

- func fakeApplyOomScoreAdj(pid int, oomScoreAdj int) error {
+ func fakeApplyOOMScoreAdj(pid int, oomScoreAdj int) error {
return nil
}

- func fakeApplyOomScoreAdjContainer(cgroupName string, oomScoreAdj, maxTries int) error {
+ func fakeApplyOOMScoreAdjContainer(cgroupName string, oomScoreAdj, maxTries int) error {
return nil
}
14 changes: 7 additions & 7 deletions pkg/util/oom/oom_linux.go
@@ -29,12 +29,12 @@ import (
"github.com/golang/glog"
)

- func NewOomAdjuster() *OomAdjuster {
- oomAdjuster := &OomAdjuster{
+ func NewOOMAdjuster() *OOMAdjuster {
+ oomAdjuster := &OOMAdjuster{
pidLister: getPids,
- ApplyOomScoreAdj: applyOomScoreAdj,
+ ApplyOOMScoreAdj: applyOOMScoreAdj,
}
- oomAdjuster.ApplyOomScoreAdjContainer = oomAdjuster.applyOomScoreAdjContainer
+ oomAdjuster.ApplyOOMScoreAdjContainer = oomAdjuster.applyOOMScoreAdjContainer
return oomAdjuster
}

@@ -48,7 +48,7 @@ func getPids(cgroupName string) ([]int, error) {
}

// Writes 'value' to /proc/<pid>/oom_score_adj. PID = 0 means self
- func applyOomScoreAdj(pid int, oomScoreAdj int) error {
+ func applyOOMScoreAdj(pid int, oomScoreAdj int) error {
if pid < 0 {
return fmt.Errorf("invalid PID %d specified for oom_score_adj", pid)
}
@@ -79,7 +79,7 @@ func applyOomScoreAdj(pid int, oomScoreAdj int) error {

// Writes 'value' to /proc/<pid>/oom_score_adj for all processes in cgroup cgroupName.
// Keeps trying to write until the process list of the cgroup stabilizes, or until maxTries tries.
- func (oomAdjuster *OomAdjuster) applyOomScoreAdjContainer(cgroupName string, oomScoreAdj, maxTries int) error {
+ func (oomAdjuster *OOMAdjuster) applyOOMScoreAdjContainer(cgroupName string, oomScoreAdj, maxTries int) error {
adjustedProcessSet := make(map[int]bool)
for i := 0; i < maxTries; i++ {
continueAdjusting := false
@@ -93,7 +93,7 @@ func (oomAdjuster *OomAdjuster) applyOomScoreAdjContainer(cgroupName string, oom
for _, pid := range pidList {
if !adjustedProcessSet[pid] {
continueAdjusting = true
- if err = oomAdjuster.ApplyOomScoreAdj(pid, oomScoreAdj); err == nil {
+ if err = oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err == nil {
adjustedProcessSet[pid] = true
}
}
(2 more changed files not shown)
