diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go
index 7b42e66..df338db 100644
--- a/cmd/kube-proxy/app/server.go
+++ b/cmd/kube-proxy/app/server.go
@@ -502,7 +502,8 @@ func (s *ProxyServer) Run() error {
 
 	// Tune conntrack, if requested
 	// Conntracker is always nil for windows
-	if s.Conntracker != nil {
+	// KUBERNETES-AIX: Conntracker is non-nil on AIX, but conntrack tuning is not supported there, so skip it.
+	if s.Conntracker != nil && goruntime.GOOS != "aix" {
 		max, err := getConntrackMax(s.ConntrackConfiguration)
 		if err != nil {
 			return err
diff --git a/cmd/kube-proxy/app/server_aix.go b/cmd/kube-proxy/app/server_aix.go
new file mode 100644
index 0000000..b34db4c
--- /dev/null
+++ b/cmd/kube-proxy/app/server_aix.go
@@ -0,0 +1,202 @@
+// +build aix
+
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package app does all of the work necessary to configure and run a
+// Kubernetes app process.
+package app
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	_ "net/http/pprof"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/client-go/tools/record"
+	"k8s.io/kubernetes/pkg/proxy"
+	proxyconfigapi "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig"
+	proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
+	"k8s.io/kubernetes/pkg/proxy/healthcheck"
+	"k8s.io/kubernetes/pkg/proxy/iptables"
+	"k8s.io/kubernetes/pkg/proxy/ipvs"
+	"k8s.io/kubernetes/pkg/proxy/metrics"
+	"k8s.io/kubernetes/pkg/proxy/userspace"
+	"k8s.io/kubernetes/pkg/util/configz"
+	utildbus "k8s.io/kubernetes/pkg/util/dbus"
+	utilipset "k8s.io/kubernetes/pkg/util/ipset"
+	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
+	utilipvs "k8s.io/kubernetes/pkg/util/ipvs"
+	utilnode "k8s.io/kubernetes/pkg/util/node"
+	utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
+	"k8s.io/utils/exec"
+
+	"github.com/golang/glog"
+)
+
+// NewProxyServer returns a new ProxyServer.
+func NewProxyServer(o *Options) (*ProxyServer, error) {
+	return newProxyServer(o.config, o.CleanupAndExit, o.scheme, o.master)
+}
+
+func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExit bool, scheme *runtime.Scheme, master string) (*ProxyServer, error) {
+	if config == nil {
+		return nil, errors.New("config is required")
+	}
+
+	if c, err := configz.New(proxyconfigapi.GroupName); err == nil {
+		c.Set(config)
+	} else {
+		return nil, fmt.Errorf("unable to register configz: %s", err)
+	}
+
+	// We omit creation of pretty much everything if we run in cleanup mode
+	if cleanupAndExit {
+		return &ProxyServer{CleanupAndExit: cleanupAndExit}, nil
+	}
+
+	client, eventClient, err := createClients(config.ClientConnection, master)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create event recorder
+	hostname := utilnode.GetHostname(config.HostnameOverride)
+	eventBroadcaster := record.NewBroadcaster()
+	recorder := eventBroadcaster.NewRecorder(scheme, v1.EventSource{Component: "kube-proxy", Host: hostname})
+
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      hostname,
+		UID:       types.UID(hostname),
+		Namespace: "",
+	}
+
+	var healthzServer *healthcheck.HealthzServer
+	var healthzUpdater healthcheck.HealthzUpdater
+	if len(config.HealthzBindAddress) > 0 {
+		healthzServer = healthcheck.NewDefaultHealthzServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration, recorder, nodeRef)
+		healthzUpdater = healthzServer
+	}
+
+	var proxier proxy.ProxyProvider
+	var serviceEventHandler proxyconfig.ServiceHandler
+	var endpointsEventHandler proxyconfig.EndpointsHandler
+
+	if string(config.Mode) == proxyModeIPTables {
+		glog.V(0).Info("Using iptables Proxier.")
+		var nodeIP net.IP
+		if config.BindAddress != "0.0.0.0" {
+			nodeIP = net.ParseIP(config.BindAddress)
+		} else {
+			nodeIP = getNodeIP(client, hostname)
+		}
+		if config.IPTables.MasqueradeBit == nil {
+			// MasqueradeBit must be specified or defaulted.
+			return nil, fmt.Errorf("unable to read IPTables MasqueradeBit from config")
+		}
+
+		// Create a iptables utils.
+		protocol := utiliptables.ProtocolIpv4
+		if net.ParseIP(config.BindAddress).To4() == nil {
+			protocol = utiliptables.ProtocolIpv6
+		}
+		var ipsetInterface utilipset.Interface
+		var dbus utildbus.Interface
+		execer := exec.New()
+		dbus = utildbus.New()
+		iptInterface := utiliptables.New(execer, dbus, protocol)
+		ipvsInterface := utilipvs.New(execer)
+		ipsetInterface = utilipset.New(execer)
+
+		// TODO this has side effects that should only happen when Run() is invoked.
+		proxierIPTables, err := iptables.NewProxier(
+			iptInterface,
+			utilsysctl.New(),
+			execer,
+			config.IPTables.SyncPeriod.Duration,
+			config.IPTables.MinSyncPeriod.Duration,
+			config.IPTables.MasqueradeAll,
+			int(*config.IPTables.MasqueradeBit),
+			config.ClusterCIDR,
+			hostname,
+			nodeIP,
+			recorder,
+			healthzUpdater,
+			config.NodePortAddresses,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("unable to create proxier: %v", err)
+		}
+		metrics.RegisterMetrics()
+		proxier = proxierIPTables
+		serviceEventHandler = proxierIPTables
+		endpointsEventHandler = proxierIPTables
+		// No turning back. Remove artifacts that might still exist from the userspace Proxier.
+		glog.V(0).Info("Tearing down inactive rules.")
+		// TODO this has side effects that should only happen when Run() is invoked.
+		userspace.CleanupLeftovers(iptInterface)
+		// IPVS Proxier will generate some iptables rules,
+		// need to clean them before switching to other proxy mode.
+		ipvs.CleanupLeftovers(ipvsInterface, iptInterface, ipsetInterface, cleanupAndExit)
+	} else {
+		glog.V(0).Info("Using userspace Proxier.")
+
+		// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
+		// our config.EndpointsConfigHandler.
+		loadBalancer := userspace.NewLoadBalancerRR()
+
+		// set EndpointsConfigHandler to our loadBalancer
+		endpointsEventHandler = loadBalancer
+		proxierUserspace, err := userspace.NewProxier(
+			loadBalancer,
+			net.ParseIP(config.BindAddress),
+			*utilnet.ParsePortRangeOrDie(config.PortRange),
+			// TODO @pires replace below with default values, if applicable
+			config.IPTables.SyncPeriod.Duration,
+			config.UDPIdleTimeout.Duration,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("unable to create proxier: %v", err)
+		}
+		proxier = proxierUserspace
+		serviceEventHandler = proxierUserspace
+		glog.V(0).Info("Tearing down proxy rules.")
+		//userspace.CleanupLeftovers()
+	}
+
+	return &ProxyServer{
+		Client:                client,
+		EventClient:           eventClient,
+		Proxier:               proxier,
+		Broadcaster:           eventBroadcaster,
+		Recorder:              recorder,
+		ProxyMode:             string(config.Mode),
+		NodeRef:               nodeRef,
+		MetricsBindAddress:    config.MetricsBindAddress,
+		EnableProfiling:       config.EnableProfiling,
+		OOMScoreAdj:           config.OOMScoreAdj,
+		ResourceContainer:     config.ResourceContainer,
+		ConfigSyncPeriod:      config.ConfigSyncPeriod.Duration,
+		ServiceEventHandler:   serviceEventHandler,
+		EndpointsEventHandler: endpointsEventHandler,
+		HealthzServer:         healthzServer,
+	}, nil
+}
diff --git a/cmd/kube-proxy/app/server_others.go b/cmd/kube-proxy/app/server_others.go
index 46b1acb..ce36035 100644
--- a/cmd/kube-proxy/app/server_others.go
+++ b/cmd/kube-proxy/app/server_others.go
@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!aix
 
 /*
 Copyright 2014 The Kubernetes Authors.
diff --git a/cmd/kubelet/app/options/globalflags.go b/cmd/kubelet/app/options/globalflags.go
index d088245..0da08c0 100644
--- a/cmd/kubelet/app/options/globalflags.go
+++ b/cmd/kubelet/app/options/globalflags.go
@@ -107,6 +107,7 @@ func addGlogFlags(fs *pflag.FlagSet) {
 	local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
 
 	register(global, local, "logtostderr")
+	register(global, local, "logtosyslog")
 	register(global, local, "alsologtostderr")
 	register(global, local, "v")
 	register(global, local, "stderrthreshold")
diff --git a/pkg/cloudprovider/providers/gce/gce_alpha.go b/pkg/cloudprovider/providers/gce/gce_alpha.go
deleted file mode 100644
index 6e22725..0000000
--- a/pkg/cloudprovider/providers/gce/gce_alpha.go
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package gce
-
-import (
-	"fmt"
-
-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-)
-
-const (
-	// alpha: v1.8 (for Services)
-	//
-	// Allows Services backed by a GCP load balancer to choose what network
-	// tier to use. Currently supports "Standard" and "Premium" (default).
-	AlphaFeatureNetworkTiers = "NetworkTiers"
-
-	AlphaFeatureNetworkEndpointGroup = "NetworkEndpointGroup"
-)
-
-// All known alpha features
-var knownAlphaFeatures = map[string]bool{
-	AlphaFeatureNetworkTiers:         true,
-	AlphaFeatureNetworkEndpointGroup: true,
-}
-
-type AlphaFeatureGate struct {
-	features map[string]bool
-}
-
-func (af *AlphaFeatureGate) Enabled(key string) bool {
-	return af.features[key]
-}
-
-func NewAlphaFeatureGate(features []string) (*AlphaFeatureGate, error) {
-	errList := []error{}
-	featureMap := make(map[string]bool)
-	for _, name := range features {
-		if _, ok := knownAlphaFeatures[name]; !ok {
-			errList = append(errList, fmt.Errorf("alpha feature %q is not supported.", name))
-		} else {
-			featureMap[name] = true
-		}
-	}
-	return &AlphaFeatureGate{featureMap}, utilerrors.NewAggregate(errList)
-}
-
-func (gce *GCECloud) alphaFeatureEnabled(feature string) error {
-	if !gce.AlphaFeatureGate.Enabled(feature) {
-		return fmt.Errorf("alpha feature %q is not enabled.", feature)
-	}
-	return nil
-}
diff --git a/pkg/cloudprovider/providers/gce/gce_alphafeat.go b/pkg/cloudprovider/providers/gce/gce_alphafeat.go
new file mode 100644
index 0000000..6e22725
--- /dev/null
+++ b/pkg/cloudprovider/providers/gce/gce_alphafeat.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gce
+
+import (
+	"fmt"
+
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+)
+
+const (
+	// alpha: v1.8 (for Services)
+	//
+	// Allows Services backed by a GCP load balancer to choose what network
+	// tier to use. Currently supports "Standard" and "Premium" (default).
+	AlphaFeatureNetworkTiers = "NetworkTiers"
+
+	AlphaFeatureNetworkEndpointGroup = "NetworkEndpointGroup"
+)
+
+// All known alpha features
+var knownAlphaFeatures = map[string]bool{
+	AlphaFeatureNetworkTiers:         true,
+	AlphaFeatureNetworkEndpointGroup: true,
+}
+
+type AlphaFeatureGate struct {
+	features map[string]bool
+}
+
+func (af *AlphaFeatureGate) Enabled(key string) bool {
+	return af.features[key]
+}
+
+func NewAlphaFeatureGate(features []string) (*AlphaFeatureGate, error) {
+	errList := []error{}
+	featureMap := make(map[string]bool)
+	for _, name := range features {
+		if _, ok := knownAlphaFeatures[name]; !ok {
+			errList = append(errList, fmt.Errorf("alpha feature %q is not supported.", name))
+		} else {
+			featureMap[name] = true
+		}
+	}
+	return &AlphaFeatureGate{featureMap}, utilerrors.NewAggregate(errList)
+}
+
+func (gce *GCECloud) alphaFeatureEnabled(feature string) error {
+	if !gce.AlphaFeatureGate.Enabled(feature) {
+		return fmt.Errorf("alpha feature %q is not enabled.", feature)
+	}
+	return nil
+}
diff --git a/pkg/kubectl/cmd/cp.go b/pkg/kubectl/cmd/cp.go
index 602be6d..7090f08 100644
--- a/pkg/kubectl/cmd/cp.go
+++ b/pkg/kubectl/cmd/cp.go
@@ -333,7 +333,10 @@ func untarAll(reader io.Reader, destFile, prefix string) error {
 		}
 		entrySeq++
 		mode := header.FileInfo().Mode()
-		outFileName := path.Join(destFile, clean(header.Name[len(prefix):]))
+
+		// Some versions of tar (like AIX tar) do not remove leading '/'
+		tarname := getPrefix(header.Name)
+		outFileName := path.Join(destFile, clean(tarname[len(prefix):]))
 		baseName := path.Dir(outFileName)
 		if err := os.MkdirAll(baseName, 0755); err != nil {
 			return err
diff --git a/pkg/kubectl/cmd/util/editor/editor.go b/pkg/kubectl/cmd/util/editor/editor.go
index a53e8c0..057d5d4 100644
--- a/pkg/kubectl/cmd/util/editor/editor.go
+++ b/pkg/kubectl/cmd/util/editor/editor.go
@@ -38,7 +38,7 @@ const (
 	// with the given file type. If we can't because of the need of
 	// blocking, use a script with 'ftype' and 'assoc' to detect it.
 	defaultEditor = "vi"
-	defaultShell  = "/bin/bash"
+	defaultShell  = "/bin/ksh"
 	windowsEditor = "notepad"
 	windowsShell  = "cmd"
 )
diff --git a/pkg/kubectl/util/term/resizeevents.go b/pkg/kubectl/util/term/resizeevents.go
index e3476f9..c88cb06 100644
--- a/pkg/kubectl/util/term/resizeevents.go
+++ b/pkg/kubectl/util/term/resizeevents.go
@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!aix
 
 /*
 Copyright 2016 The Kubernetes Authors.
diff --git a/pkg/kubectl/util/term/resizeevents_aix.go b/pkg/kubectl/util/term/resizeevents_aix.go
new file mode 100644
index 0000000..a6bca64
--- /dev/null
+++ b/pkg/kubectl/util/term/resizeevents_aix.go
@@ -0,0 +1,63 @@
+// +build aix
+
+// This is a copy of resizeevents.go, except it uses syscall package instead of unix
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package term
+
+import (
+	"os"
+	"os/signal"
+	"syscall"
+
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/tools/remotecommand"
+)
+
+// monitorResizeEvents spawns a goroutine that waits for SIGWINCH signals (these indicate the
+// terminal has resized). After receiving a SIGWINCH, this gets the terminal size and tries to send
+// it to the resizeEvents channel. The goroutine stops when the stop channel is closed.
+func monitorResizeEvents(fd uintptr, resizeEvents chan<- remotecommand.TerminalSize, stop chan struct{}) {
+	go func() {
+		defer runtime.HandleCrash()
+
+		winch := make(chan os.Signal, 1)
+		signal.Notify(winch, syscall.SIGWINCH)
+		defer signal.Stop(winch)
+
+		for {
+			select {
+			case <-winch:
+				size := GetSize(fd)
+				if size == nil {
+					return
+				}
+
+				// try to send size
+				select {
+				case resizeEvents <- *size:
+					// success
+				default:
+					// not sent
+				}
+			case <-stop:
+				return
+			}
+		}
+	}()
+}
diff --git a/pkg/kubectl/util/umask.go b/pkg/kubectl/util/umask.go
index 93e1447..e11f5a7 100644
--- a/pkg/kubectl/util/umask.go
+++ b/pkg/kubectl/util/umask.go
@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!aix
 
 /*
 Copyright 2014 The Kubernetes Authors.
diff --git a/pkg/kubectl/util/umask_aix.go b/pkg/kubectl/util/umask_aix.go
new file mode 100644
index 0000000..0016861
--- /dev/null
+++ b/pkg/kubectl/util/umask_aix.go
@@ -0,0 +1,29 @@
+// +build aix
+
+// This is a copy of umask.go, except it uses syscall package instead of unix
+
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"syscall"
+)
+
+func Umask(mask int) (old int, err error) {
+	return syscall.Umask(mask), nil
+}
diff --git a/pkg/kubelet/aixstats/aixstats_cadvisor.go b/pkg/kubelet/aixstats/aixstats_cadvisor.go
new file mode 100644
index 0000000..d360007
--- /dev/null
+++ b/pkg/kubelet/aixstats/aixstats_cadvisor.go
@@ -0,0 +1,293 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package aixstats provides a client to get node and wpar level stats on aix
+package aixstats
+
+import (
+	"fmt"
+	"os/exec"
+	"strings"
+	"sync"
+	"time"
+
+	cadvisorapi "github.com/google/cadvisor/info/v1"
+	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
+	"k8s.io/apimachinery/pkg/util/wait"
+)
+
+// Client is an interface that is used to get stats information.
+type Client interface {
+	AixWparInfos(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error)
+	AixMachineInfo() (*cadvisorapi.MachineInfo, error)
+	AixVersionInfo() (*cadvisorapi.VersionInfo, error)
+	AixGlobalFsInfo(path string) (cadvisorapiv2.FsInfo, error)
+}
+
+// StatsClient is a client that implements the Client interface
+type statsClient struct {
+	client aixNodeStatsClient
+}
+
+type aixNodeStatsClient interface {
+	startMonitoring() error
+	getNodeMetrics() (nodeMetrics, error)
+	getNodeInfo() nodeInfo
+	getMachineInfo() (*cadvisorapi.MachineInfo, error)
+	getVersionInfo() (*cadvisorapi.VersionInfo, error)
+	getGlobalFsInfo(path string) (cadvisorapiv2.FsInfo, error)
+}
+
+const (
+	perfCounterUpdatePeriod = 1 * time.Second
+)
+
+// perfCounterNodeStatsClient is a client that provides Aix Stats via libperfstat
+type perfCounterNodeStatsClient struct {
+	nodeMetrics
+	mu sync.RWMutex // mu protects nodeMetrics
+	nodeInfo
+	cachedWparMetrics map[string]WparMetrics
+}
+
+// newClient constructs a Client.
+func newClient(statsNodeClient aixNodeStatsClient) (Client, error) {
+	statsClient := new(statsClient)
+	statsClient.client = statsNodeClient
+
+	err := statsClient.client.startMonitoring()
+	if err != nil {
+		return nil, err
+	}
+
+	return statsClient, nil
+}
+
+// NewPerfStatsClient creates a client using aix libperfstat
+func NewPerfStatsClient() (Client, error) {
+	return newClient(&perfCounterNodeStatsClient{})
+}
+
+func (p *perfCounterNodeStatsClient) startMonitoring() error {
+	p.nodeInfo = nodeInfo{
+		startTime:             time.Now(),
+		cpuUsageCoreBaseValue: 0,
+	}
+
+	err := p.perfstatPartitionInfo()
+	if err != nil {
+		return err
+	}
+
+	go wait.Forever(func() {
+		p.collectMetricsData()
+	}, perfCounterUpdatePeriod)
+
+	return nil
+}
+
+func (p *perfCounterNodeStatsClient) collectMetricsData() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.nodeMetrics = nodeMetrics{
+		timeStamp: time.Now(),
+	}
+	err := p.perfstatGlobalCpuMetrics()
+	if err != nil {
+		return err
+	}
+	err = p.perfstatGlobalMemoryMetrics()
+	if err != nil {
+		return err
+	}
+	p.cachedWparMetrics, err = perfstatCacheAllWparMetrics()
+	return nil
+}
+
+func (p *perfCounterNodeStatsClient) getNodeInfo() nodeInfo {
+	return p.nodeInfo
+}
+
+func (p *perfCounterNodeStatsClient) getMachineInfo() (*cadvisorapi.MachineInfo, error) {
+	machineID := ""
+	output, err := exec.Command("/usr/sbin/lsattr", "-El", "sys0", "-F", "value", "-a", "partition_uuid").Output()
+	if err == nil {
+		machineID = strings.TrimSpace(string(output))
+	}
+	systemUUID := ""
+	output, err = exec.Command("/usr/sbin/lsattr", "-El", "sys0", "-F", "value", "-a", "os_uuid").Output()
+	if err == nil {
+		systemUUID = strings.TrimSpace(string(output))
+	}
+
+	return &cadvisorapi.MachineInfo{
+		NumCores:       p.nodeInfo.numCPU,
+		MemoryCapacity: p.nodeInfo.memoryPhysicalCapacityBytes,
+		MachineID:      machineID,
+		SystemUUID:     systemUUID,
+	}, nil
+}
+
+func (p *perfCounterNodeStatsClient) getVersionInfo() (*cadvisorapi.VersionInfo, error) {
+	return &cadvisorapi.VersionInfo{
+		KernelVersion:      p.nodeInfo.kernelVersion,
+		ContainerOsVersion: p.nodeInfo.osImageVersion,
+	}, nil
+}
+
+func (p *perfCounterNodeStatsClient) getNodeMetrics() (nodeMetrics, error) {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+	return p.nodeMetrics, nil
+}
+
+func (c *statsClient) createRootContainerInfo() (*cadvisorapiv2.ContainerInfo, error) {
+	nodeMetrics, err := c.client.getNodeMetrics()
+	if err != nil {
+		return nil, err
+	}
+	var stats []*cadvisorapiv2.ContainerStats
+
+	stats = append(stats, &cadvisorapiv2.ContainerStats{
+		Timestamp: nodeMetrics.timeStamp,
+		Cpu: &cadvisorapi.CpuStats{
+			Usage: cadvisorapi.CpuUsage{
+				Total: nodeMetrics.cpuUsageCoreNanoSeconds,
+			},
+		},
+		//CpuInst: &cadvisorapiv2.CpuInstStats{
+		//	Usage: cadvisorapiv2.CpuInstUsage{
+		//		Total: 900000000,
+		//	},
+		//},
+		Memory: &cadvisorapi.MemoryStats{
+			WorkingSet: nodeMetrics.memoryPrivWorkingSetBytes,
+			Usage:      nodeMetrics.memoryCommittedBytes,
+		},
+	})
+
+	nodeInfo := c.client.getNodeInfo()
+	rootInfo := cadvisorapiv2.ContainerInfo{
+		Spec: cadvisorapiv2.ContainerSpec{
+			CreationTime: nodeInfo.startTime,
+			HasCpu:       true,
+			HasMemory:    true,
+			Memory: cadvisorapiv2.MemorySpec{
+				Limit: nodeInfo.memoryPhysicalCapacityBytes,
+			},
+		},
+		Stats: stats,
+	}
+
+	return &rootInfo, nil
+}
+
+func (p *perfCounterNodeStatsClient) getGlobalFsInfo(path string) (cadvisorapiv2.FsInfo, error) {
+	fs, _ := perfstatFsInfo(path)
+	fsInfo := cadvisorapiv2.FsInfo{
+		Timestamp:  time.Now(),
+		Device:     fs.device,
+		Mountpoint: fs.mountPoint,
+		Capacity:   fs.limit,
+		Usage:      fs.used,
+		Available:  fs.avail,
+		Inodes:     &fs.inodes,
+		InodesFree: &fs.inodesFree,
+	}
+	return fsInfo, nil
+}
+
+// AixWparInfos returns a map of container infos. The map may contain node and
+// wpar level stats. Analogous to cadvisor GetContainerInfoV2 method.
+func (c *statsClient) AixWparInfos(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
+	infos := make(map[string]cadvisorapiv2.ContainerInfo)
+	if name == "/" {
+		// "/" refers to the root container. It corresponds to AIX global wpar.
+		rootContainerInfo, err := c.createRootContainerInfo()
+		if err != nil {
+			return nil, err
+		}
+		infos["/"] = *rootContainerInfo
+		if options.Recursive == false {
+			return infos, nil
+		}
+	} else if options.Recursive == true {
+		// AIX doesn't have subcontainers of a container. However, any wpar is considered a subcontainer of the global.
+		return nil, fmt.Errorf("invalid request - AIX container (wpar) %q has no subcontainers", name)
+	}
+	wparMap, _ := perfstatCacheAllWparMetrics()
+	wparMapRef := &wparMap
+	if name != "/" {
+		if metrics, ok := wparMap[name]; ok {
+			wm := make(map[string]WparMetrics)
+			wm[name] = metrics
+			wparMapRef = &wm
+		} else {
+			return nil, fmt.Errorf("invalid request - AIX container (wpar) %q not found", name)
+		}
+	}
+	for wparName, metrics := range *wparMapRef {
+		var stats []*cadvisorapiv2.ContainerStats
+
+		stats = append(stats, &cadvisorapiv2.ContainerStats{
+			Timestamp: time.Unix(0, metrics.TimeStamp),
+			Cpu: &cadvisorapi.CpuStats{
+				Usage: cadvisorapi.CpuUsage{
+					Total: metrics.CpuUsageCoreNanoSeconds,
+				},
+			},
+			Memory: &cadvisorapi.MemoryStats{
+				WorkingSet: metrics.MemoryPrivWorkingSetBytes,
+				Usage:      metrics.MemoryUsageBytes,
+			},
+		})
+
+		wparInfo := cadvisorapiv2.ContainerInfo{
+			Spec: cadvisorapiv2.ContainerSpec{
+				// TODO: retrieve start time
+				//CreationTime: nodeInfo.startTime,
+				HasCpu:    true,
+				HasMemory: true,
+				Memory: cadvisorapiv2.MemorySpec{
+					Limit: metrics.MemoryLimit,
+				},
+				Cpu: cadvisorapiv2.CpuSpec{
+					Limit: metrics.CpuLimit,
+				},
+			},
+			Stats: stats,
+		}
+
+		infos[wparName] = wparInfo
+	}
+
+	return infos, nil
+}
+
+// AixMachineInfo returns a cadvisorapi.MachineInfo with details about the
+// node machine. Analogous to cadvisor MachineInfo method.
+func (c *statsClient) AixMachineInfo() (*cadvisorapi.MachineInfo, error) {
+	return c.client.getMachineInfo()
+}
+
+// AixVersionInfo returns a cadvisorapi.VersionInfo with details about the
+// node machine. Analogous to cadvisor VersionInfo method.
+func (c *statsClient) AixVersionInfo() (*cadvisorapi.VersionInfo, error) {
+	return c.client.getVersionInfo()
+}
+
+// AixGlobalFsInfo returns a cadvisorapiv2.FsInfo with capacity and usage
+// details for the filesystem at path. Analogous to cadvisor GetDirFsInfo method.
+func (c *statsClient) AixGlobalFsInfo(path string) (cadvisorapiv2.FsInfo, error) {
+	return c.client.getGlobalFsInfo(path)
+}
diff --git a/pkg/kubelet/aixstats/perfstats_aix.go b/pkg/kubelet/aixstats/perfstats_aix.go
new file mode 100644
index 0000000..c1dae69
--- /dev/null
+++ b/pkg/kubelet/aixstats/perfstats_aix.go
@@ -0,0 +1,345 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package aixstats provides a client to get node and wpar level stats on aix
+package aixstats
+
+// #cgo CFLAGS: -I/usr/include
+// #cgo LDFLAGS: -L/usr/lib -lperfstat
+// #include <libperfstat.h>
+// #include <stdlib.h>
+// #include <sys/statfs.h>
+// #include <fstab.h>
+// #include <string.h>
+// #include <strings.h>
+// #include <sys/time.h>
+// #include <sys/types.h>
+// #include <sys/mntctl.h>
+// #include <sys/vmount.h>
+/*
+void init_wpar_id(perfstat_id_wpar_t *wparid, char *name)
+{
+    bzero(wparid, sizeof(perfstat_id_wpar_t));
+    wparid->spec = WPARNAME;
+    if (name) {
+        strncpy(wparid->u.wparname, name, MAXCORRALNAMELEN);
+    } else {
+        strcpy(wparid->u.wparname, FIRST_WPARNAME);
+    }
+}
+
+int get_devname_and_mntpt(int vfsnum, int len, char *devname, char *mntpt)
+{
+        int count, ct, mntsiz;
+        int rc = -1;
+        struct vmount *vmt;
+        char *mnttab;
+
+        mnttab = (char *) &count;
+        mntsiz = sizeof(count);
+
+        if (!devname || !mntpt)
+        {
+                return(-1);
+        }
+
+        while ((ct = mntctl(MCTL_QUERY, mntsiz, mnttab)) <= 0)
+        {
+                if (ct < 0) {
+                    return -1;
+                }
+
+                mntsiz = *((int *) mnttab);
+                mnttab = (mnttab == (char *) &count) ?
+                        (char *) malloc(mntsiz) :
+                                (char *) realloc(mnttab, mntsiz);
+
+                if (mnttab == NULL)
+                {
+                    return -1;
+                }
+        }
+        for (vmt = (struct vmount *) mnttab; ct > 0;
+             ct--, vmt = (struct vmount *) ((char *) vmt + vmt->vmt_length))
+        {
+                if (vfsnum == vmt->vmt_vfsnumber)
+                {
+                        strncpy(devname, vmt2dataptr(vmt, VMT_OBJECT), len);
+                        strncpy(mntpt, vmt2dataptr(vmt, VMT_STUB), len);
+                        rc = 0;
+                        break;
+                }
+        }
+        if (mnttab != (char *)&count)
+        {
+                free(mnttab);
+        }
+        return rc;
+}
+*/
+import "C"
+
+import (
+	"sync"
+	"time"
+	"unsafe"
+
+	"github.com/golang/glog"
+)
+
+const (
+	nanoSecsPerTick     = C.NS_PER_TICK
+	megaBytesToBytes    = 1024 * 1024
+	pageSize            = 4096
+	minNanoSecsInterval = 500000000
+	pathLen             = 1024
+)
+
+type procTimeBase struct {
+	TimeStamp               int64
+	TimeBase                uint64
+	Cpu                     uint64
+	CpuPercent              float64
+	CpuUsageCoreNanoSeconds uint64
+}
+
+var mu = sync.RWMutex{}
+var wparMap = map[string]procTimeBase{}
+
+type nodeMetrics struct {
+	cpuUsageCoreNanoSeconds   uint64
+	memoryPrivWorkingSetBytes uint64
+	memoryCommittedBytes      uint64
+	timeStamp                 time.Time
+}
+
+type WparMetrics struct {
+	CpuUsageCoreNanoSeconds   uint64
+	MemoryPrivWorkingSetBytes uint64
+	MemoryUsageBytes          uint64
+	CpuLimit                  uint64
+	MemoryLimit               uint64
+	TimeStamp                 int64
+	Name                      string
+}
+
+type nodeInfo struct {
+	memoryPhysicalCapacityBytes uint64
+	kernelVersion               string
+	osImageVersion              string
+	// startTime is the time when the node was started
+	startTime             time.Time
+	numCPU                int
+	cpuUsageCoreBaseValue uint64
+}
+
+type fsInfo struct {
+	limit      uint64
+	avail      uint64
+	used       uint64
+	inodes     uint64
+	inodesFree uint64
+	mountPoint string
+	device     string
+}
+
+func perfstatFsInfo(path string) (fsInfo, error) {
+	fsbuf := C.struct_statfs64{}
+	fs := fsInfo{}
+	cpath := C.CString(path)
+	defer C.free(unsafe.Pointer(cpath))
+	if C.statfs64(cpath, &fsbuf) != -1 {
+		fs = fsInfo{
+			used:       uint64(fsbuf.f_bsize) * uint64(fsbuf.f_blocks-fsbuf.f_bfree),
+			avail:      uint64(fsbuf.f_bsize) * uint64(fsbuf.f_bfree),
+			limit:      uint64(fsbuf.f_bsize) * uint64(fsbuf.f_blocks),
+			inodes:     uint64(fsbuf.f_files),
+			inodesFree: uint64(fsbuf.f_ffree),
+			mountPoint: C.GoString(&fsbuf.f_fname[0]),
+		}
+		devBuf := C.malloc(C.sizeof_char * pathLen)
+		defer C.free(unsafe.Pointer(devBuf))
+		mntBuf := C.malloc(C.sizeof_char * pathLen)
+		defer C.free(unsafe.Pointer(mntBuf))
+		ret := C.get_devname_and_mntpt(fsbuf.f_vfsnumber, pathLen, (*C.char)(devBuf), (*C.char)(mntBuf))
+		if ret >= 0 {
+			fs.device = C.GoString((*C.char)(devBuf))
+			fs.mountPoint = C.GoString((*C.char)(mntBuf))
+		}
+	}
+	return fs, nil
+}
+
+// perfstatCacheAllWparMetrics enumerates every WPAR on the system and
+// returns a map from WPAR name to its metrics and limits. Enumeration
+// failures yield an empty map with a nil error (best-effort); per-WPAR
+// collection failures are logged and the partial data is kept.
+func perfstatCacheAllWparMetrics() (map[string]WparMetrics, error) {
+	mp := make(map[string]WparMetrics)
+	// A nil buffer asks perfstat only for the number of WPARs.
+	tot := C.perfstat_wpar_total(nil, nil, C.sizeof_perfstat_wpar_total_t, 0)
+	numWpar := int(tot)
+	if numWpar <= 0 {
+		return mp, nil
+	}
+
+	/* allocate enough memory for all the structures */
+	winfo := make([]C.perfstat_wpar_total_t, numWpar)
+
+	wparid := C.perfstat_id_wpar_t{}
+	C.init_wpar_id(&wparid, nil)
+	rc := C.perfstat_wpar_total(&wparid, (*C.perfstat_wpar_total_t)(unsafe.Pointer(&winfo[0])), C.sizeof_perfstat_wpar_total_t, tot)
+	if int(rc) < 0 {
+		return mp, nil
+	}
+
+	tstamp := time.Now().UnixNano()
+	for i := 0; i < numWpar; i++ {
+		name := C.GoString(&winfo[i].name[0])
+		// Bug fix: Name was previously left empty here, so every WPAR
+		// shared the "" entry of the wparMap sample cache used by
+		// perfstatWparCpuMetrics, corrupting per-WPAR CPU deltas.
+		wparData := WparMetrics{
+			Name:        name,
+			TimeStamp:   tstamp,
+			CpuLimit:    uint64(winfo[i].cpu_limit),
+			MemoryLimit: uint64(winfo[i].mem_limit),
+		}
+		wparName := C.perfstat_id_wpar_t{}
+		C.init_wpar_id(&wparName, &winfo[i].name[0])
+
+		// These errors were previously checked with empty bodies; log
+		// them instead of silently dropping them.
+		if err := perfstatWparCpuMetrics(&wparData, &wparName); err != nil {
+			glog.V(4).Infof("AIX: aixstats error: wpar cpu metrics for %s: %v", name, err)
+		}
+		if err := perfstatWparMemoryMetrics(&wparData, &wparName); err != nil {
+			glog.V(4).Infof("AIX: aixstats error: wpar memory metrics for %s: %v", name, err)
+		}
+		mp[name] = wparData
+	}
+	return mp, nil
+}
+
+// PerfstatInstantWparMetrics collects an instantaneous snapshot (limits,
+// CPU, memory) for the WPAR with the given name. Collection is
+// best-effort: individual failures are logged at V(4) and a partially
+// filled WparMetrics is returned with a nil error.
+func PerfstatInstantWparMetrics(name string) (WparMetrics, error) {
+	wparName := C.perfstat_id_wpar_t{}
+	cname := C.CString(name)
+	defer C.free(unsafe.Pointer(cname))
+	C.init_wpar_id(&wparName, cname)
+	wm := WparMetrics{TimeStamp: time.Now().UnixNano(), Name: name}
+	// Bug fix: previously the errors from the first two collectors were
+	// overwritten and only the last one was inspected.
+	if err := perfstatWparLimits(&wm, &wparName); err != nil {
+		glog.V(4).Infof("AIX: aixstats error: wpar limits: %v", err)
+	}
+	if err := perfstatWparCpuMetrics(&wm, &wparName); err != nil {
+		glog.V(4).Infof("AIX: aixstats error: cpu metrics: %v", err)
+	}
+	if err := perfstatWparMemoryMetrics(&wm, &wparName); err != nil {
+		glog.V(4).Infof("AIX: aixstats error: mem metrics: %v", err)
+	}
+	return wm, nil
+}
+
+// perfstatWparLimits fills wm.CpuLimit and wm.MemoryLimit for the WPAR
+// identified by id. On perfstat failure the failure is logged and the
+// limits are left unchanged (nil error; best-effort convention).
+func perfstatWparLimits(wm *WparMetrics, id *C.perfstat_id_wpar_t) error {
+	// A single struct suffices; no need for a 1-element slice here.
+	winfo := C.perfstat_wpar_total_t{}
+	rc := C.perfstat_wpar_total(id, &winfo, C.sizeof_perfstat_wpar_total_t, 1)
+	if int(rc) < 0 {
+		glog.V(4).Infof("AIX: aixstats error: cpu/mem limits")
+		// Bug fix: do not copy limits out of an unfilled buffer.
+		return nil
+	}
+	wm.CpuLimit = uint64(winfo.cpu_limit)
+	wm.MemoryLimit = uint64(winfo.mem_limit)
+	return nil
+}
+
+// perfstatWparCpuMetrics updates wm.CpuUsageCoreNanoSeconds for the WPAR
+// identified by id. It samples the WPAR's cumulative puser+psys ticks and
+// the partition timebase, and uses the previous sample cached in the
+// package-level wparMap (keyed by wm.Name, guarded by mu) to turn the
+// delta into an estimated cumulative nanoseconds-of-CPU value. The first
+// call for a given name only seeds the cache and leaves the field at
+// zero. perfstat failures are logged at V(4) and processing continues.
+func perfstatWparCpuMetrics(wm *WparMetrics, id *C.perfstat_id_wpar_t) error {
+	/* allocate enough memory for all the structures */
+	winfo := make([]C.perfstat_cpu_total_wpar_t, 1)
+
+	partinfo := C.perfstat_partition_total_t{}
+	rc := C.perfstat_partition_total(nil, &partinfo, C.sizeof_perfstat_partition_total_t, 1)
+	if int(rc) < 0 {
+		glog.V(4).Infof("AIX: aixstats error: wpar cpu metrics")
+	}
+	rc = C.perfstat_cpu_total_wpar(id, (*C.perfstat_cpu_total_wpar_t)(unsafe.Pointer(&winfo[0])), C.sizeof_perfstat_cpu_total_wpar_t, 1)
+
+	if int(rc) < 0 {
+		glog.V(4).Infof("AIX: aixstats error: total wpar cpu metrics")
+	}
+	// NOTE(review): on perfstat failure winfo/partinfo are still read
+	// below and may be zero-filled — confirm this best-effort behavior
+	// is intended.
+	cpu := uint64(winfo[0].puser + winfo[0].psys)
+	tstamp := time.Now().UnixNano()
+	mu.Lock()
+	defer mu.Unlock()
+	if wparMap == nil {
+		wparMap = make(map[string]procTimeBase)
+	}
+	if prevData, ok := wparMap[wm.Name]; ok {
+		// Default: extrapolate from the last computed CPU percentage.
+		timeDelta := tstamp - prevData.TimeStamp
+		cpuUsage := prevData.CpuUsageCoreNanoSeconds + uint64(float64(timeDelta)*prevData.CpuPercent)
+		// Only recompute the percentage when enough wall time has
+		// passed for the tick/timebase delta to be meaningful.
+		if timeDelta > minNanoSecsInterval {
+			cpuPercent := float64(cpu-prevData.Cpu) / float64(uint64(partinfo.timebase_last)-prevData.TimeBase)
+			cpuUsage = prevData.CpuUsageCoreNanoSeconds + uint64(float64(timeDelta)*cpuPercent)
+			wparMap[wm.Name] = procTimeBase{
+				CpuUsageCoreNanoSeconds: cpuUsage,
+				CpuPercent:              cpuPercent,
+				TimeStamp:               tstamp,
+				TimeBase:                uint64(partinfo.timebase_last),
+				Cpu:                     cpu,
+			}
+		}
+		wm.CpuUsageCoreNanoSeconds = cpuUsage
+	} else {
+		// First sample for this WPAR: seed the cache only.
+		wparMap[wm.Name] = procTimeBase{
+			Cpu:       cpu,
+			TimeStamp: tstamp,
+			TimeBase:  uint64(partinfo.timebase_last),
+		}
+	}
+	return nil
+}
+
+// perfstatWparMemoryMetrics fills wm's memory fields for the WPAR
+// identified by id. Both usage and private working set are reported as
+// real_inuse pages converted to bytes. On perfstat failure the failure
+// is logged and the fields are left unchanged (nil error; best-effort).
+func perfstatWparMemoryMetrics(wm *WparMetrics, id *C.perfstat_id_wpar_t) error {
+	// A single struct suffices; no need for a 1-element slice here.
+	winfo := C.perfstat_memory_total_wpar_t{}
+	rc := C.perfstat_memory_total_wpar(id, &winfo, C.sizeof_perfstat_memory_total_wpar_t, 1)
+	if int(rc) < 0 {
+		glog.V(4).Infof("AIX: aixstats error: memory metrics")
+		// Bug fix: do not copy values out of an unfilled buffer.
+		return nil
+	}
+	wm.MemoryUsageBytes = uint64(winfo.real_inuse) * pageSize
+	// NOTE(review): the private working set is approximated with
+	// real_inuse as well — confirm whether a more precise counter exists.
+	wm.MemoryPrivWorkingSetBytes = uint64(winfo.real_inuse) * pageSize
+	return nil
+}
+
+// perfstatGlobalCpuMetrics refreshes p.nodeMetrics.cpuUsageCoreNanoSeconds
+// from the system-wide perfstat CPU counters. The first sample is stored
+// as a base value so the reported counter starts at zero and grows from
+// process start. The error returned is the cgo errno of the perfstat call.
+func (p *perfCounterNodeStatsClient) perfstatGlobalCpuMetrics() error {
+	cpuinfo := C.perfstat_cpu_total_t{}
+	_, err := C.perfstat_cpu_total(nil, &cpuinfo, C.sizeof_perfstat_cpu_total_t, 1)
+	if err != nil {
+		glog.V(4).Infof("AIX: aixstats error: global CPU metrics")
+	}
+	// Total ticks spent in wait+user+sys, converted to nanoseconds.
+	cpu := uint64(cpuinfo.wait+cpuinfo.user+cpuinfo.sys) * nanoSecsPerTick
+	if p.nodeInfo.cpuUsageCoreBaseValue == 0 {
+		p.nodeInfo.cpuUsageCoreBaseValue = cpu
+	}
+	p.nodeMetrics.cpuUsageCoreNanoSeconds = cpu - p.nodeInfo.cpuUsageCoreBaseValue
+	return err
+}
+
+// perfstatGlobalMemoryMetrics queries the system-wide perfstat memory
+// counters. The assignments below are commented out, so this currently
+// only checks that the perfstat call succeeds and updates no metrics.
+// NOTE(review): confirm whether the disabled assignments are intentional
+// or leftover debugging.
+func (p *perfCounterNodeStatsClient) perfstatGlobalMemoryMetrics() error {
+	meminfo := C.perfstat_memory_total_t{}
+	_, err := C.perfstat_memory_total(nil, &meminfo, C.sizeof_perfstat_memory_total_t, 1)
+	if err != nil {
+		glog.V(4).Infof("AIX: aixstats error: global memory metrics")
+	}
+	//p.nodeMetrics.memoryCommittedBytes = uint64(meminfo.real_total - meminfo.real_free) * pageSize
+	//p.nodeMetrics.memoryPrivWorkingSetBytes = uint64(meminfo.real_total - meminfo.real_free) * pageSize
+	return err
+}
+
+// perfstatPartitionInfo caches static LPAR facts on p.nodeInfo: physical
+// memory capacity (perfstat reports megabytes; converted to bytes via
+// megaBytesToBytes), logical CPU count, and the OS build/version strings.
+func (p *perfCounterNodeStatsClient) perfstatPartitionInfo() error {
+	lparInfo := C.perfstat_partition_config_t{}
+	_, err := C.perfstat_partition_config(nil, &lparInfo, C.sizeof_perfstat_partition_config_t, 1)
+	if err != nil {
+		glog.V(4).Infof("AIX: aixstats error: partition config")
+	}
+	p.nodeInfo.memoryPhysicalCapacityBytes = uint64(lparInfo.mem.online) * megaBytesToBytes
+	p.nodeInfo.numCPU = int(lparInfo.lcpus)
+	p.nodeInfo.kernelVersion = C.GoString(&lparInfo.OSBuild[0])
+	p.nodeInfo.osImageVersion = C.GoString(&lparInfo.OSVersion[0])
+	return err
+}
diff --git a/pkg/kubelet/apis/kubeletconfig/v1beta1/defaults.go b/pkg/kubelet/apis/kubeletconfig/v1beta1/defaults.go
index b8d6bb3..ba8d88f 100644
--- a/pkg/kubelet/apis/kubeletconfig/v1beta1/defaults.go
+++ b/pkg/kubelet/apis/kubeletconfig/v1beta1/defaults.go
@@ -181,7 +181,7 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
 		obj.EnableControllerAttachDetach = utilpointer.BoolPtr(true)
 	}
 	if obj.MakeIPTablesUtilChains == nil {
-		obj.MakeIPTablesUtilChains = utilpointer.BoolPtr(true)
+		obj.MakeIPTablesUtilChains = utilpointer.BoolPtr(false) //(true)
 	}
 	if obj.IPTablesMasqueradeBit == nil {
 		obj.IPTablesMasqueradeBit = utilpointer.Int32Ptr(DefaultIPTablesMasqueradeBit)
diff --git a/pkg/kubelet/cadvisor/cadvisor_aix.go b/pkg/kubelet/cadvisor/cadvisor_aix.go
new file mode 100644
index 0000000..096bc8a
--- /dev/null
+++ b/pkg/kubelet/cadvisor/cadvisor_aix.go
@@ -0,0 +1,92 @@
+// +build aix
+
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cadvisor
+
+import (
+	"github.com/google/cadvisor/events"
+	cadvisorapi "github.com/google/cadvisor/info/v1"
+	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
+	"k8s.io/kubernetes/pkg/kubelet/aixstats"
+)
+
+// cadvisorClient implements the kubelet cadvisor.Interface on AIX by
+// delegating to the perfstat-based aixstats client instead of running a
+// real cAdvisor.
+type cadvisorClient struct {
+	aixStatsClient aixstats.Client
+	rootPath       string // kubelet root directory, used by RootFsInfo
+}
+
+// Compile-time check that cadvisorClient satisfies cadvisor.Interface.
+var _ Interface = new(cadvisorClient)
+
+// New returns a perfstat-backed cadvisor.Interface for AIX. The address,
+// port, imageFsInfoProvider, and usingLegacyStats arguments are accepted
+// for interface compatibility but ignored; no cAdvisor HTTP API is
+// exported.
+func New(address string, port uint, imageFsInfoProvider ImageFsInfoProvider, rootPath string, usingLegacyStats bool) (Interface, error) {
+	client, err := aixstats.NewPerfStatsClient()
+	return &cadvisorClient{aixStatsClient: client, rootPath: rootPath}, err
+}
+
+// Start is a no-op on AIX: the perfstat client needs no background work.
+func (cu *cadvisorClient) Start() error {
+	return nil
+}
+
+// DockerContainer is unsupported on AIX; it returns an empty
+// ContainerInfo and a nil error.
+func (cu *cadvisorClient) DockerContainer(name string, req *cadvisorapi.ContainerInfoRequest) (cadvisorapi.ContainerInfo, error) {
+	return cadvisorapi.ContainerInfo{}, nil
+}
+
+// ContainerInfo (v1 API) is unsupported on AIX; it returns an empty
+// ContainerInfo and a nil error.
+func (cu *cadvisorClient) ContainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) {
+	// No v1 per-container data is available from perfstat.
+	return &cadvisorapi.ContainerInfo{}, nil
+}
+
+// ContainerInfoV2 returns v2 container stats by delegating to the
+// aixstats client's per-WPAR collection.
+func (cu *cadvisorClient) ContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
+	return cu.aixStatsClient.AixWparInfos(name, options)
+}
+
+// SubcontainerInfo is unsupported on AIX and returns (nil, nil).
+// NOTE(review): callers that range over or index the returned map are
+// safe with nil, but any caller expecting a non-nil map should be checked.
+func (cu *cadvisorClient) SubcontainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) {
+	return nil, nil
+}
+
+// MachineInfo reports node hardware facts via the aixstats client.
+func (cu *cadvisorClient) MachineInfo() (*cadvisorapi.MachineInfo, error) {
+	return cu.aixStatsClient.AixMachineInfo()
+}
+
+// VersionInfo reports OS/kernel version facts via the aixstats client.
+func (cu *cadvisorClient) VersionInfo() (*cadvisorapi.VersionInfo, error) {
+	return cu.aixStatsClient.AixVersionInfo()
+}
+
+// ImagesFsInfo is unsupported on AIX; it returns a zero FsInfo and nil.
+func (cu *cadvisorClient) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) {
+	return cadvisorapiv2.FsInfo{}, nil
+}
+
+// RootFsInfo returns filesystem stats for the kubelet root directory.
+func (cu *cadvisorClient) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
+	return cu.GetDirFsInfo(cu.rootPath)
+}
+
+// WatchEvents is unsupported on AIX; it returns an empty EventChannel
+// that never delivers events.
+func (cu *cadvisorClient) WatchEvents(request *events.Request) (*events.EventChannel, error) {
+	return &events.EventChannel{}, nil
+}
+
+// HasDedicatedImageFs always reports false on AIX: images share the
+// root filesystem.
+func (cu *cadvisorClient) HasDedicatedImageFs() (bool, error) {
+	return false, nil
+}
+
+// GetFsInfoByFsUUID is unsupported on AIX; it returns a zero FsInfo.
+func (cu *cadvisorClient) GetFsInfoByFsUUID(uuid string) (cadvisorapiv2.FsInfo, error) {
+	return cadvisorapiv2.FsInfo{}, nil
+}
+
+// GetDirFsInfo returns stats for the filesystem containing path, via the
+// aixstats client (statfs64-based).
+func (cu *cadvisorClient) GetDirFsInfo(path string) (cadvisorapiv2.FsInfo, error) {
+	return cu.aixStatsClient.AixGlobalFsInfo(path)
+}
diff --git a/pkg/kubelet/cadvisor/cadvisor_unsupported.go b/pkg/kubelet/cadvisor/cadvisor_unsupported.go
index 501c743..3055efe 100644
--- a/pkg/kubelet/cadvisor/cadvisor_unsupported.go
+++ b/pkg/kubelet/cadvisor/cadvisor_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows linux,!cgo
+// +build !aix,!linux,!windows linux,!cgo
 
 /*
 Copyright 2015 The Kubernetes Authors.
diff --git a/pkg/kubelet/cm/container_manager_aix.go b/pkg/kubelet/cm/container_manager_aix.go
new file mode 100644
index 0000000..f6306fb
--- /dev/null
+++ b/pkg/kubelet/cm/container_manager_aix.go
@@ -0,0 +1,46 @@
+// +build aix
+
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cm
+
+import (
+	"github.com/golang/glog"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/client-go/tools/record"
+	internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
+	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
+        "k8s.io/kubernetes/pkg/kubelet/config"
+	"k8s.io/kubernetes/pkg/kubelet/status"
+	"k8s.io/kubernetes/pkg/util/mount"
+)
+
+// containerManagerImpl is the AIX container manager. It embeds the stub
+// implementation and only overrides Start to log that it is a stub.
+type containerManagerImpl struct {
+	containerManagerStub
+}
+
+// Compile-time check that containerManagerImpl satisfies ContainerManager.
+var _ ContainerManager = &containerManagerImpl{}
+
+// Start is a no-op on AIX; all node-level resource management arguments
+// are ignored.
+func (cm *containerManagerImpl) Start(_ *v1.Node, _ ActivePodsFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService) error {
+	glog.V(2).Infof("Starting AIX stub container manager")
+	return nil
+}
+
+// NewContainerManager returns the stub AIX container manager; all
+// configuration arguments are currently ignored.
+func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface, nodeConfig NodeConfig, failSwapOn bool, devicePluginEnabled bool, recorder record.EventRecorder) (ContainerManager, error) {
+	return &containerManagerImpl{}, nil
+}
diff --git a/pkg/kubelet/cm/container_manager_unsupported.go b/pkg/kubelet/cm/container_manager_unsupported.go
index 97a3a3a..9912d6d 100644
--- a/pkg/kubelet/cm/container_manager_unsupported.go
+++ b/pkg/kubelet/cm/container_manager_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows
+// +build !aix,!linux,!windows
 
 /*
 Copyright 2015 The Kubernetes Authors.
diff --git a/pkg/kubelet/config/file_aix.go b/pkg/kubelet/config/file_aix.go
new file mode 100644
index 0000000..51f47d9
--- /dev/null
+++ b/pkg/kubelet/config/file_aix.go
@@ -0,0 +1,47 @@
+// +build aix
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Reads the pod configuration from file or a directory of files.
+package config
+
+import (
+	"fmt"
+	"os"
+
+	"k8s.io/api/core/v1"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
+)
+
+// watch loads static pods from s.path once. When the path does not
+// exist, an empty SET update is emitted so the file source is still
+// marked as seen, and an error is returned. NOTE(review): unlike the
+// Linux implementation there is no visible re-scan loop here — confirm
+// that periodic updates are driven elsewhere.
+func (s *sourceFile) watch() error {
+	_, err := os.Stat(s.path)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+		// Emit an update with an empty PodList to allow FileSource to be marked as seen
+		s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource}
+		return fmt.Errorf("path does not exist, ignoring")
+	}
+
+	// Reset store with manifest files already existing when starting
+	if err := s.resetStoreFromPath(); err != nil {
+		return fmt.Errorf("unable to read manifest path %q: %v", s.path, err)
+	}
+
+	return nil
+}
diff --git a/pkg/kubelet/config/file_unsupported.go b/pkg/kubelet/config/file_unsupported.go
index c30ede4..3948dad 100644
--- a/pkg/kubelet/config/file_unsupported.go
+++ b/pkg/kubelet/config/file_unsupported.go
@@ -1,4 +1,5 @@
 // +build !linux
+// +build !aix
 
 /*
 Copyright 2016 The Kubernetes Authors.
diff --git a/pkg/kubelet/container/pty_aix.go b/pkg/kubelet/container/pty_aix.go
new file mode 100644
index 0000000..40bd34d
--- /dev/null
+++ b/pkg/kubelet/container/pty_aix.go
@@ -0,0 +1,88 @@
+// +build aix
+
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package container
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"syscall"
+	"unsafe"
+)
+
+// openTty allocates a pseudo-terminal by opening the AIX clone device
+// /dev/ptc (master side), resolving the slave device name via ttyname,
+// and opening the slave without making it the controlling terminal.
+// It returns the master (pty) and slave (tty) files; the caller owns both.
+func openTty() (pty, tty *os.File, err error) {
+	p, err := os.OpenFile("/dev/ptc", os.O_RDWR, 0)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	sname, err := ttyname(p)
+	if err != nil {
+		// Bug fix: the master fd was previously leaked on this path.
+		p.Close()
+		return nil, nil, err
+	}
+
+	t, err := os.OpenFile(sname, os.O_RDWR|syscall.O_NOCTTY, 0)
+	if err != nil {
+		// Bug fix: the master fd was previously leaked on this path too.
+		p.Close()
+		return nil, nil, err
+	}
+	return p, t, nil
+}
+
+// TXTTYNAME is the AIX ioctl command ('X'<<8 + 1) that returns the tty
+// device name for a descriptor; TTNAMEMAX bounds the name buffer size.
+const TXTTYNAME = ('X' << 8) + 1
+const TTNAMEMAX = 32
+
+// ioctl is bound directly to the C library ioctl(2) by the //extern
+// directive below (gccgo-style external linkage).
+//extern ioctl
+func ioctl(fd int32, cmd int32, arg uintptr) int32
+
+// ttyname returns the /dev path of the terminal attached to f, obtained
+// with the TXTTYNAME ioctl into a fixed TTNAMEMAX-byte buffer.
+func ttyname(f *os.File) (string, error) {
+	var buf [TTNAMEMAX]byte
+	rc := ioctl(int32(f.Fd()), TXTTYNAME, uintptr(unsafe.Pointer(&buf[0])))
+	if rc == -1 {
+		return "", fmt.Errorf("ioctl TXTTYNAME")
+	}
+	return "/dev/" + cstring(buf[:]), nil
+}
+
+// cstring interprets b as a NUL-terminated C string, returning the bytes
+// before the first NUL (or the whole slice if none is present) as a Go
+// string.
+func cstring(b []byte) string {
+	var i int
+	for i = 0; i < len(b) && b[i] != 0; i++ {
+	}
+	return string(b[:i])
+}
+
+// StartPty starts command c with its stdin/stdout/stderr attached to the
+// slave side of a newly allocated pty, in a new session (Setsid). The
+// slave is closed in the parent once the child has started; the caller
+// owns the returned master and must close it.
+func StartPty(c *exec.Cmd) (*os.File, error) {
+	pty, tty, err := openTty()
+	if err != nil {
+		return nil, err
+	}
+	// The child inherits the slave; the parent's copy is closed on return.
+	defer tty.Close()
+	c.Stdout = tty
+	c.Stdin = tty
+	c.Stderr = tty
+	if c.SysProcAttr == nil {
+		c.SysProcAttr = &syscall.SysProcAttr{}
+	}
+	c.SysProcAttr.Setsid = true
+	err = c.Start()
+	if err != nil {
+		// Release the master if the child never started.
+		pty.Close()
+		return nil, err
+	}
+	return pty, err
+}
diff --git a/pkg/kubelet/container/pty_unsupported.go b/pkg/kubelet/container/pty_unsupported.go
index 24ea2f7..102be98 100644
--- a/pkg/kubelet/container/pty_unsupported.go
+++ b/pkg/kubelet/container/pty_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+// +build !aix,!linux
 
 /*
 Copyright 2015 The Kubernetes Authors.
diff --git a/pkg/kubelet/dockershim/cm/container_manager_aix.go b/pkg/kubelet/dockershim/cm/container_manager_aix.go
new file mode 100644
index 0000000..46589f0
--- /dev/null
+++ b/pkg/kubelet/dockershim/cm/container_manager_aix.go
@@ -0,0 +1,37 @@
+// +build aix
+
+// This is a copy of container_manager_windows.go
+
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cm
+
+import (
+	"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
+)
+
+// containerManager is a no-op dockershim ContainerManager: AIX has no
+// cgroup-style resources for the shim to configure.
+type containerManager struct {
+}
+
+// NewContainerManager returns the no-op container manager; the cgroups
+// path and docker client arguments are ignored on AIX.
+func NewContainerManager(_ string, _ libdocker.Interface) ContainerManager {
+	return &containerManager{}
+}
+
+// Start is a no-op on AIX.
+func (m *containerManager) Start() error {
+	return nil
+}
diff --git a/pkg/kubelet/dockershim/cm/container_manager_unsupported.go b/pkg/kubelet/dockershim/cm/container_manager_unsupported.go
index 7b35c5f..5a26fa3 100644
--- a/pkg/kubelet/dockershim/cm/container_manager_unsupported.go
+++ b/pkg/kubelet/dockershim/cm/container_manager_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows
+// +build !linux,!windows,!aix
 
 /*
 Copyright 2016 The Kubernetes Authors.
diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go
index 92c10ad..b22d881 100644
--- a/pkg/kubelet/eviction/helpers.go
+++ b/pkg/kubelet/eviction/helpers.go
@@ -735,6 +735,7 @@ func makeSignalObservations(summary *statsapi.Summary) (signalObservations, stat
 			time:      memory.Time,
 		}
 	}
+/*
 	if allocatableContainer, err := getSysContainer(summary.Node.SystemContainers, statsapi.SystemContainerPods); err != nil {
 		glog.Errorf("eviction manager: failed to construct signal: %q error: %v", evictionapi.SignalAllocatableMemoryAvailable, err)
 	} else {
@@ -746,6 +747,7 @@ func makeSignalObservations(summary *statsapi.Summary) (signalObservations, stat
 			}
 		}
 	}
+*/
 	if nodeFs := summary.Node.Fs; nodeFs != nil {
 		if nodeFs.AvailableBytes != nil && nodeFs.CapacityBytes != nil {
 			result[evictionapi.SignalNodeFsAvailable] = signalObservation{
diff --git a/pkg/kubelet/kuberuntime/helpers_aix.go b/pkg/kubelet/kuberuntime/helpers_aix.go
new file mode 100644
index 0000000..a871c1d
--- /dev/null
+++ b/pkg/kubelet/kuberuntime/helpers_aix.go
@@ -0,0 +1,69 @@
+// +build aix
+
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kuberuntime
+
+// Constants for converting Kubernetes milliCPU values to CFS shares and
+// quota, used by milliCPUToShares and milliCPUToQuota below.
+const (
+	// Taken from lmctfy https://github.com/google/lmctfy/blob/master/lmctfy/controllers/cpu_controller.cc
+	minShares     = 2    // minimum shares value
+	sharesPerCPU  = 1024 // shares representing one full CPU
+	milliCPUToCPU = 1000 // milliCPU units per CPU
+
+	// 100000 is equivalent to 100ms
+	quotaPeriod    = 100 * minQuotaPeriod
+	minQuotaPeriod = 1000 // 1ms, the minimum quota
+)
+
+// milliCPUToShares converts milliCPU to CPU shares
+func milliCPUToShares(milliCPU int64) int64 {
+	if milliCPU == 0 {
+		// Return 2 here to really match kernel default for zero milliCPU.
+		return minShares
+	}
+	// Conceptually (milliCPU / milliCPUToCPU) * sharesPerCPU, but factored to improve rounding.
+	shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
+	if shares < minShares {
+		return minShares
+	}
+	return shares
+}
+
+// milliCPUToQuota converts milliCPU to CFS quota and period values
+func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
+	// CFS quota is measured in two values:
+	//  - cfs_period_us=100ms (the amount of time to measure usage across)
+	//  - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
+	// so in the above example, you are limited to 20% of a single CPU
+	// for multi-cpu environments, you just scale equivalent amounts
+	if milliCPU == 0 {
+		return
+	}
+
+	// we set the period to 100ms by default
+	period = quotaPeriod
+
+	// we then convert your milliCPU to a value normalized over a period
+	quota = (milliCPU * quotaPeriod) / milliCPUToCPU
+
+	// quota needs to be a minimum of 1ms.
+	if quota < minQuotaPeriod {
+		quota = minQuotaPeriod
+	}
+
+	return
+}
diff --git a/pkg/kubelet/kuberuntime/helpers_unsupported.go b/pkg/kubelet/kuberuntime/helpers_unsupported.go
index 1e99c4f..22b05a1 100644
--- a/pkg/kubelet/kuberuntime/helpers_unsupported.go
+++ b/pkg/kubelet/kuberuntime/helpers_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows
+// +build !linux,!windows,!aix
 
 /*
 Copyright 2018 The Kubernetes Authors.
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_aix.go b/pkg/kubelet/kuberuntime/kuberuntime_container_aix.go
new file mode 100644
index 0000000..510fd3c
--- /dev/null
+++ b/pkg/kubelet/kuberuntime/kuberuntime_container_aix.go
@@ -0,0 +1,74 @@
+// +build aix
+
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kuberuntime
+
+import (
+	"k8s.io/api/core/v1"
+	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
+	"k8s.io/kubernetes/pkg/kubelet/qos"
+)
+
+// applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig.
+func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string) error {
+	config.Linux = m.generateAIXContainerConfig(container, pod, uid, username)
+	return nil
+}
+
+// generateAIXContainerConfig generates AIX container config for kubelet runtime v1.
+func (m *kubeGenericRuntimeManager) generateAIXContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string) *runtimeapi.LinuxContainerConfig {
+	lc := &runtimeapi.LinuxContainerConfig{
+		Resources:       &runtimeapi.LinuxContainerResources{},
+		SecurityContext: m.determineEffectiveSecurityContext(pod, container, uid, username),
+	}
+
+	// set linux container resources
+	var cpuShares int64
+	cpuRequest := container.Resources.Requests.Cpu()
+	cpuLimit := container.Resources.Limits.Cpu()
+	memoryLimit := container.Resources.Limits.Memory().Value()
+	oomScoreAdj := int64(qos.GetContainerOOMScoreAdjust(pod, container,
+		int64(m.machineInfo.MemoryCapacity)))
+	// If request is not specified, but limit is, we want request to default to limit.
+	// API server does this for new containers, but we repeat this logic in Kubelet
+	// for containers running on existing Kubernetes clusters.
+	if cpuRequest.IsZero() && !cpuLimit.IsZero() {
+		cpuShares = milliCPUToShares(cpuLimit.MilliValue())
+	} else {
+		// if cpuRequest.Amount is nil, then milliCPUToShares will return the minimal number
+		// of CPU shares.
+		cpuShares = milliCPUToShares(cpuRequest.MilliValue())
+	}
+	lc.Resources.CpuShares = cpuShares
+	if memoryLimit != 0 {
+		lc.Resources.MemoryLimitInBytes = memoryLimit
+	}
+	// Set OOM score of the container based on qos policy. Processes in lower-priority pods should
+	// be killed first if the system runs out of memory.
+	lc.Resources.OomScoreAdj = oomScoreAdj
+
+	if m.cpuCFSQuota {
+		// if cpuLimit.Amount is nil, then the appropriate default value is returned
+		// to allow full usage of cpu resource.
+		cpuQuota, cpuPeriod := milliCPUToQuota(cpuLimit.MilliValue())
+		lc.Resources.CpuQuota = cpuQuota
+		lc.Resources.CpuPeriod = cpuPeriod
+	}
+
+	return lc
+}
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_unsupported.go b/pkg/kubelet/kuberuntime/kuberuntime_container_unsupported.go
index 0344b7d..bd2ddd5 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_container_unsupported.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_container_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows
+// +build !linux,!windows,!aix
 
 /*
 Copyright 2018 The Kubernetes Authors.
diff --git a/pkg/kubelet/network/cni/cni_aix.go b/pkg/kubelet/network/cni/cni_aix.go
new file mode 100644
index 0000000..0ae09d5
--- /dev/null
+++ b/pkg/kubelet/network/cni/cni_aix.go
@@ -0,0 +1,63 @@
+// +build aix
+
+// This is a copy of cni_windows.go
+
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cni
+
+import (
+	"fmt"
+
+	cniTypes020 "github.com/containernetworking/cni/pkg/types/020"
+	"github.com/golang/glog"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	"k8s.io/kubernetes/pkg/kubelet/network"
+)
+
+// getLoNetwork returns nil: no loopback CNI network is set up on AIX.
+func getLoNetwork(binDir, vendorDirPrefix string) *cniNetwork {
+	return nil
+}
+
+// platformInit is a no-op on AIX.
+func (plugin *cniNetworkPlugin) platformInit() error {
+	return nil
+}
+
+// GetPodNetworkStatus : Assuming addToNetwork is idempotent, we can call this API as many times as required to get the IPAddress
+func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) {
+	netnsPath, err := plugin.host.GetNetNS(id.ID)
+	if err != nil {
+		return nil, fmt.Errorf("CNI failed to retrieve network namespace path: %v", err)
+	}
+
+	result, err := plugin.addToNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath)
+
+	glog.V(5).Infof("GetPodNetworkStatus result %+v", result)
+	if err != nil {
+		glog.Errorf("error while adding to cni network: %s", err)
+		return nil, err
+	}
+
+	// Parse the result and get the IPAddress
+	var result020 *cniTypes020.Result
+	result020, err = cniTypes020.GetResult(result)
+	if err != nil {
+		glog.Errorf("error while cni parsing result: %s", err)
+		return nil, err
+	}
+	return &network.PodNetworkStatus{IP: result020.IP4.IP.IP}, nil
+}
diff --git a/pkg/kubelet/network/cni/cni_others.go b/pkg/kubelet/network/cni/cni_others.go
index 735db9c..de678ce 100644
--- a/pkg/kubelet/network/cni/cni_others.go
+++ b/pkg/kubelet/network/cni/cni_others.go
@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!aix
 
 /*
 Copyright 2017 The Kubernetes Authors.
diff --git a/pkg/kubelet/util/util_aix.go b/pkg/kubelet/util/util_aix.go
new file mode 100644
index 0000000..8f00508
--- /dev/null
+++ b/pkg/kubelet/util/util_aix.go
@@ -0,0 +1,81 @@
+// +build aix
+
+// This is a copy of util_unix.go, except it uses syscall package instead of unsupported golang.org/x/sys/
+
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"time"
+	"syscall"
+
+	"github.com/golang/glog"
+)
+
+const (
+	// unixProtocol is the network protocol of unix socket.
+	unixProtocol = "unix"
+)
+
+// CreateListener creates a listening unix-domain socket for the given
+// endpoint (a bare path is treated as unix://path). A stale socket file
+// left over from a previous run is unlinked first; only the unix
+// protocol is supported.
+func CreateListener(endpoint string) (net.Listener, error) {
+	protocol, addr, err := parseEndpointWithFallbackProtocol(endpoint, unixProtocol)
+	if err != nil {
+		return nil, err
+	}
+	if protocol != unixProtocol {
+		return nil, fmt.Errorf("only support unix socket endpoint")
+	}
+
+	// Unlink to cleanup the previous socket file.
+	err = syscall.Unlink(addr)
+	if err != nil && !os.IsNotExist(err) {
+		return nil, fmt.Errorf("failed to unlink socket file %q: %v", addr, err)
+	}
+
+	return net.Listen(protocol, addr)
+}
+
+// GetAddressAndDialer parses a unix-socket endpoint and returns the
+// socket path together with a matching timeout-aware dial function.
+// Only the unix protocol is supported.
+func GetAddressAndDialer(endpoint string) (string, func(addr string, timeout time.Duration) (net.Conn, error), error) {
+	protocol, addr, err := parseEndpointWithFallbackProtocol(endpoint, unixProtocol)
+	if err != nil {
+		return "", nil, err
+	}
+	if protocol != unixProtocol {
+		return "", nil, fmt.Errorf("only support unix socket endpoint")
+	}
+
+	return addr, dial, nil
+}
+
+// dial connects to the unix-domain socket at addr within timeout.
+func dial(addr string, timeout time.Duration) (net.Conn, error) {
+	return net.DialTimeout(unixProtocol, addr, timeout)
+}
+
+// parseEndpointWithFallbackProtocol parses endpoint into protocol and
+// address; when the endpoint carries no protocol prefix it is retried
+// with fallbackProtocol:// prepended, logging a deprecation warning on
+// success.
+func parseEndpointWithFallbackProtocol(endpoint string, fallbackProtocol string) (protocol string, addr string, err error) {
+	if protocol, addr, err = parseEndpoint(endpoint); err != nil && protocol == "" {
+		fallbackEndpoint := fallbackProtocol + "://" + endpoint
+		protocol, addr, err = parseEndpoint(fallbackEndpoint)
+		if err == nil {
+			glog.Warningf("Using %q as endpoint is deprecated, please consider using full url format %q.", endpoint, fallbackEndpoint)
+		}
+	}
+	return
+}
diff --git a/pkg/kubelet/util/util_unsupported.go b/pkg/kubelet/util/util_unsupported.go
index 77f14ea..34b7381 100644
--- a/pkg/kubelet/util/util_unsupported.go
+++ b/pkg/kubelet/util/util_unsupported.go
@@ -1,4 +1,4 @@
-// +build !freebsd,!linux,!windows,!darwin
+// +build !freebsd,!linux,!windows,!darwin,!aix
 
 /*
 Copyright 2017 The Kubernetes Authors.
diff --git a/pkg/proxy/userspace/proxier.go b/pkg/proxy/userspace/proxier.go
index 3fc49c0..45df32a 100644
--- a/pkg/proxy/userspace/proxier.go
+++ b/pkg/proxy/userspace/proxier.go
@@ -1,3 +1,5 @@
+// +build !aix
+
 /*
 Copyright 2014 The Kubernetes Authors.
 
diff --git a/pkg/proxy/userspace/proxier_aix.go b/pkg/proxy/userspace/proxier_aix.go
new file mode 100644
index 0000000..c65d7b3
--- /dev/null
+++ b/pkg/proxy/userspace/proxier_aix.go
@@ -0,0 +1,547 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package userspace
+
+import (
+	"bufio"
+	"fmt"
+	"net"
+	"os/exec"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/golang/glog"
+
+	"k8s.io/apimachinery/pkg/types"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/pkg/util/runtime"
+	api "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/kubernetes/pkg/apis/core/helper"
+	"k8s.io/kubernetes/pkg/proxy"
+	"k8s.io/kubernetes/pkg/util/iptables"
+)
+
const (
	// allAvailableInterfaces is the empty listen IP, meaning "bind on all
	// interfaces" (used for node ports).
	allAvailableInterfaces = ""
	// cmdIfconfig is the AIX ifconfig binary used to add/remove service IPs.
	cmdIfconfig            = "/usr/sbin/ifconfig"
	// vipaInterface is the virtual IP address interface service IPs are
	// aliased onto.
	vipaInterface          = "vi0"
)

// ifName caches the interface hosting service IPs. It is resolved lazily in
// ensureIPAddress and reset to "" when the vipa interface is detached.
var ifName string = ""
+
// portal describes one listen address of a service: the IP (empty means all
// interfaces), the port, and whether it is an external portal.
type portal struct {
	ip         string
	port       int
	isExternal bool
}

// ServicePortPortalName uniquely identifies one service port portal: the
// service's namespaced name, the named port, and the portal's listen IP.
type ServicePortPortalName struct {
	types.NamespacedName
	Port         string
	PortalIPName string
}
+
// serviceInfo holds the runtime state of one proxied service port portal.
type serviceInfo struct {
	isAliveAtomic       int32 // Only access this with atomic ops
	portal              portal
	protocol            api.Protocol
	// socket is the listening TCP/UDP proxy socket for this portal.
	socket              proxySocket
	// timeout is the idle timeout applied to proxied UDP connections.
	timeout             time.Duration
	activeClients       *clientCache
	dnsClients          *dnsClientCache
	sessionAffinityType api.ServiceAffinity
}
+
+func (info *serviceInfo) setAlive(b bool) {
+	var i int32
+	if b {
+		i = 1
+	}
+	atomic.StoreInt32(&info.isAliveAtomic, i)
+}
+
+func (info *serviceInfo) isAlive() bool {
+	return atomic.LoadInt32(&info.isAliveAtomic) != 0
+}
+
+func logTimeout(err error) bool {
+	if e, ok := err.(net.Error); ok {
+		if e.Timeout() {
+			glog.V(3).Infof("connection to endpoint closed due to inactivity")
+			return true
+		}
+	}
+	return false
+}
+
// Proxier is a simple proxy for TCP connections between a localhost:lport
// and services that provide the actual implementations.
type Proxier struct {
	loadBalancer   LoadBalancer
	mu             sync.Mutex // protects serviceMap
	serviceMap     map[ServicePortPortalName]*serviceInfo
	syncPeriod     time.Duration
	udpIdleTimeout time.Duration
	portMapMutex   sync.Mutex // protects portMap
	portMap        map[portMapKey]*portMapValue
	numProxyLoops  int32 // use atomic ops to access this; mostly for testing
	hostIP         net.IP
}

// assert Proxier is a ProxyProvider
var _ proxy.ProxyProvider = &Proxier{}
+
// A key for the portMap.  The ip has to be a string because slices can't be map
// keys.
type portMapKey struct {
	ip       string
	port     int
	protocol api.Protocol
}

// String renders the key as "ip:port/protocol" for logs and errors.
func (k *portMapKey) String() string {
	return fmt.Sprintf("%s/%s", net.JoinHostPort(k.ip, strconv.Itoa(k.port)), k.protocol)
}

// A value for the portMap
type portMapValue struct {
	owner  ServicePortPortalName
	socket interface {
		Close() error
	}
}

var (
	// ErrProxyOnLocalhost is returned by NewProxier if the user requests a proxier on
	// the loopback address. May be checked for by callers of NewProxier to know whether
	// the caller provided invalid input.
	ErrProxyOnLocalhost = fmt.Errorf("cannot proxy on localhost")
	// ErrInterface is returned when no interface can be found to host a
	// service IP (see getInterfaceForServiceIP).
	ErrInterface        = fmt.Errorf("cannot find a valid interface to set service IP")
)

// Used below.
var localhostIPv4 = net.ParseIP("127.0.0.1")
var localhostIPv6 = net.ParseIP("::1")
+
+// NewProxier returns a new Proxier given a LoadBalancer and an address on
+// which to listen. It is assumed that there is only a single Proxier active
+// on a machine. An error will be returned if the proxier cannot be started
+// due to an invalid ListenIP (loopback)
+func NewProxier(loadBalancer LoadBalancer, listenIP net.IP, pr utilnet.PortRange, syncPeriod, udpIdleTimeout time.Duration) (*Proxier, error) {
+	if listenIP.Equal(localhostIPv4) || listenIP.Equal(localhostIPv6) {
+		return nil, ErrProxyOnLocalhost
+	}
+
+	hostIP, err := utilnet.ChooseHostInterface()
+	if err != nil {
+		return nil, fmt.Errorf("failed to select a host interface: %v", err)
+	}
+
+	glog.V(2).Infof("Setting proxy IP to %v", hostIP)
+	return createProxier(loadBalancer, listenIP, hostIP, syncPeriod, udpIdleTimeout)
+}
+
// createProxier builds a Proxier with empty service and port maps. The error
// result is always nil today; it is kept so platform constructors share a
// common shape. listenIP is accepted for signature parity but not stored.
func createProxier(loadBalancer LoadBalancer, listenIP net.IP, hostIP net.IP, syncPeriod, udpIdleTimeout time.Duration) (*Proxier, error) {
	return &Proxier{
		loadBalancer:   loadBalancer,
		serviceMap:     make(map[ServicePortPortalName]*serviceInfo),
		portMap:        make(map[portMapKey]*portMapValue),
		syncPeriod:     syncPeriod,
		udpIdleTimeout: udpIdleTimeout,
		hostIP:         hostIP,
	}, nil
}
+
// Sync is called to immediately synchronize the proxier state
func (proxier *Proxier) Sync() {
	// There are no packet-filter rules to reprogram here; syncing only
	// expires stale sticky-session records.
	proxier.cleanupStaleStickySessions()
}

// SyncLoop runs periodic work.  This is expected to run as a goroutine or as the main loop of the app.  It does not return.
func (proxier *Proxier) SyncLoop() {
	t := time.NewTicker(proxier.syncPeriod)
	defer t.Stop()
	for {
		<-t.C
		glog.V(6).Infof("Periodic sync")
		proxier.Sync()
	}
}
+
+// cleanupStaleStickySessions cleans up any stale sticky session records in the hash map.
+func (proxier *Proxier) cleanupStaleStickySessions() {
+	proxier.mu.Lock()
+	defer proxier.mu.Unlock()
+	servicePortNameMap := make(map[proxy.ServicePortName]bool)
+	for name := range proxier.serviceMap {
+		servicePortName := proxy.ServicePortName{
+			NamespacedName: types.NamespacedName{
+				Namespace: name.Namespace,
+				Name:      name.Name,
+			},
+			Port: name.Port,
+		}
+		if servicePortNameMap[servicePortName] == false {
+			// ensure cleanup sticky sessions only gets called once per serviceportname
+			servicePortNameMap[servicePortName] = true
+			proxier.loadBalancer.CleanupStaleStickySessions(servicePortName)
+		}
+	}
+}
+
// stopProxy stops proxying for the given portal.
// This assumes proxier.mu is not locked.
func (proxier *Proxier) stopProxy(service ServicePortPortalName, info *serviceInfo) error {
	proxier.mu.Lock()
	defer proxier.mu.Unlock()
	return proxier.stopProxyInternal(service, info)
}

// stopProxyInternal removes the portal from the service map, marks it dead so
// its proxy loop exits, and closes its listening socket.
// This assumes proxier.mu is locked.
func (proxier *Proxier) stopProxyInternal(service ServicePortPortalName, info *serviceInfo) error {
	delete(proxier.serviceMap, service)
	info.setAlive(false)
	err := info.socket.Close()
	return err
}
+
+func (proxier *Proxier) getServiceInfo(service ServicePortPortalName) (*serviceInfo, bool) {
+	proxier.mu.Lock()
+	defer proxier.mu.Unlock()
+	info, ok := proxier.serviceMap[service]
+	return info, ok
+}
+
+func (proxier *Proxier) setServiceInfo(service ServicePortPortalName, info *serviceInfo) {
+	proxier.mu.Lock()
+	defer proxier.mu.Unlock()
+	proxier.serviceMap[service] = info
+}
+
+func (proxier *Proxier) ensureIPAddress(serviceIP net.IP) (bool, error) {
+	if ifName == "" {
+		ifName = getInterfaceForServiceIP()
+		if ifName == "" {
+			return false, ErrInterface
+		}
+	}
+	exists, err := checkIPExists(serviceIP.String(), ifName)
+	if err != nil {
+		return false, err
+	} else if exists == true {
+		glog.V(4).Infof("not adding IP address %q as it already exists", serviceIP.String())
+		return true, nil
+	}
+	glog.V(4).Infof("running ifconfig ipv4 add address")
+	out, err := exec.Command(cmdIfconfig, ifName, "inet", serviceIP.String(), "netmask", "255.255.255.255", "alias").CombinedOutput()
+	if err == nil {
+		// Once the IP Address is added, it takes a bit to initialize and show up when querying for it
+		// Query all the IP addresses and see if the one we added is present
+		// PS: We are using netsh interface ipv4 show address here to query all the IP addresses, instead of
+		// querying net.InterfaceAddrs() as it returns the IP address as soon as it is added even though it is uninitialized
+		glog.V(3).Infof("Waiting until IP: %v is added to the network adapter", serviceIP.String())
+		for {
+			if exists, _ := checkIPExists(serviceIP.String(), ifName); exists {
+				break
+			}
+			time.Sleep(500 * time.Millisecond)
+		}
+		return true, nil
+	} else {
+		fmt.Println(out)
+		return false, err
+	}
+	return false, nil
+}
+
+func checkIPExists(ip string, iface string) (bool, error) {
+	output, err := exec.Command(cmdIfconfig, iface).CombinedOutput()
+	if err != nil {
+		return false, err
+	}
+	scanner := bufio.NewScanner(strings.NewReader(string(output)))
+	for scanner.Scan() {
+		strlist := strings.Split(strings.TrimSpace(scanner.Text()), " ")
+		if strlist[0] == "inet" && (strlist[1] == ip || ip == "") {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
// getInterfaceForServiceIP returns the interface used to host service IPs;
// on AIX this is always the virtual IP address interface ("vi0").
func getInterfaceForServiceIP() string {
	return vipaInterface
}
+
+// addServicePortPortal starts listening for a new service, returning the serviceInfo.
+// The timeout only applies to UDP connections, for now.
+func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPortalName, protocol api.Protocol, listenIP string, port int, timeout time.Duration) (*serviceInfo, error) {
+	var serviceIP net.IP
+	if listenIP != allAvailableInterfaces {
+		if serviceIP = net.ParseIP(listenIP); serviceIP == nil {
+			return nil, fmt.Errorf("could not parse ip '%q'", listenIP)
+		}
+		// add the IP address.  Node port binds to all interfaces.
+		if existed, err := proxier.ensureIPAddress(serviceIP); err != nil {
+			return nil, err
+		} else if !existed {
+			glog.V(3).Infof("Added ip address to fowarder interface for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol)
+		}
+	}
+
+	// add the listener, proxy
+	sock, err := newProxySocket(protocol, serviceIP, port)
+	if err != nil {
+		return nil, err
+	}
+	si := &serviceInfo{
+		isAliveAtomic: 1,
+		portal: portal{
+			ip:         listenIP,
+			port:       port,
+			isExternal: false,
+		},
+		protocol:            protocol,
+		socket:              sock,
+		timeout:             timeout,
+		activeClients:       newClientCache(),
+		dnsClients:          newDNSClientCache(),
+		sessionAffinityType: api.ServiceAffinityNone, // default
+	}
+	proxier.setServiceInfo(servicePortPortalName, si)
+
+	glog.V(2).Infof("Proxying for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol)
+	go func(service ServicePortPortalName, proxier *Proxier) {
+		defer runtime.HandleCrash()
+		atomic.AddInt32(&proxier.numProxyLoops, 1)
+		sock.ProxyLoop(service, si, proxier)
+		atomic.AddInt32(&proxier.numProxyLoops, -1)
+	}(servicePortPortalName, proxier)
+
+	return si, nil
+}
+
+func deleteIPAddress(serviceIP net.IP) error {
+	out, err := exec.Command(cmdIfconfig, ifName, "inet", serviceIP.String(), "delete").CombinedOutput()
+	if err == nil && ifName == vipaInterface {
+		// if the last address on the vipa interface is removed, remove the vipa also
+		if exists, _ := checkIPExists("", ifName); !exists {
+			glog.V(4).Infof("Removing vipa interface ...")
+			out, err := exec.Command("/usr/sbin/ifconfig", vipaInterface, "detach").CombinedOutput()
+			if err != nil {
+				glog.V(4).Infof("Removing vipa interface failed: %s", string(out))
+			}
+			ifName = ""
+		}
+		return nil
+	}
+	return fmt.Errorf("error deleting ipv4 address: %v: %s", err, out)
+}
+
// CleanupLeftovers is a no-op on AIX: this userspace proxier does not program
// iptables, so there is nothing to clean up. It always returns false.
func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) {
	return false
}
+
// closeServicePortPortal stops the proxy loop for the portal and, for portals
// bound to a specific IP, removes that service IP from the interface.
func (proxier *Proxier) closeServicePortPortal(servicePortPortalName ServicePortPortalName, info *serviceInfo) error {
	// turn off the proxy
	if err := proxier.stopProxy(servicePortPortalName, info); err != nil {
		return err
	}

	// close the PortalProxy by deleting the service IP address
	if info.portal.ip != allAvailableInterfaces {
		serviceIP := net.ParseIP(info.portal.ip)
		if err := deleteIPAddress(serviceIP); err != nil {
			return err
		}
	}
	return nil
}
+
+// getListenIPPortMap returns a slice of all listen IPs for a service.
+func getListenIPPortMap(service *api.Service, listenPort int, nodePort int) map[string]int {
+	listenIPPortMap := make(map[string]int)
+	listenIPPortMap[service.Spec.ClusterIP] = listenPort
+
+	for _, ip := range service.Spec.ExternalIPs {
+		listenIPPortMap[ip] = listenPort
+	}
+
+	for _, ingress := range service.Status.LoadBalancer.Ingress {
+		listenIPPortMap[ingress.IP] = listenPort
+	}
+
+	if nodePort != 0 {
+		listenIPPortMap[allAvailableInterfaces] = nodePort
+	}
+
+	return listenIPPortMap
+}
+
// mergeService ensures a portal (listener + proxy loop) exists for every
// (port, listen IP) pair of the service and returns the set of portal names
// now active for it. Returns nil for a nil or headless (no cluster IP)
// service.
func (proxier *Proxier) mergeService(service *api.Service) map[ServicePortPortalName]bool {
	if service == nil {
		return nil
	}
	svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	if !helper.IsServiceIPSet(service) {
		glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
		return nil
	}
	existingPortPortals := make(map[ServicePortPortalName]bool)

	for i := range service.Spec.Ports {
		servicePort := &service.Spec.Ports[i]
		// create a slice of all the source IPs to use for service port portals
		listenIPPortMap := getListenIPPortMap(service, int(servicePort.Port), int(servicePort.NodePort))
		protocol := servicePort.Protocol

		for listenIP, listenPort := range listenIPPortMap {
			servicePortPortalName := ServicePortPortalName{
				NamespacedName: svcName,
				Port:           servicePort.Name,
				PortalIPName:   listenIP,
			}
			existingPortPortals[servicePortPortalName] = true
			info, exists := proxier.getServiceInfo(servicePortPortalName)
			if exists && sameConfig(info, service, protocol, listenPort) {
				// Nothing changed.
				continue
			}
			if exists {
				// The portal's config changed: tear it down before re-adding.
				glog.V(4).Infof("Something changed for service %q: stopping it", servicePortPortalName)
				if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil {
					glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
				}
			}
			glog.V(1).Infof("Adding new service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(listenPort)), protocol)
			info, err := proxier.addServicePortPortal(servicePortPortalName, protocol, listenIP, listenPort, proxier.udpIdleTimeout)
			if err != nil {
				glog.Errorf("Failed to start proxy for %q: %v", servicePortPortalName, err)
				continue
			}
			// Affinity is applied after creation; addServicePortPortal
			// defaults it to none.
			info.sessionAffinityType = service.Spec.SessionAffinity
			glog.V(10).Infof("info: %#v", info)
		}
		if len(listenIPPortMap) > 0 {
			// only one loadbalancer per service port portal
			servicePortName := proxy.ServicePortName{
				NamespacedName: types.NamespacedName{
					Namespace: service.Namespace,
					Name:      service.Name,
				},
				Port: servicePort.Name,
			}
			timeoutSeconds := 0
			if service.Spec.SessionAffinity == api.ServiceAffinityClientIP {
				timeoutSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
			}
			proxier.loadBalancer.NewService(servicePortName, service.Spec.SessionAffinity, timeoutSeconds)
		}
	}

	return existingPortPortals
}
+
// unmergeService closes every portal of service that is NOT listed in
// existingPortPortals (i.e. the portals no longer wanted after a merge) and
// deletes the load balancer state for port names with no surviving portal.
func (proxier *Proxier) unmergeService(service *api.Service, existingPortPortals map[ServicePortPortalName]bool) {
	if service == nil {
		return
	}
	svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	if !helper.IsServiceIPSet(service) {
		glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP)
		return
	}

	// Port names that still have at least one live portal; their load
	// balancer state must be kept.
	servicePortNameMap := make(map[proxy.ServicePortName]bool)
	for name := range existingPortPortals {
		servicePortName := proxy.ServicePortName{
			NamespacedName: types.NamespacedName{
				Namespace: name.Namespace,
				Name:      name.Name,
			},
			Port: name.Port,
		}
		servicePortNameMap[servicePortName] = true
	}

	for i := range service.Spec.Ports {
		servicePort := &service.Spec.Ports[i]
		serviceName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name}
		// create a slice of all the source IPs to use for service port portals
		listenIPPortMap := getListenIPPortMap(service, int(servicePort.Port), int(servicePort.NodePort))

		for listenIP := range listenIPPortMap {
			servicePortPortalName := ServicePortPortalName{
				NamespacedName: svcName,
				Port:           servicePort.Name,
				PortalIPName:   listenIP,
			}
			// Skip portals that survived the merge.
			if existingPortPortals[servicePortPortalName] {
				continue
			}

			glog.V(1).Infof("Stopping service %q", servicePortPortalName)
			info, exists := proxier.getServiceInfo(servicePortPortalName)
			if !exists {
				glog.Errorf("Service %q is being removed but doesn't exist", servicePortPortalName)
				continue
			}

			if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil {
				glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err)
			}
		}

		// Only delete load balancer if all listen ips per name/port show inactive.
		if !servicePortNameMap[serviceName] {
			proxier.loadBalancer.DeleteService(serviceName)
		}
	}
}
+
// OnServiceAdd opens portals for a newly observed service.
func (proxier *Proxier) OnServiceAdd(service *api.Service) {
	_ = proxier.mergeService(service)
}

// OnServiceUpdate merges the new service state first, then closes any portal
// of the old state that is no longer wanted.
func (proxier *Proxier) OnServiceUpdate(oldService, service *api.Service) {
	existingPortPortals := proxier.mergeService(service)
	proxier.unmergeService(oldService, existingPortPortals)
}

// OnServiceDelete closes every portal of the service (the empty set marks
// none of its portals as surviving).
func (proxier *Proxier) OnServiceDelete(service *api.Service) {
	proxier.unmergeService(service, map[ServicePortPortalName]bool{})
}

// OnServiceSynced is a no-op; this proxier needs no action once the initial
// service list has been delivered.
func (proxier *Proxier) OnServiceSynced() {
}
+
+func sameConfig(info *serviceInfo, service *api.Service, protocol api.Protocol, listenPort int) bool {
+	return info.protocol == protocol && info.portal.port == listenPort && info.sessionAffinityType == service.Spec.SessionAffinity
+}
+
+func isTooManyFDsError(err error) bool {
+	return strings.Contains(err.Error(), "too many open files")
+}
+
+func isClosedError(err error) bool {
+	// A brief discussion about handling closed error here:
+	// https://code.google.com/p/go/issues/detail?id=4373#c14
+	// TODO: maybe create a stoppable TCP listener that returns a StoppedError
+	return strings.HasSuffix(err.Error(), "use of closed network connection")
+}
diff --git a/pkg/proxy/userspace/proxysocket.go b/pkg/proxy/userspace/proxysocket.go
index 207004b..4469219 100644
--- a/pkg/proxy/userspace/proxysocket.go
+++ b/pkg/proxy/userspace/proxysocket.go
@@ -1,3 +1,5 @@
+// +build !aix
+
 /*
 Copyright 2015 The Kubernetes Authors.
 
diff --git a/pkg/proxy/userspace/proxysocket_aix.go b/pkg/proxy/userspace/proxysocket_aix.go
new file mode 100644
index 0000000..16ab376
--- /dev/null
+++ b/pkg/proxy/userspace/proxysocket_aix.go
@@ -0,0 +1,649 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package userspace
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/golang/glog"
+	"github.com/miekg/dns"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/runtime"
+	api "k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/kubernetes/pkg/proxy"
+)
+
const (
	// Kubernetes DNS suffix search list
	// TODO: Get DNS suffix search list from docker containers.
	// --dns-search option doesn't work on Windows containers and has been
	// fixed recently in docker.
	// NOTE(review): the Windows remark above appears inherited from the
	// Windows proxysocket implementation; on AIX the search list is read
	// from /etc/resolv.conf (see getDnsSuffixSearchList).

	// Kubernetes cluster domain
	clusterDomain = "cluster.local"

	// Kubernetes service domain
	serviceDomain = "svc." + clusterDomain

	// Kubernetes default namespace domain
	namespaceServiceDomain = "default." + serviceDomain

	// Kubernetes DNS service port name
	dnsPortName = "dns"

	// DNS TYPE value A (a host address)
	dnsTypeA uint16 = 0x01

	// DNS TYPE value AAAA (a host IPv6 address)
	dnsTypeAAAA uint16 = 0x1c

	// DNS CLASS value IN (the Internet)
	dnsClassInternet uint16 = 0x01
)
+
// Abstraction over TCP/UDP sockets which are proxied.
// Implemented by tcpProxySocket and udpProxySocket below.
type proxySocket interface {
	// Addr gets the net.Addr for a proxySocket.
	Addr() net.Addr
	// Close stops the proxySocket from accepting incoming connections.
	// Each implementation should comment on the impact of calling Close
	// while sessions are active.
	Close() error
	// ProxyLoop proxies incoming connections for the specified service to the service endpoints.
	ProxyLoop(service ServicePortPortalName, info *serviceInfo, proxier *Proxier)
	// ListenPort returns the host port that the proxySocket is listening on
	ListenPort() int
}
+
// newProxySocket returns a listening proxySocket for the given protocol,
// bound to ip:port (all interfaces when ip is nil). Only TCP and UDP are
// supported; any other protocol yields an error.
func newProxySocket(protocol api.Protocol, ip net.IP, port int) (proxySocket, error) {
	host := ""
	if ip != nil {
		host = ip.String()
	}

	switch strings.ToUpper(string(protocol)) {
	case "TCP":
		listener, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port)))
		if err != nil {
			return nil, err
		}
		return &tcpProxySocket{Listener: listener, port: port}, nil
	case "UDP":
		addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, strconv.Itoa(port)))
		if err != nil {
			return nil, err
		}
		conn, err := net.ListenUDP("udp", addr)
		if err != nil {
			return nil, err
		}
		return &udpProxySocket{UDPConn: conn, port: port}, nil
	}
	return nil, fmt.Errorf("unknown protocol %q", protocol)
}
+
// endpointDialTimeout holds the escalating per-attempt timeouts used when
// dialing a backend endpoint (see tryConnect); one attempt per entry.
var endpointDialTimeout = []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second, 2 * time.Second}

// tcpProxySocket implements proxySocket.  Close() is implemented by net.Listener.  When Close() is called,
// no new connections are allowed but existing connections are left untouched.
type tcpProxySocket struct {
	net.Listener
	port int
}

// ListenPort returns the host port this socket accepts connections on.
func (tcp *tcpProxySocket) ListenPort() int {
	return tcp.port
}
+
+func tryConnect(service ServicePortPortalName, srcAddr net.Addr, protocol string, proxier *Proxier) (out net.Conn, err error) {
+	sessionAffinityReset := false
+	for _, dialTimeout := range endpointDialTimeout {
+		servicePortName := proxy.ServicePortName{
+			NamespacedName: types.NamespacedName{
+				Namespace: service.Namespace,
+				Name:      service.Name,
+			},
+			Port: service.Port,
+		}
+		endpoint, err := proxier.loadBalancer.NextEndpoint(servicePortName, srcAddr, sessionAffinityReset)
+		if err != nil {
+			glog.Errorf("Couldn't find an endpoint for %s: %v", service, err)
+			return nil, err
+		}
+		glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint)
+		// TODO: This could spin up a new goroutine to make the outbound connection,
+		// and keep accepting inbound traffic.
+		outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout)
+		if err != nil {
+			if isTooManyFDsError(err) {
+				panic("Dial failed: " + err.Error())
+			}
+			glog.Errorf("Dial failed: %v", err)
+			sessionAffinityReset = true
+			continue
+		}
+		return outConn, nil
+	}
+	return nil, fmt.Errorf("failed to connect to an endpoint.")
+}
+
// ProxyLoop accepts connections on the portal's TCP listener and proxies each
// one to a backend endpoint until the portal is closed or replaced.
func (tcp *tcpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serviceInfo, proxier *Proxier) {
	for {
		if !myInfo.isAlive() {
			// The service port was closed or replaced.
			return
		}
		// Block until a connection is made.
		inConn, err := tcp.Accept()
		if err != nil {
			if isTooManyFDsError(err) {
				panic("Accept failed: " + err.Error())
			}

			if isClosedError(err) {
				return
			}
			if !myInfo.isAlive() {
				// Then the service port was just closed so the accept failure is to be expected.
				return
			}
			glog.Errorf("Accept failed: %v", err)
			continue
		}
		glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr())
		outConn, err := tryConnect(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", proxier)
		if err != nil {
			glog.Errorf("Failed to connect to balancer: %v", err)
			inConn.Close()
			continue
		}
		// Spin up an async copy loop.
		go proxyTCP(inConn.(*net.TCPConn), outConn.(*net.TCPConn))
	}
}
+
// proxyTCP proxies data bi-directionally between in and out.
// It blocks until both copy directions have finished.
func proxyTCP(in, out *net.TCPConn) {
	var wg sync.WaitGroup
	wg.Add(2)
	glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v",
		in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr())
	go copyBytes("from backend", in, out, &wg)
	go copyBytes("to backend", out, in, &wg)
	wg.Wait()
}

// copyBytes copies src into dest until EOF or error, then closes BOTH ends so
// the opposite-direction copy unblocks; wg is signaled on return.
func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) {
	defer wg.Done()
	glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr())
	n, err := io.Copy(dest, src)
	if err != nil {
		// A closed-connection error is the normal shutdown path, not a fault.
		if !isClosedError(err) {
			glog.Errorf("I/O error: %v", err)
		}
	}
	glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr())
	dest.Close()
	src.Close()
}
+
// udpProxySocket implements proxySocket.  Close() is implemented by net.UDPConn.  When Close() is called,
// no new connections are allowed and existing connections are broken.
// TODO: We could lame-duck this ourselves, if it becomes important.
type udpProxySocket struct {
	*net.UDPConn
	port int
}

// ListenPort returns the host port this socket receives packets on.
func (udp *udpProxySocket) ListenPort() int {
	return udp.port
}

// Addr returns the socket's local address.
func (udp *udpProxySocket) Addr() net.Addr {
	return udp.LocalAddr()
}

// Holds all the known UDP clients that have not timed out.
type clientCache struct {
	mu      sync.Mutex // guards clients
	clients map[string]net.Conn // addr string -> connection
}

// newClientCache returns an empty UDP client cache.
func newClientCache() *clientCache {
	return &clientCache{clients: map[string]net.Conn{}}
}
+
// DNS query client classified by address and QTYPE
type dnsClientQuery struct {
	clientAddress string
	dnsQType      uint16
}

// Holds DNS client query, the value contains the index in DNS suffix search list,
// the original DNS message and length for the same client and QTYPE
type dnsClientCache struct {
	mu      sync.Mutex // guards clients
	clients map[dnsClientQuery]*dnsQueryState
}

// dnsQueryState tracks how far through the suffix search list a client's
// query has progressed, plus the original query message used for retries.
type dnsQueryState struct {
	searchIndex int32 // access with atomic ops
	msg         *dns.Msg
}

// newDNSClientCache returns an empty DNS client cache.
func newDNSClientCache() *dnsClientCache {
	return &dnsClientCache{clients: map[dnsClientQuery]*dnsQueryState{}}
}
+
+func packetRequiresDNSSuffix(dnsType, dnsClass uint16) bool {
+	return (dnsType == dnsTypeA || dnsType == dnsTypeAAAA) && dnsClass == dnsClassInternet
+}
+
+func isDNSService(portName string) bool {
+	return portName == dnsPortName
+}
+
// appendDNSSuffix re-packs msg into buffer with dnsSuffix appended to the
// single question name and returns the new packet length. The question name
// is restored before returning so the caller can retry with other suffixes.
func appendDNSSuffix(msg *dns.Msg, buffer []byte, length int, dnsSuffix string) (int, error) {
	if msg == nil || len(msg.Question) == 0 {
		return length, fmt.Errorf("DNS message parameter is invalid")
	}

	// Save the original name since it will be reused for next iteration
	origName := msg.Question[0].Name
	if dnsSuffix != "" {
		msg.Question[0].Name += dnsSuffix + "."
	}
	mbuf, err := msg.PackBuffer(buffer)
	msg.Question[0].Name = origName

	if err != nil {
		glog.Warningf("Unable to pack DNS packet. Error is: %v", err)
		return length, err
	}

	// PackBuffer allocates a fresh slice when buffer is too small; the caller
	// requires the result in its own buffer, so treat that as an error.
	if &buffer[0] != &mbuf[0] {
		return length, fmt.Errorf("Buffer is too small in packing DNS packet")
	}

	return len(mbuf), nil
}
+
// recoverDNSQuestion rewrites the question (and first answer) name of msg
// back to origName and re-packs it into buffer, returning the new length.
// It is a no-op when the name already matches.
func recoverDNSQuestion(origName string, msg *dns.Msg, buffer []byte, length int) (int, error) {
	if msg == nil || len(msg.Question) == 0 {
		return length, fmt.Errorf("DNS message parameter is invalid")
	}

	if origName == msg.Question[0].Name {
		return length, nil
	}

	msg.Question[0].Name = origName
	if len(msg.Answer) > 0 {
		msg.Answer[0].Header().Name = origName
	}
	mbuf, err := msg.PackBuffer(buffer)

	if err != nil {
		glog.Warningf("Unable to pack DNS packet. Error is: %v", err)
		return length, err
	}

	// As in appendDNSSuffix, the repacked message must fit the caller's buffer.
	if &buffer[0] != &mbuf[0] {
		return length, fmt.Errorf("Buffer is too small in packing DNS packet")
	}

	return len(mbuf), nil
}
+
+func processUnpackedDNSQueryPacket(
+	dnsClients *dnsClientCache,
+	msg *dns.Msg,
+	host string,
+	dnsQType uint16,
+	buffer []byte,
+	length int,
+	dnsSearch []string) int {
+	if dnsSearch == nil || len(dnsSearch) == 0 {
+		glog.V(1).Infof("DNS search list is not initialized and is empty.")
+		return length
+	}
+
+	// TODO: handle concurrent queries from a client
+	dnsClients.mu.Lock()
+	state, found := dnsClients.clients[dnsClientQuery{host, dnsQType}]
+	if !found {
+		state = &dnsQueryState{0, msg}
+		dnsClients.clients[dnsClientQuery{host, dnsQType}] = state
+	}
+	dnsClients.mu.Unlock()
+
+	index := atomic.SwapInt32(&state.searchIndex, state.searchIndex+1)
+	// Also update message ID if the client retries due to previous query time out
+	state.msg.MsgHdr.Id = msg.MsgHdr.Id
+
+	if index < 0 || index >= int32(len(dnsSearch)) {
+		glog.V(1).Infof("Search index %d is out of range.", index)
+		return length
+	}
+
+	length, err := appendDNSSuffix(msg, buffer, length, dnsSearch[index])
+	if err != nil {
+		glog.Errorf("Append DNS suffix failed: %v", err)
+	}
+
+	return length
+}
+
+func processUnpackedDNSResponsePacket(
+	svrConn net.Conn,
+	dnsClients *dnsClientCache,
+	msg *dns.Msg,
+	rcode int,
+	host string,
+	dnsQType uint16,
+	buffer []byte,
+	length int,
+	dnsSearch []string) (bool, int) {
+	var drop bool
+	var err error
+	if dnsSearch == nil || len(dnsSearch) == 0 {
+		glog.V(1).Infof("DNS search list is not initialized and is empty.")
+		return drop, length
+	}
+
+	dnsClients.mu.Lock()
+	state, found := dnsClients.clients[dnsClientQuery{host, dnsQType}]
+	dnsClients.mu.Unlock()
+
+	if found {
+		index := atomic.SwapInt32(&state.searchIndex, state.searchIndex+1)
+		if rcode != 0 && index >= 0 && index < int32(len(dnsSearch)) {
+			// If the response has failure and iteration through the search list has not
+			// reached the end, retry on behalf of the client using the original query message
+			drop = true
+			length, err = appendDNSSuffix(state.msg, buffer, length, dnsSearch[index])
+			if err != nil {
+				glog.Errorf("Append DNS suffix failed: %v", err)
+			}
+
+			_, err = svrConn.Write(buffer[0:length])
+			if err != nil {
+				if !logTimeout(err) {
+					glog.Errorf("Write failed: %v", err)
+				}
+			}
+		} else {
+			length, err = recoverDNSQuestion(state.msg.Question[0].Name, msg, buffer, length)
+			if err != nil {
+				glog.Errorf("Recover DNS question failed: %v", err)
+			}
+
+			dnsClients.mu.Lock()
+			delete(dnsClients.clients, dnsClientQuery{host, dnsQType})
+			dnsClients.mu.Unlock()
+		}
+	}
+
+	return drop, length
+}
+
// processDNSQueryPacket inspects a client DNS query and, for single-question
// A/AAAA Internet-class queries, appends the next DNS search suffix before
// the packet is forwarded. It returns the (possibly updated) packet length.
func processDNSQueryPacket(
	dnsClients *dnsClientCache,
	cliAddr net.Addr,
	buffer []byte,
	length int,
	dnsSearch []string) (int, error) {
	msg := &dns.Msg{}
	if err := msg.Unpack(buffer[:length]); err != nil {
		glog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
		return length, err
	}

	// Query - Response bit that specifies whether this message is a query (0) or a response (1).
	if msg.MsgHdr.Response == true {
		return length, fmt.Errorf("DNS packet should be a query message")
	}

	// QDCOUNT
	if len(msg.Question) != 1 {
		glog.V(1).Infof("Number of entries in the question section of the DNS packet is: %d", len(msg.Question))
		glog.V(1).Infof("DNS suffix appending does not support more than one question.")
		return length, nil
	}

	// ANCOUNT, NSCOUNT, ARCOUNT
	if len(msg.Answer) != 0 || len(msg.Ns) != 0 || len(msg.Extra) != 0 {
		glog.V(1).Infof("DNS packet contains more than question section.")
		return length, nil
	}

	dnsQType := msg.Question[0].Qtype
	dnsQClass := msg.Question[0].Qclass
	if packetRequiresDNSSuffix(dnsQType, dnsQClass) {
		// Key client state by host only; the port may differ between retries.
		host, _, err := net.SplitHostPort(cliAddr.String())
		if err != nil {
			glog.V(1).Infof("Failed to get host from client address: %v", err)
			host = cliAddr.String()
		}

		length = processUnpackedDNSQueryPacket(dnsClients, msg, host, dnsQType, buffer, length, dnsSearch)
	}

	return length, nil
}
+
+// processDNSResponsePacket validates a DNS response from the backend before
+// it is relayed to the client. It unpacks the datagram, requires a response
+// with exactly one question, and for suffix-handled query types delegates to
+// processUnpackedDNSResponsePacket, which may rewrite buffer, change its
+// length, and request the packet be dropped (when a retry with the next
+// search suffix was issued instead). Returns (drop, length, error).
+func processDNSResponsePacket(
+	svrConn net.Conn,
+	dnsClients *dnsClientCache,
+	cliAddr net.Addr,
+	buffer []byte,
+	length int,
+	dnsSearch []string) (bool, int, error) {
+	var drop bool
+	msg := &dns.Msg{}
+	if err := msg.Unpack(buffer[:length]); err != nil {
+		glog.Warningf("Unable to unpack DNS packet. Error is: %v", err)
+		return drop, length, err
+	}
+
+	// Query - Response bit that specifies whether this message is a query (0) or a response (1).
+	if msg.MsgHdr.Response == false {
+		return drop, length, fmt.Errorf("DNS packet should be a response message")
+	}
+
+	// QDCOUNT: the guard is on the question section, so log its length.
+	// (Previously this logged len(msg.Answer) and called it the "response
+	// section", which did not match the condition being reported.)
+	if len(msg.Question) != 1 {
+		glog.V(1).Infof("Number of entries in the question section of the DNS packet is: %d", len(msg.Question))
+		return drop, length, nil
+	}
+
+	dnsQType := msg.Question[0].Qtype
+	dnsQClass := msg.Question[0].Qclass
+	if packetRequiresDNSSuffix(dnsQType, dnsQClass) {
+		host, _, err := net.SplitHostPort(cliAddr.String())
+		if err != nil {
+			glog.V(1).Infof("Failed to get host from client address: %v", err)
+			host = cliAddr.String()
+		}
+
+		drop, length = processUnpackedDNSResponsePacket(svrConn, dnsClients, msg, msg.MsgHdr.Rcode, host, dnsQType, buffer, length, dnsSearch)
+	}
+
+	return drop, length, nil
+}
+
+// getDnsSuffixSearchList parses /etc/resolv.conf and returns the domains on
+// the first "search" line. Returns an empty (non-nil) list when the file
+// cannot be read or contains no search directive.
+func getDnsSuffixSearchList() ([]string, error) {
+	suffixList := []string{}
+	data, err := ioutil.ReadFile("/etc/resolv.conf")
+	if err != nil {
+		return suffixList, err
+	}
+	scanner := bufio.NewScanner(strings.NewReader(string(data)))
+	for scanner.Scan() {
+		// Fields splits on any run of whitespace, so "search" lines that
+		// use tabs or multiple spaces (valid in resolv.conf) parse
+		// correctly; the previous Split(line, " ") only handled single
+		// spaces and could also index an empty line's first element.
+		fields := strings.Fields(scanner.Text())
+		if len(fields) > 0 && fields[0] == "search" {
+			suffixList = append(suffixList, fields[1:]...)
+			return suffixList, nil
+		}
+	}
+	return suffixList, nil
+}
+
+// ProxyLoop reads UDP datagrams from the proxy socket and forwards each one
+// to a per-client backend connection until the service port is closed. For
+// the DNS service port it additionally rewrites queries using the configured
+// search-suffix list before forwarding.
+func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serviceInfo, proxier *Proxier) {
+	var buffer [4096]byte // 4KiB should be enough for most whole-packets
+	var dnsSearch []string
+	if isDNSService(service.Port) {
+		// Search order: exact name first (""), then the namespace/service/
+		// cluster domains, then any suffixes from the host's resolv.conf.
+		dnsSearch = []string{"", namespaceServiceDomain, serviceDomain, clusterDomain}
+		suffixList, err := getDnsSuffixSearchList()
+		if err == nil {
+			for _, suffix := range suffixList {
+				dnsSearch = append(dnsSearch, suffix)
+			}
+		}
+	}
+
+	for {
+		if !myInfo.isAlive() {
+			// The service port was closed or replaced.
+			break
+		}
+
+		// Block until data arrives.
+		// TODO: Accumulate a histogram of n or something, to fine tune the buffer size.
+		n, cliAddr, err := udp.ReadFrom(buffer[0:])
+		if err != nil {
+			if e, ok := err.(net.Error); ok {
+				if e.Temporary() {
+					glog.V(1).Infof("ReadFrom had a temporary failure: %v", err)
+					continue
+				}
+			}
+			glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err)
+			break
+		}
+
+		// If this is DNS query packet
+		if isDNSService(service.Port) {
+			// May rewrite the query in place (suffix append) and change n.
+			n, err = processDNSQueryPacket(myInfo.dnsClients, cliAddr, buffer[:], n, dnsSearch)
+			if err != nil {
+				glog.Errorf("Process DNS query packet failed: %v", err)
+			}
+		}
+
+		// If this is a client we know already, reuse the connection and goroutine.
+		svrConn, err := udp.getBackendConn(myInfo.activeClients, myInfo.dnsClients, cliAddr, proxier, service, myInfo.timeout, dnsSearch)
+		if err != nil {
+			continue
+		}
+		// TODO: It would be nice to let the goroutine handle this write, but we don't
+		// really want to copy the buffer.  We could do a pool of buffers or something.
+		_, err = svrConn.Write(buffer[0:n])
+		if err != nil {
+			if !logTimeout(err) {
+				glog.Errorf("Write failed: %v", err)
+				// TODO: Maybe tear down the goroutine for this client/server pair?
+			}
+			continue
+		}
+		// Refresh the backend deadline so the response pump in proxyClient
+		// does not time out while this exchange is still active.
+		err = svrConn.SetDeadline(time.Now().Add(myInfo.timeout))
+		if err != nil {
+			glog.Errorf("SetDeadline failed: %v", err)
+			continue
+		}
+	}
+}
+
+// getBackendConn returns the backend connection for cliAddr, creating a new
+// connection (and starting a response-pumping goroutine, see proxyClient) on
+// first use. Access to the cache is serialized by activeClients.mu.
+func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, dnsClients *dnsClientCache, cliAddr net.Addr, proxier *Proxier, service ServicePortPortalName, timeout time.Duration, dnsSearch []string) (net.Conn, error) {
+	activeClients.mu.Lock()
+	defer activeClients.mu.Unlock()
+
+	svrConn, found := activeClients.clients[cliAddr.String()]
+	if !found {
+		// TODO: This could spin up a new goroutine to make the outbound connection,
+		// and keep accepting inbound traffic.
+		glog.V(3).Infof("New UDP connection from %s", cliAddr)
+		var err error
+		svrConn, err = tryConnect(service, cliAddr, "udp", proxier)
+		if err != nil {
+			return nil, err
+		}
+		if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
+			glog.Errorf("SetDeadline failed: %v", err)
+			return nil, err
+		}
+		activeClients.clients[cliAddr.String()] = svrConn
+		// One goroutine per client pumps backend responses back to the client
+		// and removes the cache entry when the connection dies.
+		go func(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, dnsClients *dnsClientCache, service ServicePortPortalName, timeout time.Duration, dnsSearch []string) {
+			defer runtime.HandleCrash()
+			udp.proxyClient(cliAddr, svrConn, activeClients, dnsClients, service, timeout, dnsSearch)
+		}(cliAddr, svrConn, activeClients, dnsClients, service, timeout, dnsSearch)
+	}
+	return svrConn, nil
+}
+
+// proxyClient pumps responses from the backend connection back to the client
+// until a read error (including deadline expiry) occurs, then removes the
+// client from the active-client cache. For the DNS service a response may be
+// rewritten in place or dropped entirely (when a retry with the next search
+// suffix was issued to the backend instead).
+// This function is expected to be called as a goroutine.
+// TODO: Track and log bytes copied, like TCP
+func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, dnsClients *dnsClientCache, service ServicePortPortalName, timeout time.Duration, dnsSearch []string) {
+	defer svrConn.Close()
+	var buffer [4096]byte
+	for {
+		n, err := svrConn.Read(buffer[0:])
+		if err != nil {
+			if !logTimeout(err) {
+				glog.Errorf("Read failed: %v", err)
+			}
+			break
+		}
+
+		drop := false
+		if isDNSService(service.Port) {
+			drop, n, err = processDNSResponsePacket(svrConn, dnsClients, cliAddr, buffer[:], n, dnsSearch)
+			if err != nil {
+				glog.Errorf("Process DNS response packet failed: %v", err)
+			}
+		}
+
+		if !drop {
+			// Extend the deadline before relaying, then forward the
+			// (possibly rewritten) packet to the client.
+			err = svrConn.SetDeadline(time.Now().Add(timeout))
+			if err != nil {
+				glog.Errorf("SetDeadline failed: %v", err)
+				break
+			}
+			n, err = udp.WriteTo(buffer[0:n], cliAddr)
+			if err != nil {
+				if !logTimeout(err) {
+					glog.Errorf("WriteTo failed: %v", err)
+				}
+				break
+			}
+		}
+	}
+	// Drop this client from the cache so the next datagram reconnects.
+	activeClients.mu.Lock()
+	delete(activeClients.clients, cliAddr.String())
+	activeClients.mu.Unlock()
+}
diff --git a/pkg/proxy/userspace/rlimit.go b/pkg/proxy/userspace/rlimit.go
index f8f120d..b948fa8 100644
--- a/pkg/proxy/userspace/rlimit.go
+++ b/pkg/proxy/userspace/rlimit.go
@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!aix
 
 /*
 Copyright 2015 The Kubernetes Authors.
diff --git a/pkg/proxy/userspace/rlimit_aix.go b/pkg/proxy/userspace/rlimit_aix.go
new file mode 100644
index 0000000..8c6a9cb
--- /dev/null
+++ b/pkg/proxy/userspace/rlimit_aix.go
@@ -0,0 +1,27 @@
+// +build aix
+
+// This is a copy of rlimit.go, except it uses syscall package instead of unix
+
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package userspace
+
+import "syscall"
+
+// setRLimit raises both the soft and hard RLIMIT_NOFILE limits to the given
+// value, using the raw syscall package (this file is a copy of rlimit.go
+// that avoids the unix package; see the header note above).
+func setRLimit(limit uint64) error {
+	return syscall.Setrlimit(syscall.RLIMIT_NOFILE, &syscall.Rlimit{Max: limit, Cur: limit})
+}
diff --git a/pkg/util/mount/mount_aix.go b/pkg/util/mount/mount_aix.go
new file mode 100644
index 0000000..9124ac9
--- /dev/null
+++ b/pkg/util/mount/mount_aix.go
@@ -0,0 +1,675 @@
+// +build aix
+
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mount
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+
+	"github.com/golang/glog"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/uuid"
+)
+
+const (
+	// LvPrefix is the logical volume name prefix assigned by kubelet
+	// (e.g. k8s_lvhdiskXX, see formatAndMount).
+	LvPrefix = "k8s_lv"
+	// vgPrefix is the volume group name prefix used when importing a VG.
+	vgPrefix = "k8s_vg"
+	// ramDiskSize is passed verbatim to the mkramdisk command; presumably a
+	// size in 512-byte blocks ("512MB worth") -- TODO confirm the unit
+	// against the AIX mkramdisk documentation.
+	ramDiskSize = "1024000"
+	// ramDiskRoot is the directory under which the shared ramdisk is mounted.
+	ramDiskRoot = "/var/lib/kubelet"
+)
+
+const (
+	// 'fsck' found errors and corrected them
+	fsckErrorsCorrected = 1
+	// 'fsck' found errors but exited without correcting them
+	fsckErrorsUncorrected = 4
+)
+
+// Mounter provides the default implementation of mount.Interface
+// for the AIX platform.  This implementation assumes that the
+// kubelet is running in the host's root mount namespace.
+type Mounter struct {
+	mounterPath  string
+	// ramDiskMntpt is the mount point of the shared ramdisk backing all
+	// tmpfs mounts; "" until the first tmpfs request creates it.
+	ramDiskMntpt string
+	// ramDiskMap maps a tmpfs mount target to its backing ramdisk directory.
+	ramDiskMap   map[string]string
+	// mu guards ramDiskMntpt and ramDiskMap.
+	mu           sync.RWMutex
+}
+
+// New returns a mount.Interface for the current system.
+// It provides options to override the default mounter behavior.
+// mounterPath allows using an alternative to `/bin/mount` for mounting.
+// The ramdisk fields start empty; they are populated lazily on the first
+// tmpfs Mount request.
+func New(mounterPath string) Interface {
+	return &Mounter{
+		mounterPath:  mounterPath,
+		ramDiskMntpt: "",
+		ramDiskMap:   make(map[string]string),
+	}
+}
+
+// Mount mounts source to target as fstype with given options. 'source' and 'fstype' must
+// be an emtpy string in case it's not required, e.g. for remount, or for auto filesystem
+// type, where kernel handles fs type for you. The mount 'options' is a list of options,
+// currently come from mount(8), e.g. "ro", "remount", "bind", etc. If no more option is
+// required, call Mount with an empty string list or nil.
+//
+// AIX specifics: tmpfs requests are redirected to a per-object directory on a
+// shared ramdisk (created lazily), and bind mounts are emulated via namefs by
+// dropping the "bind" option.
+func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error {
+	// Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty.
+	mounterPath := ""
+
+	// AIX memory resident filesystem is called RAMdisk. tmpfs mounts will share space on a single ramdisk.
+	// This is because ramdisks are limited (max 64), so we may have more tmpfs requests than ramdisks.
+	// Each object needs a separate, isolated dir on the mounted ramdisk fs, so we create
+	// and mount that dir on target dir via namefs.
+	if fstype == "tmpfs" {
+		// mu guards ramDiskMntpt; unlock before the (slow) MkdirAll below.
+		mounter.mu.Lock()
+		if mounter.ramDiskMntpt == "" {
+			// no RamDisk configured for kubelet, need to create one
+			var err error
+			mounter.ramDiskMntpt, err = createAndMountRamDisk()
+			if err != nil {
+				mounter.mu.Unlock()
+				glog.Errorf("Mount failed: could not configure/mount ram disk")
+				return err
+			}
+		}
+		mntDir := filepath.Join(mounter.ramDiskMntpt, string(uuid.NewUUID()))
+		mounter.mu.Unlock()
+		err := os.MkdirAll(mntDir, os.ModePerm)
+		if err != nil {
+			glog.Errorf("Mount failed: could not create directory on ram disk")
+			return err
+		}
+		// The namefs mount below uses this ramdisk dir as the source.
+		source = mntDir
+	}
+
+	// AIX doesn't support bind mounts. So we discard bind option. Mounting one dir
+	// on another dir is called "namefs" on AIX, this is roughly similar to linux bind mounts.
+	bind, bindRemountOpts := isBindAix(options)
+	if bind {
+		glog.V(4).Infof("AIX: Ignoring bind option on mount command")
+		return doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts)
+	}
+	// The list of filesystems that require containerized mounter on GCI image cluster
+	fsTypesNeedMounter := sets.NewString("nfs", "glusterfs", "ceph", "cifs")
+	if fsTypesNeedMounter.Has(fstype) {
+		mounterPath = mounter.mounterPath
+	}
+	err := doMount(mounterPath, defaultMountCommand, source, target, fstype, options)
+	if err == nil && fstype == "tmpfs" {
+		// Remember the backing dir so Unmount can clean it up later.
+		mounter.mu.Lock()
+		mounter.ramDiskMap[target] = source
+		mounter.mu.Unlock()
+	}
+	return err
+}
+
+// isBind detects whether a bind mount is being requested and strips the bind option
+// use in case of bind mount, due to the fact that AIX doesn't support bind option.
+// The list equals:
+//   options - 'bind' + 'remount' (no duplicate)
+func isBindAix(options []string) (bool, []string) {
+	bindRemountOpts := []string{}
+	bind := false
+
+	if len(options) != 0 {
+		for _, option := range options {
+			switch option {
+			case "bind":
+				bind = true
+				break
+			default:
+				bindRemountOpts = append(bindRemountOpts, option)
+			}
+		}
+	}
+
+	return bind, bindRemountOpts
+}
+
+// getMountedRamDisk is a stub that always returns "". It appears unused in
+// this file -- NOTE(review): confirm it has no callers before removing.
+func getMountedRamDisk() string {
+	return ""
+}
+
+// createAndMountRamDisk creates and mounts a new ramdisk device for use by
+// kubelet; any tmpfs mount request will end up on that device. If a ramdisk
+// is already mounted under /var/lib/kubelet/ramdiskXX it is reused.
+// Returns the ramdisk mount point.
+func createAndMountRamDisk() (string, error) {
+	// In case we already have a ramDisk mounted on /var/lib/kubelet/ramdiskXX, reuse it
+	dfOut, dfErr := exec.Command("df", "-F", "%m").CombinedOutput()
+	if dfErr == nil {
+		scanner := bufio.NewScanner(strings.NewReader(string(dfOut)))
+		for scanner.Scan() {
+			fld := strings.Fields(scanner.Text())
+			if len(fld) > 0 && strings.HasPrefix(fld[0], "/dev/ramdisk") {
+				path := fld[len(fld)-1]
+				dir, disk := filepath.Split(path)
+				_, disk0 := filepath.Split(fld[0])
+				// Reuse only when mounted under ramDiskRoot and the mount
+				// dir name matches the device name (our own naming scheme).
+				if filepath.Clean(dir) == ramDiskRoot && disk == disk0 {
+					return path, nil
+				}
+			}
+		}
+	}
+
+	mkOut, mkErr := exec.Command("mkramdisk", ramDiskSize).CombinedOutput()
+	if mkErr != nil {
+		return "", fmt.Errorf("could not create ramdisk %v - %s", mkErr, mkOut)
+	}
+	// mkramdisk prints the device path; extract the "ramdiskN" name from it.
+	rramdisk := strings.TrimSpace(string(mkOut))
+	ramDisk := rramdisk[strings.Index(rramdisk, "ramdisk"):]
+	ramDev := "/dev/" + ramDisk
+	mkOut, mkErr = exec.Command("mkfs", "-V", "jfs2", "-o", "log=INLINE", ramDev).CombinedOutput()
+	if mkErr != nil {
+		return "", fmt.Errorf("could not mkfs on ramdisk %v - %s", mkErr, mkOut)
+	}
+	ramDiskMntpt := filepath.Join(ramDiskRoot, ramDisk)
+	// NOTE(review): MkdirAll error is ignored; the mount below will fail if
+	// the directory could not be created.
+	os.MkdirAll(ramDiskMntpt, os.ModePerm)
+	opts := []string{"log=" + ramDev}
+	err := doMount("", defaultMountCommand, ramDev, ramDiskMntpt, "tmpfs", opts)
+	if err != nil {
+		glog.Errorf("Mount failed: could not mount ram disk: %s\n", ramDev)
+		return "", err
+	}
+	return ramDiskMntpt, nil
+}
+
+// doMount runs the mount command. mounterPath is the path to mounter binary if containerized mounter is used.
+// On failure the full command line and its combined output are included in
+// both the log entry and the returned error.
+func doMount(mounterPath string, mountCmd string, source string, target string, fstype string, options []string) error {
+	mountArgs := makeMountArgs(source, target, fstype, options)
+	if len(mounterPath) > 0 {
+		// Run "<mounterPath> <mountCmd> <args...>" instead of mountCmd directly.
+		mountArgs = append([]string{mountCmd}, mountArgs...)
+		mountCmd = mounterPath
+	}
+
+	glog.V(4).Infof("Mounting cmd (%s) with arguments (%s)", mountCmd, mountArgs)
+	command := exec.Command(mountCmd, mountArgs...)
+	output, err := command.CombinedOutput()
+	if err != nil {
+		glog.Errorf("Mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n", err, mountCmd, source, target, fstype, options, string(output))
+		return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n",
+			err, mountCmd, source, target, fstype, options, string(output))
+	}
+	return err
+}
+
+// makeMountArgs makes the arguments to the mount(8) command.
+func makeMountArgs(source, target, fstype string, options []string) []string {
+	// Build mount command as follows:
+	//   mount [-t $fstype] [-o $options] [$source] $target
+	mountArgs := []string{}
+
+	//AIX doesn't support fstype "-t" option in mount commands. discard fstype.
+	//TODO maybe use "-V" option instead for jfs or jfs2 filesystems
+	//if len(fstype) > 0 {mountArgs = append(mountArgs, "-t", fstype)}
+
+	if len(options) > 0 {
+		mountArgs = append(mountArgs, "-o", strings.Join(options, ","))
+	}
+	if len(source) > 0 {
+		mountArgs = append(mountArgs, source)
+	}
+	mountArgs = append(mountArgs, target)
+
+	return mountArgs
+}
+
+// Unmount unmounts the target and deletes associated ramdisk if needed.
+// When the target was a tmpfs mount, its backing directory on the shared
+// ramdisk is removed, and once no tmpfs mounts remain the ramdisk itself is
+// unmounted and destroyed via rmramdisk.
+// NOTE(review): cleanup failures below are only logged; Unmount returns nil
+// once the umount of target itself succeeded.
+func (mounter *Mounter) Unmount(target string) error {
+	glog.V(4).Infof("Unmounting %s", target)
+	command := exec.Command("umount", target)
+	output, err := command.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("Unmount failed: %v\nUnmounting arguments: %s\nOutput: %s\n", err, target, string(output))
+	}
+	mounter.mu.Lock()
+	defer mounter.mu.Unlock()
+	if source, ok := mounter.ramDiskMap[target]; ok {
+		delete(mounter.ramDiskMap, target)
+		err = os.RemoveAll(source)
+		if len(mounter.ramDiskMap) == 0 {
+			// ramdisk is not referenced any more, we can unmount and remove it
+			// source is a directory like /var/lib/kubelet/ramdiskXX/uuid, XX is the disk index
+			mntpt, _ := filepath.Split(source)
+			mntpt = filepath.Clean(mntpt)
+			command = exec.Command("umount", mntpt)
+			output, err = command.CombinedOutput()
+			if err != nil {
+				glog.V(4).Infof("AIX: ramdisk unmount failed :%s", string(output))
+			} else {
+				// Reset so the next tmpfs Mount creates a fresh ramdisk.
+				mounter.ramDiskMntpt = ""
+				os.RemoveAll(mntpt)
+				_, disk := filepath.Split(mntpt)
+				rmOut, rmErr := exec.Command("rmramdisk", disk).CombinedOutput()
+				if rmErr != nil {
+					glog.V(4).Infof("AIX: --rmramdisk %s-- failed :%s", disk, string(rmOut))
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// List returns a list of all mounted filesystems, parsed from the output of
+// the AIX `mount` command (the `line > 2` check skips its two header lines).
+func (*Mounter) List() ([]MountPoint, error) {
+	output, err := exec.Command("mount").CombinedOutput()
+	if err != nil {
+		return nil, err
+	}
+	res := []MountPoint{}
+	// srcMap maps a mounted path to its "device" (which for namefs mounts is
+	// itself a path), used by the resolution pass below.
+	srcMap := make(map[string]string)
+	scanner := bufio.NewScanner(strings.NewReader(string(output)))
+	line := 0
+	for scanner.Scan() {
+		line++
+		data := strings.Fields(scanner.Text())
+		// node   mounted  mounted over   vfs  date   options
+		if len(data) >= 7 && line > 2 {
+			mp := MountPoint{
+				Device: data[0],
+				Path:   data[1],
+				Type:   data[2],
+				Opts:   strings.Split(data[len(data)-1], ","),
+				Freq:   0,
+				Pass:   0,
+			}
+			if mp.Path != mp.Device {
+				srcMap[mp.Path] = mp.Device
+			}
+			res = append(res, mp)
+		}
+	}
+
+	// On linux, mounting dir1 on a device and bind-mounting dir2 on dir1 results
+	// in dir2 having device as the Device field (same as dir1). On aix, mount cmd
+	// will show dir2 having dir1 as Device (instead of device). So we need a second pass
+	// to resolve all namefs mounts to their "source" backing device.
+	// NOTE(review): this chain walk assumes srcMap is acyclic; a mount cycle
+	// would loop forever here.
+	for i := 0; i < len(res); i++ {
+		dev := res[i].Device
+		for src, ok := dev, true; ok == true && src != ""; src, ok = srcMap[dev] {
+			dev = src
+		}
+		res[i].Device = dev
+	}
+
+	return res, nil
+}
+
+// IsMountPointMatch reports whether mount point mp refers to dir, also
+// matching the "dir\040(deleted)" form that appears for deleted mounts.
+func (mounter *Mounter) IsMountPointMatch(mp MountPoint, dir string) bool {
+	if mp.Path == dir {
+		return true
+	}
+	return mp.Path == fmt.Sprintf("%s\\040(deleted)", dir)
+}
+
+// IsNotMountPoint determines whether dir is a mount point by delegating to
+// the package-level IsNotMountPoint helper (slower but more thorough than
+// IsLikelyNotMountPoint).
+func (mounter *Mounter) IsNotMountPoint(dir string) (bool, error) {
+	return IsNotMountPoint(mounter, dir)
+}
+
+// IsLikelyNotMountPoint determines if a directory is not a mountpoint.
+// It is fast but not necessarily ALWAYS correct. If the path is in fact
+// a bind mount from one part of a mount to another it will not be detected.
+// mkdir /tmp/a /tmp/b; mount --bind /tmp/a /tmp/b; IsLikelyNotMountPoint("/tmp/b")
+// will return true. When in fact /tmp/b is a mount point. If this situation
+// is of interest to you, don't use this function...
+func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
+	stat, err := os.Stat(file)
+	if err != nil {
+		return true, err
+	}
+	rootStat, err := os.Lstat(file + "/..")
+	if err != nil {
+		return true, err
+	}
+	// If the directory has a different device as parent, then it is a mountpoint.
+	if stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// DeviceOpened checks if block device in use by calling Open with O_EXCL flag.
+// If pathname is not a device, log and return false with nil error.
+// If open returns errno EBUSY, return true with nil error.
+// If open returns nil, return false with nil error.
+// Otherwise, return false with error
+func (mounter *Mounter) DeviceOpened(pathname string) (bool, error) {
+	return exclusiveOpenFailsOnDevice(pathname)
+}
+
+// PathIsDevice uses FileInfo returned from os.Stat to check if path refers
+// to a device (block or character). Delegates to the package-level
+// pathIsDevice helper.
+func (mounter *Mounter) PathIsDevice(pathname string) (bool, error) {
+	return pathIsDevice(pathname)
+}
+
+// exclusiveOpenFailsOnDevice reports whether opening pathname with O_EXCL
+// fails with EBUSY, i.e. whether the device is currently in use. Non-device
+// paths report (false, nil); unexpected open errors are returned.
+func exclusiveOpenFailsOnDevice(pathname string) (bool, error) {
+	isDevice, err := pathIsDevice(pathname)
+	if err != nil {
+		return false, fmt.Errorf(
+			"PathIsDevice failed for path %q: %v",
+			pathname,
+			err)
+	}
+	if !isDevice {
+		glog.Errorf("Path %q is not refering to a device.", pathname)
+		return false, nil
+	}
+	fd, errno := syscall.Open(pathname, syscall.O_RDONLY|syscall.O_EXCL, 0)
+	if errno == nil {
+		// Open succeeded, so the device is not in use. Close our fd here
+		// rather than via defer: the previous code deferred Close(fd)
+		// unconditionally, which also closed the invalid fd returned on a
+		// failed open.
+		syscall.Close(fd)
+		return false, nil
+	}
+	if errno == syscall.EBUSY {
+		// device is in use
+		return true, nil
+	}
+	// error during call to Open
+	return false, errno
+}
+
+// pathIsDevice reports whether pathname exists and refers to a device node
+// (os.ModeDevice covers both block and character devices). A missing path
+// yields (false, nil); any other stat error is returned to the caller.
+func pathIsDevice(pathname string) (bool, error) {
+	finfo, err := os.Stat(pathname)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+	return finfo.Mode()&os.ModeDevice != 0, nil
+}
+
+// ExistsPath reports whether pathname can be stat'ed. Any stat error
+// (including permission errors) is reported as "does not exist".
+func (mounter *Mounter) ExistsPath(pathname string) bool {
+	_, err := os.Stat(pathname)
+	return err == nil
+}
+
+// GetFileType stats pathname and classifies it as a socket, block device,
+// character device, directory, or regular file. A missing path and any
+// unrecognized mode are errors.
+func (mounter *Mounter) GetFileType(pathname string) (FileType, error) {
+	var pathType FileType
+	finfo, err := os.Stat(pathname)
+	if os.IsNotExist(err) {
+		return pathType, fmt.Errorf("path %q does not exist", pathname)
+	}
+	// err in call to os.Stat
+	if err != nil {
+		return pathType, err
+	}
+
+	mode := finfo.Sys().(*syscall.Stat_t).Mode
+	switch mode & syscall.S_IFMT {
+	case syscall.S_IFSOCK:
+		return FileTypeSocket, nil
+	case syscall.S_IFBLK:
+		return FileTypeBlockDev, nil
+	case syscall.S_IFCHR:
+		// Fixed: character devices were previously misreported as block
+		// devices (FileTypeBlockDev).
+		return FileTypeCharDev, nil
+	case syscall.S_IFDIR:
+		return FileTypeDirectory, nil
+	case syscall.S_IFREG:
+		return FileTypeFile, nil
+	}
+
+	return pathType, fmt.Errorf("only recognise file, directory, socket, block device and character device")
+}
+
+// MakeDir creates pathname (and any missing parents) with mode 0755.
+// An already-existing path is not treated as an error.
+func (mounter *Mounter) MakeDir(pathname string) error {
+	if err := os.MkdirAll(pathname, os.FileMode(0755)); err != nil && !os.IsExist(err) {
+		return err
+	}
+	return nil
+}
+
+// MakeFile creates an empty file at pathname with mode 0644. An
+// already-existing file is not treated as an error.
+func (mounter *Mounter) MakeFile(pathname string) error {
+	f, err := os.OpenFile(pathname, os.O_CREATE, os.FileMode(0644))
+	if err != nil {
+		// Previously f.Close() was deferred before this check, closing an
+		// invalid handle on the error path.
+		if os.IsExist(err) {
+			return nil
+		}
+		return err
+	}
+	return f.Close()
+}
+
+// MakeRShared checks that given path is on a mount with 'rshared' mount
+// propagation. Empty implementation here: mount propagation is a Linux
+// concept, so this is a no-op that always succeeds on AIX.
+func (mounter *Mounter) MakeRShared(path string) error {
+	return nil
+}
+
+// GetDeviceNameFromMount: given a mount point, find the device name from its
+// global mount point. Delegates to the package-level helper below.
+func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {
+	return getDeviceNameFromMount(mounter, mountPath, pluginDir)
+}
+
+// TODO: AIX port getDeviceNameFromMount for kubelet 1.9.1
+// getDeviceNameFromMount finds the volume ID for mountPath by looking for a
+// mount reference under pluginDir's global-mounts directory. In case no mount
+// path reference matches, returns the volume name taken from mountPath's
+// base name. (Unlike the Linux variant, this uses List-derived refs rather
+// than /proc/mounts.)
+func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) {
+	refs, err := GetMountRefs(mounter, mountPath)
+	if err != nil {
+		glog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err)
+		return "", err
+	}
+	if len(refs) == 0 {
+		glog.V(4).Infof("Directory %s is not mounted", mountPath)
+		return "", fmt.Errorf("directory %s is not mounted", mountPath)
+	}
+	basemountPath := path.Join(pluginDir, MountsInGlobalPDPath)
+	for _, ref := range refs {
+		if strings.HasPrefix(ref, basemountPath) {
+			// The path relative to the global-mounts dir is the volume ID.
+			volumeID, err := filepath.Rel(basemountPath, ref)
+			if err != nil {
+				glog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err)
+				return "", err
+			}
+			return volumeID, nil
+		}
+	}
+
+	return path.Base(mountPath), nil
+}
+
+// GetMountRefs finds all other references to the device referenced
+// by mountPath; returns a list of paths. The device is resolved via the
+// mount list (see List), after following any symlink in mountPath.
+func GetMountRefs(mounter Interface, mountPath string) ([]string, error) {
+	mps, err := mounter.List()
+	if err != nil {
+		return nil, err
+	}
+	// Find the device name.
+	deviceName := ""
+	// If mountPath is symlink, need get its target path.
+	slTarget, err := filepath.EvalSymlinks(mountPath)
+	if err != nil {
+		// Not a symlink (or unresolvable): use the path as given.
+		slTarget = mountPath
+	}
+	for i := range mps {
+		if mps[i].Path == slTarget {
+			deviceName = mps[i].Device
+			break
+		}
+	}
+
+	// Find all references to the device.
+	var refs []string
+	if deviceName == "" {
+		glog.Warningf("could not determine device for path: %q", mountPath)
+	} else {
+		for i := range mps {
+			if mps[i].Device == deviceName && mps[i].Path != slTarget {
+				refs = append(refs, mps[i].Path)
+			}
+		}
+	}
+	return refs, nil
+}
+
+// formatAndMount uses AIX utils to format and mount the given disk
+// (source is a physical volume name such as "hdiskXX", without "/dev/").
+// Strategy, in order:
+//  1. try mounting /dev/<source> directly with an inline log;
+//  2. look for an existing logical volume of the requested fstype on the
+//     disk (importing its volume group if needed) and mount that;
+//  3. otherwise format: mkfs directly for jfs2 (inline log), or create a
+//     VG, a jfslog LV and a data LV for jfs, then mkfs and mount the LV.
+// Only jfs and jfs2 are supported on AIX.
+func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
+	options = append(options, "defaults")
+
+	// AIX only supports jfs and jfs2 filesystems
+	if fstype != "jfs2" && fstype != "jfs" {
+		return fmt.Errorf("filesystem type %s is unsupported on AIX ", fstype)
+	}
+
+	inlineOpts := append(options, "log=INLINE")
+	mountErr := mounter.Interface.Mount("/dev/"+source, target, fstype, inlineOpts)
+	if mountErr == nil {
+		return nil
+	}
+
+	// We were not able to mount the device (/dev/hdiskXX) directly.
+	// Check if we already have a lv (with correct fstype) ready to be mounted.
+	lsOut, lsErr := mounter.Exec.Run("lspv", "-p", source)
+	if lsErr != nil {
+		// lspv failed; the VG may simply not be imported yet. Best effort:
+		// import it and retry. An empty lsOut just yields no LV matches.
+		mounter.Exec.Run("importvg", "-y", vgPrefix+source, source)
+		lsOut, _ = mounter.Exec.Run("lspv", "-p", source)
+	}
+
+	// Parse LV name and fs type out of lspv's column-formatted output.
+	// Header: PP RANGE STATE REGION   LV NAME   TYPE    MOUNT POINT
+	scanner := bufio.NewScanner(strings.NewReader(string(lsOut)))
+	mp := make(map[string]string)
+	offset := []int{-1, -1, -1} // column offsets of LV NAME, TYPE, MOUNT POINT
+	for scanner.Scan() {
+		line := scanner.Text()
+		data := strings.Fields(line)
+		if len(data) == 0 {
+			// Skip blank lines; indexing data[0] below would panic.
+			continue
+		}
+		// Only slice rows long enough to contain all three columns.
+		if offset[0] > 0 && len(line) >= offset[2] {
+			lvname := strings.TrimSpace(line[offset[0] : offset[1]-1])
+			fs := strings.TrimSpace(line[offset[1] : offset[2]-1])
+			mp[fs] = lvname // expect there to be only one fs of fstype
+		}
+		if data[0] == "PP" {
+			offset[0] = strings.Index(line, "LV")
+			offset[1] = strings.Index(line, "TYPE")
+			offset[2] = strings.Index(line, "MOUNT")
+		}
+	}
+	if lv, ok := mp[fstype]; ok {
+		// found a lv with correct fstype, try to mount it
+		loglv, ok2 := mp["jfs2log"]
+		if fstype == "jfs2" && ok2 {
+			options = append(options, "log=/dev/"+loglv)
+		}
+		glog.V(4).Infof("Attempting to mount pre-formatted disk: %s %s %s", fstype, source, target)
+		return mounter.Interface.Mount("/dev/"+lv, target, fstype, options)
+	}
+
+	// Couldn't find any proper lv on disk, need to format the pv.
+	if fstype == "jfs2" {
+		fsOut, fsErr := mounter.Exec.Run("mkfs", "-o", "log=INLINE", "-V", fstype, "/dev/"+source)
+		if fsErr != nil {
+			glog.V(4).Infof("mkfs failed %s", string(fsOut))
+			return fsErr
+		}
+		return mounter.Interface.Mount("/dev/"+source, target, fstype, inlineOpts)
+	}
+
+	// Handle JFS filesystems. Looks like they don't have inline logs,
+	// so we need VG / LVs for logs. Give custom lv names.
+	lvname := LvPrefix + source // e.g k8s_lvhdiskXX
+	loglvname := "log" + lvname
+	// Try to associate a new volume group to the disk
+	glog.V(4).Infof("Attempting to format/mount disk: %s %s %s", fstype, source, target)
+	vgOut, vgErr := mounter.Exec.Run("mkvg", source)
+	if vgErr != nil {
+		// VG association failed.
+		glog.V(4).Infof("failed to associate VG to disk: %s %s", source, string(vgOut))
+		return vgErr
+	}
+	vgname := strings.TrimSpace(string(vgOut))
+
+	// Get the number of free physical partitions on the disk.
+	pvOut, pvErr := mounter.Exec.Run("lspv", source)
+	if pvErr != nil {
+		glog.V(4).Infof("lspv failed %s", string(pvOut))
+		return pvErr
+	}
+	var numpp int64 = 0
+	scanner = bufio.NewScanner(strings.NewReader(string(pvOut)))
+	for scanner.Scan() {
+		data := strings.Fields(scanner.Text())
+		// Require all three fields before indexing: the previous code
+		// indexed data[0] unconditionally and panicked on blank lines.
+		if len(data) >= 3 && data[0] == "FREE" && data[1] == "PPs:" {
+			numpp, _ = strconv.ParseInt(data[2], 10, 32)
+			numpp = numpp - 1 // reserve 1 pp for the jfslog LV
+		}
+	}
+	if numpp <= 0 {
+		return fmt.Errorf("not enough free partitions on disk")
+	}
+
+	logOpt := ""
+	// Create a log logical volume for jfs, give 1 partition to it
+	_, lgErr := mounter.Exec.Run("mklv", "-y", loglvname, "-t", "jfslog", vgname, "1")
+	if lgErr == nil {
+		logOpt = "log=/dev/" + loglvname
+		options = append(options, logOpt)
+	}
+
+	lvOut, lvErr := mounter.Exec.Run("mklv", "-y", lvname, "-t", fstype, vgname, strconv.Itoa(int(numpp)), source)
+	if lvErr != nil {
+		glog.V(4).Infof("mklv failed %s", string(lvOut))
+		return lvErr
+	}
+
+	fsOut, fsErr := mounter.Exec.Run("mkfs", "-o", logOpt, "-V", fstype, "/dev/"+lvname)
+	if fsErr != nil {
+		glog.V(4).Infof("mkfs failed %s", string(fsOut))
+		return fsErr
+	}
+	return mounter.Interface.Mount("/dev/"+lvname, target, fstype, options)
+}
+
+// PrepareSafeSubpath is a pass-through in this implementation: the subpath
+// is returned unchanged with no cleanup action (no bind-mount hardening).
+func (mounter *Mounter) PrepareSafeSubpath(subPath Subpath) (newHostPath string, cleanupAction func(), err error) {
+	return subPath.Path, nil, nil
+}
+
+// CleanSubPaths is a no-op in this implementation; nothing is created by
+// PrepareSafeSubpath, so there is nothing to tear down.
+func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error {
+	return nil
+}
+
+// SafeMakeDir is a no-op in this implementation and always succeeds.
+func (mounter *Mounter) SafeMakeDir(pathname string, base string, perm os.FileMode) error {
+	return nil
+}
+
+// GetSELinuxSupport always reports false: SELinux is a Linux kernel feature
+// and is not available on AIX.
+func (mounter *Mounter) GetSELinuxSupport(pathname string) (bool, error) {
+	return false, nil
+}
diff --git a/pkg/util/mount/mount_unsupported.go b/pkg/util/mount/mount_unsupported.go
index acad6ea..2bc2d47 100644
--- a/pkg/util/mount/mount_unsupported.go
+++ b/pkg/util/mount/mount_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows
+// +build !linux,!windows,!aix
 
 /*
 Copyright 2014 The Kubernetes Authors.
@@ -121,3 +121,7 @@ func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error {
 func (mounter *Mounter) SafeMakeDir(pathname string, base string, perm os.FileMode) error {
 	return nil
 }
+
+func (mounter *Mounter) GetSELinuxSupport(pathname string) (bool, error) {
+	return false, errors.New("not implemented")
+}
diff --git a/pkg/version/base.go b/pkg/version/base.go
index 164b64c..8f210be 100644
--- a/pkg/version/base.go
+++ b/pkg/version/base.go
@@ -55,9 +55,9 @@ var (
 	// NOTE: The $Format strings are replaced during 'git archive' thanks to the
 	// companion .gitattributes file containing 'export-subst' in this same
 	// directory.  See also https://git-scm.com/docs/gitattributes
-	gitVersion   = "v0.0.0-master+fc32d2f3698e3"
+	gitVersion   = "v1.10.0+aix"
 	gitCommit    = "fc32d2f3698e36b93322a3465f63a14e9f0eaead" // sha1 from git, output of $(git rev-parse HEAD)
 	gitTreeState = ""            // state of git tree, either "clean" or "dirty"
 
-	buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
+	buildDate = "2018-06-11T15:51:07Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
 )
diff --git a/pkg/volume/empty_dir/empty_dir_aix.go b/pkg/volume/empty_dir/empty_dir_aix.go
new file mode 100644
index 0000000..7bbd8fe
--- /dev/null
+++ b/pkg/volume/empty_dir/empty_dir_aix.go
@@ -0,0 +1,52 @@
+// +build aix
+
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package empty_dir
+
+import (
+	"bufio"
+	"fmt"
+	"os/exec"
+	"strings"
+
+	"github.com/golang/glog"
+	"k8s.io/api/core/v1"
+	"k8s.io/kubernetes/pkg/util/mount"
+)
+
+// realMountDetector implements mountDetector in terms of syscalls.
+type realMountDetector struct {
+	mounter mount.Interface
+}
+
+func (m *realMountDetector) GetMountMedium(path string) (v1.StorageMedium, bool, error) {
+	glog.V(5).Infof("Determining mount medium of %v", path)
+	notMnt, err := m.mounter.IsLikelyNotMountPoint(path)
+	if err != nil {
+		return v1.StorageMediumDefault, false, fmt.Errorf("IsLikelyNotMountPoint(%q): %v", path, err)
+	}
+	out, _ := exec.Command("df", path).CombinedOutput()
+	scanner := bufio.NewScanner(strings.NewReader(string(out)))
+	for scanner.Scan() {
+		fld := strings.Fields(scanner.Text())
+		if len(fld) > 0 && fld[len(fld)-1] == path && strings.Contains(fld[0], "ramdisk") {
+			return v1.StorageMediumMemory, !notMnt, nil
+		}
+	}
+	return v1.StorageMediumDefault, !notMnt, nil
+}
diff --git a/pkg/volume/empty_dir/empty_dir_unsupported.go b/pkg/volume/empty_dir/empty_dir_unsupported.go
index defbfc5..a4bff1e 100644
--- a/pkg/volume/empty_dir/empty_dir_unsupported.go
+++ b/pkg/volume/empty_dir/empty_dir_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+// +build !linux,!aix
 
 /*
 Copyright 2015 The Kubernetes Authors.
diff --git a/pkg/volume/fc/fc_aix.go b/pkg/volume/fc/fc_aix.go
new file mode 100644
index 0000000..70e3955
--- /dev/null
+++ b/pkg/volume/fc/fc_aix.go
@@ -0,0 +1,221 @@
+// +build aix
+
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fc
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/golang/glog"
+	"k8s.io/api/core/v1"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/pkg/volume"
+	aixvolutil "k8s.io/kubernetes/pkg/volume/util"
+)
+
+type ioHandler interface {
+	ReadDir(dirname string) ([]os.FileInfo, error)
+	Lstat(name string) (os.FileInfo, error)
+	EvalSymlinks(path string) (string, error)
+	WriteFile(filename string, data []byte, perm os.FileMode) error
+}
+
+type osIOHandler struct{}
+
+func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
+	return ioutil.ReadDir(dirname)
+}
+func (handler *osIOHandler) Lstat(name string) (os.FileInfo, error) {
+	return os.Lstat(name)
+}
+func (handler *osIOHandler) EvalSymlinks(path string) (string, error) {
+	return filepath.EvalSymlinks(path)
+}
+func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
+	return ioutil.WriteFile(filename, data, perm)
+}
+
+// given a wwn and lun, find the hdisk device and associated hdiskpower parent if any
+func findDisk(wwn, lun string, io ioHandler) (string, string) {
+	// Sanitize input
+	wwn = strings.ToLower(wwn)
+	if !strings.HasPrefix(wwn, "0x") {
+		wwn = "0x" + wwn
+	}
+
+	// Get the list of disks
+	out, err := exec.Command("lsdev", "-Ccdisk", "-F", "name").Output()
+	if err != nil {
+		return "", ""
+	}
+	pattern := regexp.MustCompile(`(?m)^\s*name = "([a-zA-Z]+[0-9]+)"$`)
+	scanner := bufio.NewScanner(bytes.NewReader(out))
+	for scanner.Scan() {
+		pvName := scanner.Text()
+		if strings.HasPrefix(pvName, "hdiskpower") {
+			continue
+		}
+		lsOut, err := exec.Command("lsattr", "-El", pvName, "-a", "ww_name", "-a", "lun_id").Output()
+		if err != nil {
+			continue
+		}
+		lunId, mp := aixvolutil.ParseLsattrOutputForHdisk(lsOut)
+		if strings.ToLower(mp["ww_name"]) == wwn && strconv.Itoa(lunId) == lun {
+			// Check if it is attached to an hdiskpower device
+			odmOut, err := exec.Command("odmget", "-q", fmt.Sprintf("attribute=pnpath and value=%s", pvName), "CuAt").Output()
+			if err != nil {
+				// Shouldn't happen even if it has no pnpath; safer to avoid this disk
+				continue
+			}
+			if sub := pattern.FindSubmatch(odmOut); sub != nil {
+				return pvName, string(sub[1])
+			}
+			return pvName, ""
+		}
+	}
+	return "", ""
+}
+
+// given a wwid, find the hdisk device and associated hdiskpower parent if any
+func findDiskWWIDs(wwid string, io ioHandler) (string, string) {
+	// Not supported on AIX: how do we retrieve the WWID of a disk?
+	return "", ""
+}
+
+// make a directory like /var/lib/kubelet/plugins/kubernetes.io/pod/fc/target-lun-0
+func makePDNameInternal(host volume.VolumeHost, wwns []string, lun string) string {
+	return path.Join(host.GetPluginDir(fcPluginName), wwns[0]+"-lun-"+lun)
+}
+
+// make a directory like /var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/target-lun-0
+func makeVDPDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string {
+	if len(wwns) != 0 {
+		return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwns[0]+"-lun-"+lun)
+	} else {
+		return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), wwids[0])
+	}
+}
+
+type FCUtil struct{}
+
+func (util *FCUtil) MakeGlobalPDName(fc fcDisk) string {
+	return makePDNameInternal(fc.plugin.host, fc.wwns, fc.lun)
+}
+
+// Global volume device plugin dir
+func (util *FCUtil) MakeGlobalVDPDName(fc fcDisk) string {
+	return makeVDPDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids)
+}
+
+func searchDisk(b fcDiskMounter) (string, error) {
+	var diskIds []string
+	var disk string
+	var dm string
+	io := b.io
+	wwids := b.wwids
+	wwns := b.wwns
+	lun := b.lun
+
+	if len(wwns) != 0 {
+		diskIds = wwns
+	} else {
+		diskIds = wwids
+	}
+
+	for _, diskId := range diskIds {
+		if len(wwns) != 0 {
+			disk, dm = findDisk(diskId, lun, io)
+		} else {
+			disk, dm = findDiskWWIDs(diskId, io)
+		}
+		if disk != "" {
+			break
+		}
+	}
+	// if no disk matches input wwn and lun, exit
+	if disk == "" && dm == "" {
+		return "", fmt.Errorf("no fc disk found")
+	}
+
+	// if hdiskpower device is found, use it; otherwise use raw disk
+	if dm != "" {
+		return dm, nil
+	}
+	return disk, nil
+}
+
+func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) {
+	devicePath, err := searchDisk(b)
+	if err != nil {
+		return "", err
+	}
+	// TODO: remove feature gate check after no longer needed
+	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
+		// If the volumeMode is 'Block', plugin don't have to format the volume.
+		// The globalPDPath will be created by operationexecutor. Just return devicePath here.
+		glog.V(5).Infof("fc: AttachDisk volumeMode: %s, devicePath: %s", b.volumeMode, devicePath)
+		if b.volumeMode == v1.PersistentVolumeBlock {
+			return devicePath, nil
+		}
+	}
+
+	// mount it
+	globalPDPath := b.manager.MakeGlobalPDName(*b.fcDisk)
+	if err := os.MkdirAll(globalPDPath, 0750); err != nil {
+		return devicePath, fmt.Errorf("fc: failed to mkdir %s: %v", globalPDPath, err)
+	}
+
+	noMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
+	if err != nil {
+		return devicePath, fmt.Errorf("Heuristic determination of mount point failed:%v", err)
+	}
+	if !noMnt {
+		glog.Infof("fc: %s already mounted", globalPDPath)
+		return devicePath, nil
+	}
+
+	err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil)
+	if err != nil {
+		return "", fmt.Errorf("fc: failed to mount fc volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err)
+	}
+
+	return devicePath, err
+}
+
+func (util *FCUtil) DetachDisk(c fcDiskUnmounter, mntPath string) error {
+	if err := c.mounter.Unmount(mntPath); err != nil {
+		return fmt.Errorf("fc detach disk: failed to unmount: %s\nError: %v", mntPath, err)
+	}
+	return nil
+}
+
+func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error {
+	// nothing to do
+	return nil
+}
diff --git a/pkg/volume/fc/fc_util.go b/pkg/volume/fc/fc_util.go
index 3908275..221322f 100644
--- a/pkg/volume/fc/fc_util.go
+++ b/pkg/volume/fc/fc_util.go
@@ -1,3 +1,5 @@
+// +build !aix
+
 /*
 Copyright 2015 The Kubernetes Authors.
 
diff --git a/pkg/volume/iscsi/iscsi_aix.go b/pkg/volume/iscsi/iscsi_aix.go
new file mode 100755
index 0000000..2368036
--- /dev/null
+++ b/pkg/volume/iscsi/iscsi_aix.go
@@ -0,0 +1,436 @@
+// +build aix
+
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package iscsi
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/util/mount"
+	"k8s.io/kubernetes/pkg/volume"
+	aixvolutil "k8s.io/kubernetes/pkg/volume/util"
+)
+
+func updateISCSIDiscoverydb(b iscsiDiskMounter) error {
+	doAuth := b.chap_discovery || b.chap_session
+	out, err := b.exec.Run("lsattr", "-El", "iscsi0")
+	if err != nil {
+		return fmt.Errorf("iscsi: failed to update discoverydb, output: %v", string(out))
+	}
+	// parse discovery policy. Policy could be one of "odm", "file", "slp"
+	policy := "file"
+	discFileName := "/etc/iscsi/targets"
+	scanner := bufio.NewScanner(strings.NewReader(string(out)))
+	for scanner.Scan() {
+		data := strings.Fields(scanner.Text())
+		if len(data) > 1 && data[0] == "disc_policy" {
+			policy = data[1]
+		} else if len(data) > 1 && data[0] == "disc_filename" {
+			discFileName = data[1]
+		}
+	}
+	iscsi := *b.iscsiDisk
+	ipPort := strings.Split(iscsi.Portals[0], ":")
+	if policy == "file" {
+		// append target ip/iqn in discFileName if not already present
+		targetData := ipPort[0] + " " + ipPort[1] + " " + iscsi.Iqn
+		if doAuth {
+			targetData += " \"" + b.secret["node.session.auth.password"] + "\""
+		}
+		f, err := os.OpenFile(discFileName, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0600)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		scanner = bufio.NewScanner(f)
+		found := false
+		for scanner.Scan() {
+			if strings.TrimSpace(scanner.Text()) == targetData {
+				found = true
+				break
+			}
+		}
+
+		if found == false {
+			if _, err = f.WriteString(targetData + "\n"); err != nil {
+				return err
+			}
+		}
+	} else if policy == "odm" {
+		if doAuth {
+			out, err = b.exec.Run("mkiscsi", "-l", "iscsi0", "-g", "static", "-t", iscsi.Iqn, "-n", ipPort[1], "-i", ipPort[0], "-p", b.secret["node.session.auth.password"])
+		} else {
+			out, err = b.exec.Run("mkiscsi", "-l", "iscsi0", "-g", "static", "-t", iscsi.Iqn, "-n", ipPort[1], "-i", ipPort[0])
+		}
+		if err != nil {
+			return fmt.Errorf("iscsi: failed to update discoverydb, output: %v", string(out))
+		}
+	}
+	// TODO: handle "slp" policy
+	return nil
+}
+
+// return a list of new physical volumes associated with the iscsi target
+func updateISCSINode(b iscsiDiskMounter) ([]string, error) {
+	res := []string{}
+	out, err := b.exec.Run("cfgmgr", "-l", "iscsi0")
+	if err != nil {
+		return res, fmt.Errorf("iscsi: failed to update node, output: %v", string(out))
+	}
+	// Get the list of iscsi disks
+	out, err = b.exec.Run("lsdev", "-Ccdisk", "-s", "iscsi")
+	if err != nil {
+		return res, fmt.Errorf("iscsi: failed to update node, output: %v", string(out))
+	}
+	scanner := bufio.NewScanner(strings.NewReader(string(out)))
+	for scanner.Scan() {
+		pvName := strings.Fields(scanner.Text())[0]
+		out, err := b.exec.Run("lsattr", "-El", pvName)
+		if err != nil {
+			//glog("iscsi: failed to update node, invalid pv, output: %v", string(out))
+			continue
+		}
+		lunId, mp := aixvolutil.ParseLsattrOutputForHdisk(out)
+		portNum, _ := strconv.ParseUint(mp["port_num"], 0, 32)
+		iscsi := *b.iscsiDisk
+		if iscsi.Portals[0] == mp["host_addr"]+":"+strconv.Itoa(int(portNum)) &&
+			strconv.Itoa(lunId) == iscsi.Lun && mp["target_name"] == iscsi.Iqn {
+			res = append(res, pvName)
+		}
+	}
+	return res, nil
+}
+
+// stat a path, if not exists, retry maxRetries times
+// when iscsi transports other than default are used,  use glob instead as pci id of device is unknown
+type StatFunc func(string) (os.FileInfo, error)
+type GlobFunc func(string) ([]string, error)
+
+func waitForPathToExist(devicePath *string, maxRetries int, deviceTransport string) bool {
+	// This makes unit testing a lot easier
+	return waitForPathToExistInternal(devicePath, maxRetries, deviceTransport, os.Stat, filepath.Glob)
+}
+
+func waitForPathToExistInternal(devicePath *string, maxRetries int, deviceTransport string, osStat StatFunc, filepathGlob GlobFunc) bool {
+	if devicePath != nil {
+		for i := 0; i < maxRetries; i++ {
+			var err error
+			if deviceTransport == "tcp" {
+				_, err = osStat(*devicePath)
+			} else {
+				fpath, _ := filepathGlob(*devicePath)
+				if fpath == nil {
+					err = os.ErrNotExist
+				} else {
+					// There might be a case that fpath contains multiple device paths if
+					// multiple PCI devices connect to same iscsi target. We handle this
+					// case at subsequent logic. Pick up only first path here.
+					*devicePath = fpath[0]
+				}
+			}
+			if err == nil {
+				return true
+			}
+			if err != nil && !os.IsNotExist(err) {
+				return false
+			}
+			if i == maxRetries-1 {
+				break
+			}
+			time.Sleep(time.Second)
+		}
+	}
+	return false
+}
+
+// getDevicePrefixRefCount: given a prefix of device path, find its reference count from /proc/mounts
+// returns the reference count to the device and error code
+// for services like iscsi construct multiple device paths with the same prefix pattern.
+// this function aggregates all references to a service based on the prefix pattern
+// More specifically, this prefix semantics is to aggregate disk paths that belong to the same iSCSI target/iqn pair.
+// an iSCSI target could expose multiple LUNs through the same IQN, and Linux iSCSI initiator creates disk paths that start the same prefix but end with different LUN number
+// When we decide whether it is time to logout a target, we have to see if none of the LUNs are used any more.
+// That's where the prefix based ref count kicks in. If we only count the disks using exact match, we could log other disks out.
+func getDevicePrefixRefCount(mounter mount.Interface, deviceNamePrefix string) (int, error) {
+	mps, err := mounter.List()
+	if err != nil {
+		return -1, err
+	}
+
+	// Find the number of references to the device.
+	refCount := 0
+	for i := range mps {
+		if strings.HasPrefix(mps[i].Path, deviceNamePrefix) {
+			refCount++
+		}
+	}
+	return refCount, nil
+}
+
+// make a directory like /var/lib/kubelet/plugins/kubernetes.io/iscsi/iface_name/portal-some_iqn-lun-lun_id
+func makePDNameInternal(host volume.VolumeHost, portal string, iqn string, lun string, iface string) string {
+	return path.Join(host.GetPluginDir(iscsiPluginName), "iface-"+iface, portal+"-"+iqn+"-lun-"+lun)
+}
+
+// make a directory like /var/lib/kubelet/plugins/kubernetes.io/iscsi/volumeDevices/iface_name/portal-some_iqn-lun-lun_id
+func makeVDPDNameInternal(host volume.VolumeHost, portal string, iqn string, lun string, iface string) string {
+	return path.Join(host.GetVolumeDevicePluginDir(iscsiPluginName), "iface-"+iface, portal+"-"+iqn+"-lun-"+lun)
+}
+
+type ISCSIUtil struct{}
+
+// MakeGlobalPDName returns path of global plugin dir
+func (util *ISCSIUtil) MakeGlobalPDName(iscsi iscsiDisk) string {
+	return makePDNameInternal(iscsi.plugin.host, iscsi.Portals[0], iscsi.Iqn, iscsi.Lun, iscsi.Iface)
+}
+
+// MakeGlobalVDPDName returns path of global volume device plugin dir
+func (util *ISCSIUtil) MakeGlobalVDPDName(iscsi iscsiDisk) string {
+	return makeVDPDNameInternal(iscsi.plugin.host, iscsi.Portals[0], iscsi.Iqn, iscsi.Lun, iscsi.Iface)
+}
+
+func (util *ISCSIUtil) persistISCSI(conf iscsiDisk, mnt string) error {
+	file := path.Join(mnt, "iscsi.json")
+	fp, err := os.Create(file)
+	if err != nil {
+		return fmt.Errorf("iscsi: create %s err %s", file, err)
+	}
+	defer fp.Close()
+	encoder := json.NewEncoder(fp)
+	if err = encoder.Encode(conf); err != nil {
+		return fmt.Errorf("iscsi: encode err: %v.", err)
+	}
+	return nil
+}
+
+func (util *ISCSIUtil) loadISCSI(conf *iscsiDisk, mnt string) error {
+	// NOTE: The iscsi config json is not deleted after logging out from target portals.
+	file := path.Join(mnt, "iscsi.json")
+	fp, err := os.Open(file)
+	if err != nil {
+		return fmt.Errorf("iscsi: open %s err %s", file, err)
+	}
+	defer fp.Close()
+	decoder := json.NewDecoder(fp)
+	if err = decoder.Decode(conf); err != nil {
+		return fmt.Errorf("iscsi: decode err: %v.", err)
+	}
+	return nil
+}
+
+func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
+	var pvName string
+	var pvNames []string
+
+	err := updateISCSIDiscoverydb(b)
+	if err != nil {
+		return "", err
+	}
+	pvNames, lastErr := updateISCSINode(b)
+	if len(pvNames) == 0 {
+		glog.Errorf("iscsi: failed to get any path for iscsi disk, last err seen:\n%v", lastErr)
+		return "", fmt.Errorf("failed to get any path for iscsi disk, last err seen:\n%v", lastErr)
+	}
+
+	//Make sure we use a valid devicepath to find mpio device.
+	pvName = pvNames[0]
+
+	// mount it
+	globalPDPath := b.manager.MakeGlobalPDName(*b.iscsiDisk)
+	notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
+	if err == nil && !notMnt {
+		glog.Infof("iscsi: %s already mounted", globalPDPath)
+		return "", nil
+	}
+
+	if err := os.MkdirAll(globalPDPath, 0750); err != nil {
+		glog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath)
+		return "", err
+	}
+
+	// Persist iscsi disk config to json file for DetachDisk path
+	util.persistISCSI(*(b.iscsiDisk), globalPDPath)
+
+	for _, path := range pvNames {
+		// There shouldnt be any empty physical volume name. However adding this check
+		// for safer side to avoid the possibility of an empty entry.
+		if path == "" {
+			continue
+		}
+		// check if the dev is using mpio and if so mount it via the dm-XX device
+		if mappedDevicePath := b.deviceUtil.FindMultipathDeviceForDevice(path); mappedDevicePath != "" {
+			pvName = mappedDevicePath
+			break
+		}
+	}
+	err = b.mounter.FormatAndMount(pvName, globalPDPath, b.fsType, nil)
+	if err != nil {
+		glog.Errorf("iscsi: failed to mount iscsi volume %s [%s] to %s, error %v", pvName, b.fsType, globalPDPath, err)
+	}
+
+	return pvName, err
+}
+
+func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
+	_, cnt, err := mount.GetDeviceNameFromMount(c.mounter, mntPath)
+	if err != nil {
+		glog.Errorf("iscsi detach disk: failed to get device from mnt: %s\nError: %v", mntPath, err)
+		return err
+	}
+	if err = c.mounter.Unmount(mntPath); err != nil {
+		glog.Errorf("iscsi detach disk: failed to unmount: %s\nError: %v", mntPath, err)
+		return err
+	}
+	cnt--
+	// if device is no longer used, see if need to logout the target
+	if cnt == 0 {
+		device, prefix, err := extractDeviceAndPrefix(mntPath)
+		if err != nil {
+			return err
+		}
+		refCount, err := getDevicePrefixRefCount(c.mounter, prefix)
+		if err == nil && refCount == 0 {
+			var bkpPortal []string
+			var iqn, iface string
+			found := true
+
+			// load iscsi disk config from json file
+			if err := util.loadISCSI(c.iscsiDisk, mntPath); err == nil {
+				bkpPortal, iqn, iface = c.iscsiDisk.Portals, c.iscsiDisk.Iqn, c.iscsiDisk.Iface
+			} else {
+				// If the iscsi disk config is not found, fall back to the original behavior.
+				// This portal/iqn/iface is no longer referenced, log out.
+				// Extract the portal and iqn from device path.
+				bkpPortal = make([]string, 1)
+				bkpPortal[0], iqn, err = extractPortalAndIqn(device)
+				if err != nil {
+					return err
+				}
+				// Extract the iface from the mountPath and use it to log out. If the iface
+				// is not found, maintain the previous behavior to facilitate kubelet upgrade.
+				// Logout may fail as no session may exist for the portal/IQN on the specified interface.
+				iface, found = extractIface(mntPath)
+			}
+			for _, portal := range removeDuplicate(bkpPortal) {
+				logout := []string{"-m", "node", "-p", portal, "-T", iqn, "--logout"}
+				delete := []string{"-m", "node", "-p", portal, "-T", iqn, "-o", "delete"}
+				if found {
+					logout = append(logout, []string{"-I", iface}...)
+					delete = append(delete, []string{"-I", iface}...)
+				}
+				glog.Infof("iscsi: log out target %s iqn %s iface %s", portal, iqn, iface)
+				out := "iscsi target cleanup not implemented on AIX"
+				glog.Errorf("iscsi: failed to detach disk Error: %s", string(out))
+
+				// Delete the node record
+				glog.Infof("iscsi: delete node record target %s iqn %s", portal, iqn)
+				glog.Errorf("iscsi: failed to delete node record Error: %s", string(out))
+			}
+		}
+	}
+	return nil
+}
+
+func extractTransportname(ifaceOutput string) (iscsiTransport string) {
+	re := regexp.MustCompile(`iface.transport_name = (.*)\n`)
+
+	rex_output := re.FindStringSubmatch(ifaceOutput)
+	if rex_output != nil {
+		iscsiTransport = rex_output[1]
+	} else {
+		return ""
+	}
+
+	// While iface.transport_name is a required parameter, handle it being unspecified anyways
+	if iscsiTransport == "<empty>" {
+		iscsiTransport = "tcp"
+	}
+	return iscsiTransport
+}
+
+func extractDeviceAndPrefix(mntPath string) (string, string, error) {
+	ind := strings.LastIndex(mntPath, "/")
+	if ind < 0 {
+		return "", "", fmt.Errorf("iscsi detach disk: malformatted mnt path: %s", mntPath)
+	}
+	device := mntPath[(ind + 1):]
+	// strip -lun- from mount path
+	ind = strings.LastIndex(mntPath, "-lun-")
+	if ind < 0 {
+		return "", "", fmt.Errorf("iscsi detach disk: malformatted mnt path: %s", mntPath)
+	}
+	prefix := mntPath[:ind]
+	return device, prefix, nil
+}
+
+func extractIface(mntPath string) (string, bool) {
+	re := regexp.MustCompile(`.+/iface-([^/]+)/.+`)
+
+	re_output := re.FindStringSubmatch(mntPath)
+	if re_output != nil {
+		return re_output[1], true
+	}
+
+	return "", false
+}
+
+func extractPortalAndIqn(device string) (string, string, error) {
+	ind1 := strings.Index(device, "-")
+	if ind1 < 0 {
+		return "", "", fmt.Errorf("iscsi detach disk: no portal in %s", device)
+	}
+	portal := device[0:ind1]
+	ind2 := strings.Index(device, "iqn.")
+	if ind2 < 0 {
+		ind2 = strings.Index(device, "eui.")
+	}
+	ind := strings.LastIndex(device, "-lun-")
+	if ind2 < 0 || ind < ind2 {
+		return "", "", fmt.Errorf("iscsi detach disk: no iqn in %s", device)
+	}
+	iqn := device[ind2:ind]
+	return portal, iqn, nil
+}
+
+// Remove duplicates or string
+func removeDuplicate(s []string) []string {
+	m := map[string]bool{}
+	for _, v := range s {
+		if v != "" && !m[v] {
+			s[len(m)] = v
+			m[v] = true
+		}
+	}
+	s = s[:len(m)]
+	return s
+}
+
+// DetachBlockISCSIDisk removes loopback device for a volume and detaches a volume from node
+func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string) error {
+	return fmt.Errorf("DetachBlockISCSIDisk unsupported")
+}
diff --git a/pkg/volume/iscsi/iscsi_util.go b/pkg/volume/iscsi/iscsi_util.go
index ad6382f..7de3288 100644
--- a/pkg/volume/iscsi/iscsi_util.go
+++ b/pkg/volume/iscsi/iscsi_util.go
@@ -1,3 +1,5 @@
+// +build !aix
+
 /*
 Copyright 2015 The Kubernetes Authors.
 
diff --git a/pkg/volume/util/fs/fs_aix.go b/pkg/volume/util/fs/fs_aix.go
new file mode 100644
index 0000000..12180da
--- /dev/null
+++ b/pkg/volume/util/fs/fs_aix.go
@@ -0,0 +1,85 @@
+// +build aix
+
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fs
+
+import (
+	"fmt"
+	"os/exec"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+// FSInfo AIX returns (available bytes, byte capacity, byte usage, total inodes, inodes free, inode usage, error)
+// for the filesystem that path resides upon.
+func FsInfo(path string) (int64, int64, int64, int64, int64, int64, error) {
+	statfs := &syscall.Statfs_t{}
+	err := syscall.Statfs(path, statfs)
+	if err != nil {
+		return 0, 0, 0, 0, 0, 0, err
+	}
+
+	// Available is blocks available * fragment size
+	available := int64(statfs.Bavail) * int64(statfs.Bsize)
+
+	// Capacity is total block count * fragment size
+	capacity := int64(statfs.Blocks) * int64(statfs.Bsize)
+
+	// Usage is block being used * fragment size (aka block size).
+	usage := (int64(statfs.Blocks) - int64(statfs.Bfree)) * int64(statfs.Bsize)
+
+	inodes := int64(statfs.Files)
+	inodesFree := int64(statfs.Ffree)
+	inodesUsed := inodes - inodesFree
+
+	return available, capacity, usage, inodes, inodesFree, inodesUsed, nil
+}
+
+func Du(path string) (*resource.Quantity, error) {
+	// Uses the same niceness level as cadvisor.fs does when running du.
+	// AIX du has no -B flag; du -s reports 512-byte blocks, hence the *512 scaling below.
+	out, err := exec.Command("nice", "-n", "19", "du", "-s", path).CombinedOutput()
+	if err != nil {
+		return nil, fmt.Errorf("failed command 'du' ($ nice -n 19 du -s) on path %s with error %v", path, err)
+	}
+	used, err := strconv.Atoi(strings.Fields(string(out))[0])
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse 'du' output %s due to error %v", out, err)
+	}
+	return resource.NewQuantity(int64(used) * 512, resource.BinarySI), nil
+}
+
+// Find counts files and directories under path via `find <path> -xdev | wc -l`
+// (AIX find lacks -printf). The count is a good approximation of inodes used.
+func Find(path string) (int64, error) {
+	if path == "" {
+		return 0, fmt.Errorf("invalid directory")
+	}
+	out, err := exec.Command("/bin/sh", "-c", fmt.Sprintf("find '%s' -xdev | wc -l", path)).Output()
+	if err != nil {
+		return 0, fmt.Errorf("failed command 'find' on path %s with error %v", path, err)
+	}
+	count, err := strconv.Atoi(strings.Fields(string(out))[0])
+	if err != nil {
+		return 0, fmt.Errorf("failed to parse 'find' output %s due to error %v", out, err)
+	}
+	return int64(count), nil
+}
diff --git a/pkg/volume/util/fs/fs_unsupported.go b/pkg/volume/util/fs/fs_unsupported.go
index da41fc8..a220fee 100644
--- a/pkg/volume/util/fs/fs_unsupported.go
+++ b/pkg/volume/util/fs/fs_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!darwin
+// +build !linux,!darwin,!aix
 
 /*
 Copyright 2014 The Kubernetes Authors.
diff --git a/pkg/volume/util/util_aix.go b/pkg/volume/util/util_aix.go
new file mode 100644
index 0000000..ed77cf2
--- /dev/null
+++ b/pkg/volume/util/util_aix.go
@@ -0,0 +1,138 @@
+// +build aix
+
+/*
+Copyright 2017 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"os/exec"
+	"strconv"
+	"strings"
+	"syscall"
+        "k8s.io/apimachinery/pkg/api/resource"
+)
+
+func FsInfo(path string) (int64, int64, int64, int64, int64, int64, error) {
+        statfs := &syscall.Statfs_t{}
+        err := syscall.Statfs(path, statfs)
+        if err != nil {
+                return 0, 0, 0, 0, 0, 0, err
+        }
+
+        // Available is blocks available * fragment size
+        available := int64(statfs.Bavail) * int64(statfs.Bsize)
+
+        // Capacity is total block count * fragment size
+        capacity := int64(statfs.Blocks) * int64(statfs.Bsize)
+
+        // Usage is block being used * fragment size (aka block size).
+        usage := (int64(statfs.Blocks) - int64(statfs.Bfree)) * int64(statfs.Bsize)
+
+        inodes := int64(statfs.Files)
+        inodesFree := int64(statfs.Ffree)
+        inodesUsed := inodes - inodesFree
+
+        return available, capacity, usage, inodes, inodesFree, inodesUsed, nil
+}
+
+func Du(path string) (*resource.Quantity, error) {
+        // Uses the same niceness level as cadvisor.fs does when running du
+        // AIX doesn't support (-B 1 to always scale to a blocksize of 1 byte)
+        out, err := exec.Command("nice", "-n", "19", "du", "-sk", path).CombinedOutput()
+        if err != nil {
+                return nil, fmt.Errorf("failed command 'du' ($ nice -n 19 du -sk) on path %s with error %v", path, err)
+        }
+        // du -sk reports KiB; scale by 1024 to get bytes. The local is named kb
+        // rather than "bytes" to avoid shadowing the imported bytes package.
+        kb, perr := strconv.ParseUint(strings.Fields(string(out))[0], 10, 64)
+        if perr != nil {
+                return nil, fmt.Errorf("failed to parse 'du' output %s due to error %v", out, perr)
+        }
+        used := resource.NewQuantity(int64(kb)*1024, resource.BinarySI)
+        return used, nil
+}
+
+// This func is called Find because linux implementation uses the "find" command to achieve the
+// desired purpose of counting inodes under the provided path. Since the function is mostly called
+// on volume mountpoints with their own filesystem, it looks more convenient on AIX to use "df" command.
+func Find(path string) (int64, error) {
+        if path == "" {
+                return 0, fmt.Errorf("invalid directory")
+        }
+        dfOut, dfErr := exec.Command("df", "-i", path).CombinedOutput()
+        if dfErr != nil {
+                return 0, fmt.Errorf("df -i failed. output: %s", string(dfOut))
+        }
+        scanner := bufio.NewScanner(strings.NewReader(string(dfOut)))
+        for scanner.Scan() {
+                flds := strings.Fields(scanner.Text())
+                if len(flds) > 5 && (flds[0] == path || flds[len(flds) - 1] == path) {
+                        // Filesystem 512-blocks Free %Used Iused %Iused Mounted_on
+                        return strconv.ParseInt(flds[4], 10, 64) 
+                }
+        }
+
+        // Provided path is not a filesystem mount point, so count files and directories to get an estimate
+        // of the number of inodes under path 
+        var counter byteCounter
+        var stderr bytes.Buffer
+        findCmd := exec.Command("find", path, "-xdev", "-exec", "printf", ".", ";")
+        findCmd.Stdout, findCmd.Stderr = &counter, &stderr
+        if err := findCmd.Start(); err != nil {
+                return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String())
+        }
+        if err := findCmd.Wait(); err != nil {
+                return 0, fmt.Errorf("cmd %v failed. stderr: %s; err: %v", findCmd.Args, stderr.String(), err)
+        }
+        return counter.bytesWritten, nil
+}
+
+// return lun id (-1 if error/not found, >= 0 otherwise) and a map of all other attribute/values
+func ParseLsattrOutputForHdisk(output []byte) (int, map[string]string) {
+        mp := make(map[string]string)
+        lunId := -1
+	if len(output) == 0 {
+		return lunId, mp
+	}
+        scanner := bufio.NewScanner(strings.NewReader(string(output)))
+        for scanner.Scan() {
+                attrs := strings.Fields(scanner.Text())
+                if len(attrs) > 1 {
+                        mp[attrs[0]] = attrs[1]
+                }
+        }
+        if val, ok := mp["lun_id"]; ok {
+                rawId, err := strconv.ParseUint(val, 0, 64)
+                if err != nil {
+                        return -1, mp
+                } else if rawId == 0 {
+                        lunId = 0
+                } else if rawId >> 48 <= 0xff {
+                        // short lun id format
+                         lunId = int(rawId >> 48)
+                } // TODO: handle other lun formats for lun id > 0xFF
+        }
+        return lunId, mp 
+}
+
+// Simple io.Writer implementation that counts how many bytes were written.
+type byteCounter struct{ bytesWritten int64 }
+
+func (b *byteCounter) Write(p []byte) (int, error) {
+	b.bytesWritten += int64(len(p))
+	return len(p), nil
+}
diff --git a/pkg/volume/util/volumepathhandler/volume_path_handler_aix.go b/pkg/volume/util/volumepathhandler/volume_path_handler_aix.go
new file mode 100644
index 0000000..93344ca
--- /dev/null
+++ b/pkg/volume/util/volumepathhandler/volume_path_handler_aix.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volumepathhandler
+
+import (
+	"github.com/golang/glog"
+)
+
+// AttachFileDevice takes a path to a regular file and makes it available as an
+// attached block device.
+func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
+	glog.Infof("AttachFileDevice %s", path)
+	return path, nil
+}
+
+// GetLoopDevice returns the full path to the loop device associated with the given path.
+func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
+	glog.Infof("GetLoopDevice %s", path)
+	return path, nil
+}
+
+// RemoveLoopDevice removes specified loopback device
+func (v VolumePathHandler) RemoveLoopDevice(device string) error {
+	glog.Infof("RemoveLoopDevice %s", device)
+	return nil
+}
diff --git a/pkg/volume/util/volumepathhandler/volume_path_handler_unsupported.go b/pkg/volume/util/volumepathhandler/volume_path_handler_unsupported.go
index 266398b..ba4d1de 100644
--- a/pkg/volume/util/volumepathhandler/volume_path_handler_unsupported.go
+++ b/pkg/volume/util/volumepathhandler/volume_path_handler_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux
+// +build !aix,!linux
 
 /*
 Copyright 2018 The Kubernetes Authors.
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
index bab5737..bd88b11 100644
--- a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build darwin dragonfly freebsd linux netbsd opensbd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
 
 package invoke
 
diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go
index 89de892..93d17e1 100644
--- a/vendor/github.com/docker/docker/client/client_unix.go
+++ b/vendor/github.com/docker/docker/client/client_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris openbsd darwin
+// +build linux freebsd solaris openbsd darwin aix
 
 package client
 
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go
index af79a65..dcbb816 100644
--- a/vendor/github.com/docker/docker/pkg/system/mknod.go
+++ b/vendor/github.com/docker/docker/pkg/system/mknod.go
@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!aix
 
 package system
 
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_aix.go b/vendor/github.com/docker/docker/pkg/system/stat_aix.go
new file mode 100644
index 0000000..a4823dd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_aix.go
@@ -0,0 +1,20 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	mtim := syscall.Timespec{Sec: syscall.Timespec_sec_t(s.Mtim.Sec), Nsec: syscall.Timespec_nsec_t(s.Mtim.Nsec)}
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: mtim}, nil
+}
+
+// FromStatT converts a syscall.Stat_t type to a system.Stat_t type
+// This is exposed on Linux as pkg/archive/changes uses it.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go
index 5a10eda..b44601f 100644
--- a/vendor/github.com/docker/docker/pkg/system/umask.go
+++ b/vendor/github.com/docker/docker/pkg/system/umask.go
@@ -1,4 +1,4 @@
-// +build !windows
+// +build !windows,!aix
 
 package system
 
diff --git a/vendor/github.com/docker/docker/pkg/term/tc.go b/vendor/github.com/docker/docker/pkg/term/tc.go
index 6d2dfd3..12fe4c9 100644
--- a/vendor/github.com/docker/docker/pkg/term/tc.go
+++ b/vendor/github.com/docker/docker/pkg/term/tc.go
@@ -1,4 +1,5 @@
 // +build !windows
+// +build !aix
 // +build !solaris !cgo
 
 package term
diff --git a/vendor/github.com/docker/docker/pkg/term/tc_aix_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_aix_cgo.go
new file mode 100644
index 0000000..3531415
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/tc_aix_cgo.go
@@ -0,0 +1,53 @@
+// +build aix
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// #include <termios.h>
+import "C"
+
+// Termios is the Unix API for terminal I/O.
+// It is a pass-through for syscall.Termios in order to keep it portable to
+// other platforms where it is not available or handled differently.
+type Termios syscall.Termios
+
+// MakeRaw put the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if err := tcget(fd, &oldState.termios); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+
+	C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState)))
+	newState.Cc[C.VMIN] = 1
+	newState.Cc[C.VTIME] = 0
+
+	if err := tcset(fd, &newState); err != 0 {
+		return nil, err
+	}
+	return &oldState, nil
+}
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+	ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
+	if ret != 0 {
+		return err.(syscall.Errno)
+	}
+	return 0
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+	ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
+	if ret != 0 {
+		return err.(syscall.Errno)
+	}
+	return 0
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go
index 4f59d8d..816f8d7 100644
--- a/vendor/github.com/docker/docker/pkg/term/term.go
+++ b/vendor/github.com/docker/docker/pkg/term/term.go
@@ -10,8 +10,7 @@ import (
 	"io"
 	"os"
 	"os/signal"
-
-	"golang.org/x/sys/unix"
+	"syscall"
 )
 
 var (
@@ -80,7 +79,7 @@ func SaveState(fd uintptr) (*State, error) {
 // descriptor, with echo disabled.
 func DisableEcho(fd uintptr, state *State) error {
 	newState := state.termios
-	newState.Lflag &^= unix.ECHO
+	newState.Lflag &^= syscall.ECHO
 
 	if err := tcset(fd, &newState); err != 0 {
 		return err
diff --git a/vendor/github.com/docker/docker/pkg/term/winsize.go b/vendor/github.com/docker/docker/pkg/term/winsize.go
index f58367f..736da69 100644
--- a/vendor/github.com/docker/docker/pkg/term/winsize.go
+++ b/vendor/github.com/docker/docker/pkg/term/winsize.go
@@ -1,4 +1,4 @@
-// +build !solaris,!windows
+// +build !solaris,!windows,!aix
 
 package term
 
diff --git a/vendor/github.com/docker/docker/pkg/term/winsize_aix_cgo.go b/vendor/github.com/docker/docker/pkg/term/winsize_aix_cgo.go
new file mode 100644
index 0000000..de6954a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/winsize_aix_cgo.go
@@ -0,0 +1,41 @@
+// +build aix,cgo
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+/*
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <termios.h>
+
+// Small wrapper to get rid of variadic args of ioctl()
+int my_ioctl(int fd, u_int cmd, struct winsize *ws) {
+	return ioctl(fd, cmd, ws);
+}
+*/
+import "C"
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+	ws := &Winsize{}
+	ret, err := C.my_ioctl(C.int(fd), C.u_int(syscall.TIOCGWINSZ & 0xffffffff), (*C.struct_winsize)(unsafe.Pointer(ws)))
+	// A zero return value from ioctl indicates success.
+	if ret == 0 {
+		return ws, nil
+	}
+	return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+	ret, err := C.my_ioctl(C.int(fd), C.u_int(syscall.TIOCSWINSZ & 0xffffffff), (*C.struct_winsize)(unsafe.Pointer(ws)))
+	// A zero return value from ioctl indicates success.
+	if ret == 0 {
+		return nil
+	}
+	return err
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/aix.go b/vendor/github.com/fsnotify/fsnotify/aix.go
new file mode 100644
index 0000000..2f2296c
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/aix.go
@@ -0,0 +1,437 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	// sleepTime is the pause between polling cycles.
+)
+
+var (
+	ErrWatchedFileDeleted = errors.New("error: watched file or folder deleted")
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events    chan Event
+	Errors    chan error
+	mu        *sync.Mutex            // Map access
+	closed    chan struct{}          // Channel to respond to Close
+	close     chan struct{}
+	wg        *sync.WaitGroup
+	running   bool
+	names     map[string]bool        // bool for recursive or not.
+	files     map[string]os.FileInfo // map of files.
+	ignored   map[string]struct{}    // ignored files or directories.
+	ops       map[Op]struct{}        // Op filtering.
+	maxEvents int
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+        // Set up the WaitGroup for w.Wait().
+	var wg sync.WaitGroup
+        wg.Add(1)
+
+	w := &Watcher{
+                Events:  make(chan Event),
+		Errors:  make(chan error),
+		closed:  make(chan struct{}),
+		close:   make(chan struct{}),
+		mu:      new(sync.Mutex),
+                wg:      &wg,
+		files:   make(map[string]os.FileInfo),
+		ignored: make(map[string]struct{}),
+                names: make(map[string]bool),
+	}
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+        w.mu.Lock()
+	if !w.running {
+		w.mu.Unlock()
+		return nil
+	}
+	w.running = false
+	w.files = make(map[string]os.FileInfo)
+	w.names = make(map[string]bool)
+	w.mu.Unlock()
+	// Send a close signal to the Start method.
+        w.close <- struct{}{}
+        return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+        w.mu.Lock()
+	defer w.mu.Unlock()
+
+	name, err := filepath.Abs(name)
+	if err != nil {
+		return err
+	}
+
+	// If name is on the ignored list or if hidden files are
+	// ignored and name is a hidden file or directory, simply return.
+	_, ignored := w.ignored[name]
+	if ignored || (strings.HasPrefix(name, ".")) {
+		return nil
+	}
+
+	// Add the directory's contents to the files list.
+	fileList, err := w.list(name)
+	if err != nil {
+		return err
+	}
+	for k, v := range fileList {
+		w.files[k] = v
+	}
+
+	// Add the name to the names list.
+	w.names[name] = false
+
+        return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+        w.mu.Lock()
+	defer w.mu.Unlock()
+
+	name, err := filepath.Abs(name)
+	if err != nil {
+		return err
+	}
+
+	// Remove the name from w's names list.
+	delete(w.names, name)
+
+	// If name is a single file, remove it and return.
+	info, found := w.files[name]
+	if !found {
+		return nil // Doesn't exist, just return.
+	}
+	if !info.IsDir() {
+		delete(w.files, name)
+		return nil
+	}
+
+	// Delete the actual directory from w.files
+	delete(w.files, name)
+
+	// If it's a directory, delete all of it's contents from w.files.
+	for path := range w.files {
+		if filepath.Dir(path) == name {
+			delete(w.files, path)
+		}
+	}
+        return nil
+}
+
+func (w *Watcher) list(name string) (map[string]os.FileInfo, error) {
+	fileList := make(map[string]os.FileInfo)
+
+	// Make sure name exists.
+	stat, err := os.Stat(name)
+	if err != nil {
+		return nil, err
+	}
+
+	fileList[name] = stat
+
+	// If it's not a directory, just return.
+	if !stat.IsDir() {
+		return fileList, nil
+	}
+
+	// It's a directory.
+	fInfoList, err := ioutil.ReadDir(name)
+	if err != nil {
+		return nil, err
+	}
+	// Add all of the files in the directory to the file list as long
+	// as they aren't on the ignored list or are hidden files if ignoreHidden
+	// is set to true.
+	for _, fInfo := range fInfoList {
+		path := filepath.Join(name, fInfo.Name())
+		_, ignored := w.ignored[path]
+		if ignored || (strings.HasPrefix(fInfo.Name(), ".")) {
+			continue
+		}
+		fileList[path] = fInfo
+	}
+	return fileList, nil
+}
+
+func (w *Watcher) retrieveFileList() map[string]os.FileInfo {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	fileList := make(map[string]os.FileInfo)
+
+	var list map[string]os.FileInfo
+	var err error
+
+	for name, recursive := range w.names {
+		if recursive {
+			list, err = w.listRecursive(name)
+			if err != nil {
+				if os.IsNotExist(err) {
+					w.Errors <- ErrWatchedFileDeleted
+					w.mu.Unlock()
+					w.removeRecursive(name)
+					w.mu.Lock()
+				} else {
+					w.Errors <- err
+				}
+			}
+		} else {
+			list, err = w.list(name)
+			if err != nil {
+				if os.IsNotExist(err) {
+					w.Errors <- ErrWatchedFileDeleted
+					w.mu.Unlock()
+					w.Remove(name)
+					w.mu.Lock()
+				} else {
+					w.Errors <- err
+				}
+			}
+		}
+		// Add the file's to the file list.
+		for k, v := range list {
+			fileList[k] = v
+		}
+	}
+
+	return fileList
+}
+
+func (w *Watcher) readEvents() {
+	// Make sure the Watcher is not already running.
+	w.mu.Lock()
+	if w.running {
+		w.mu.Unlock()
+		return
+	}
+	w.running = true
+	w.mu.Unlock()
+
+	// Unblock w.Wait().
+	w.wg.Done()
+
+	for {
+		// done lets the inner polling cycle loop know when the
+		// current cycle's method has finished executing.
+		done := make(chan struct{})
+
+		// Any events that are found are first piped to evt before
+		// being sent to the main Event channel.
+		evt := make(chan Event)
+
+		// Retrieve the file list for all watched file's and dirs.
+		fileList := w.retrieveFileList()
+
+		// cancel can be used to cancel the current event polling function.
+		cancel := make(chan struct{})
+
+		// Look for events.
+		go func() {
+			w.pollEvents(fileList, evt, cancel)
+			done <- struct{}{}
+		}()
+
+		// numEvents holds the number of events for the current cycle.
+                numEvents := 0
+inner:
+		for {
+			select {
+			case <-w.close:
+				close(cancel)
+				close(w.closed)
+				return
+			case event := <-evt:
+				if len(w.ops) > 0 { // Filter Ops.
+					_, found := w.ops[event.Op]
+					if !found {
+						continue
+					}
+				}
+				numEvents++
+				if w.maxEvents > 0 && numEvents > w.maxEvents {
+					close(cancel)
+					break inner
+				}
+				w.Events <- event
+			case <-done: // Current cycle is finished.
+				break inner
+			}
+		}
+
+		// Update the file's list.
+		w.mu.Lock()
+		w.files = fileList
+		w.mu.Unlock()
+
+		// Sleep and then continue to the next loop iteration.
+		time.Sleep(sleepTime)
+	}
+}
+
+func (w *Watcher) pollEvents(files map[string]os.FileInfo, evt chan Event,
+	cancel chan struct{}) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	// Store create and remove events for use to check for rename events.
+	creates := make(map[string]os.FileInfo)
+	removes := make(map[string]os.FileInfo)
+
+	// Check for removed files.
+	for path, info := range w.files {
+		if _, found := files[path]; !found {
+			removes[path] = info
+		}
+	}
+
+	// Check for created files, writes and chmods.
+	for path, info := range files {
+		oldInfo, found := w.files[path]
+		if !found {
+			// A file was created.
+			creates[path] = info
+			continue
+		}
+		if !oldInfo.ModTime().Equal(info.ModTime()) {
+			select {
+			case <-cancel:
+				return
+			case evt <- Event{path, Write}:
+			}
+		}
+		if oldInfo.Mode() != info.Mode() {
+			select {
+			case <-cancel:
+				return
+			case evt <- Event{path, Chmod}:
+			}
+		}
+	}
+
+	// Check for renames and moves.
+	for path1, info1 := range removes {
+		for path2, info2 := range creates {
+			if os.SameFile(info1, info2) {
+				e := Event{
+					Op:       Rename, // was "Moved"
+					Name:     fmt.Sprintf("%s -> %s", path1, path2),
+					//FileInfo: info1,
+				}
+				// fsnotify has no distinct Move op, so both moves and
+				// renames are reported as Rename (Op already set above).
+				if filepath.Dir(path1) == filepath.Dir(path2) {
+					e.Op = Rename
+				}
+
+				delete(removes, path1)
+				delete(creates, path2)
+
+				select {
+				case <-cancel:
+					return
+				case evt <- e:
+				}
+			}
+		}
+	}
+
+	// Send all the remaining create and remove events.
+	for path := range creates {
+		select {
+		case <-cancel:
+			return
+		case evt <- Event{path, Create}:
+		}
+	}
+	for path := range removes {
+		select {
+		case <-cancel:
+			return
+		case evt <- Event{path, Remove}:
+		}
+	}
+}
+
+// Remove removes either a single file or a directory recursively from
+// the file's list.
+func (w *Watcher) removeRecursive(name string) (err error) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	name, err = filepath.Abs(name)
+	if err != nil {
+		return err
+	}
+
+	// Remove the name from w's names list.
+	delete(w.names, name)
+
+	// If name is a single file, remove it and return.
+	info, found := w.files[name]
+	if !found {
+		return nil // Doesn't exist, just return.
+	}
+	if !info.IsDir() {
+		delete(w.files, name)
+		return nil
+	}
+
+	// If it's a directory, delete all of it's contents recursively
+	// from w.files.
+	for path := range w.files {
+		if strings.HasPrefix(path, name) {
+			delete(w.files, path)
+		}
+	}
+	return nil
+}
+
+func (w *Watcher) listRecursive(name string) (map[string]os.FileInfo, error) {
+	fileList := make(map[string]os.FileInfo)
+
+	return fileList, filepath.Walk(name, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		// If path is ignored and it's a directory, skip the directory. If it's
+		// ignored and it's a single file, skip the file.
+		_, ignored := w.ignored[path]
+		if ignored || (strings.HasPrefix(info.Name(), ".")) {
+			if info.IsDir() {
+				return filepath.SkipDir
+			}
+			return nil
+		}
+		// Add the path and it's info to the file list.
+		fileList[path] = info
+		return nil
+	})
+}
+
diff --git a/vendor/github.com/godbus/dbus/transport_aix.go b/vendor/github.com/godbus/dbus/transport_aix.go
new file mode 100644
index 0000000..2095ed2
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_aix.go
@@ -0,0 +1,8 @@
+// This is a copy of transport_darwin.go
+
+package dbus
+
+func (t *unixTransport) SendNullByte() error {
+	_, err := t.Write([]byte{0})
+	return err
+}
diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go
index 3e63fff..57de02b 100644
--- a/vendor/github.com/golang/glog/glog.go
+++ b/vendor/github.com/golang/glog/glog.go
@@ -78,6 +78,7 @@ import (
 	"fmt"
 	"io"
 	stdLog "log"
+	"log/syslog"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -398,6 +399,7 @@ type flushSyncWriter interface {
 func init() {
 	flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
 	flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
+	flag.StringVar(&logging.toSyslog, "logtosyslog", "", "log to syslog with this tag instead of files")
 	flag.Var(&logging.verbosity, "v", "log level for V logs")
 	flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
 	flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
@@ -426,6 +428,8 @@ type loggingT struct {
 	// Level flag. Handled atomically.
 	stderrThreshold severity // The -stderrthreshold flag.
 
+	toSyslog     string // The -logtosyslog flag.
+
 	// freeList is a list of byte buffers, maintained under freeListMu.
 	freeList *buffer
 	// freeListMu maintains the free list. It is separate from the main mutex
@@ -453,6 +457,8 @@ type loggingT struct {
 	// safely using atomic.LoadInt32.
 	vmodule   moduleSpec // The state of the -vmodule flag.
 	verbosity Level      // V logging level, the value of the -v flag/
+
+	sysw      *syslog.Writer
 }
 
 // buffer holds a byte Buffer for reuse. The zero value is ready for use.
@@ -678,6 +684,27 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo
 	data := buf.Bytes()
 	if l.toStderr {
 		os.Stderr.Write(data)
+	} else if l.toSyslog != "" {
+		if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
+			os.Stderr.Write(data)
+		}
+		if l.sysw == nil {
+			var err error
+			l.sysw, err = syslog.New(syslog.LOG_NOTICE | syslog.LOG_LOCAL0, l.toSyslog)
+			if err != nil {
+				return
+			}
+		}
+		switch s {
+		case fatalLog:
+			l.sysw.Crit(string(data))
+		case errorLog:
+			l.sysw.Err(string(data))
+		case warningLog:
+			l.sysw.Warning(string(data))
+		case infoLog:
+			l.sysw.Info(string(data))
+		}
 	} else {
 		if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
 			os.Stderr.Write(data)
diff --git a/vendor/github.com/google/certificate-transparency/go/x509/root_unix.go b/vendor/github.com/google/certificate-transparency/go/x509/root_unix.go
index 324f855..c8427e0 100755
--- a/vendor/github.com/google/certificate-transparency/go/x509/root_unix.go
+++ b/vendor/github.com/google/certificate-transparency/go/x509/root_unix.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build dragonfly freebsd linux openbsd netbsd
+// +build aix dragonfly freebsd linux openbsd netbsd
 
 package x509
 
@@ -15,6 +15,7 @@ var certFiles = []string{
 	"/etc/ssl/ca-bundle.pem",                 // OpenSUSE
 	"/etc/ssl/cert.pem",                      // OpenBSD
 	"/usr/local/share/certs/ca-root-nss.crt", // FreeBSD/DragonFly
+	"/var/ssl/certs/ca-bundle.crt",           // AIX
 }
 
 func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
diff --git a/vendor/github.com/kardianos/osext/osext_procfs.go b/vendor/github.com/kardianos/osext/osext_procfs.go
index b2598bc..05ee1bb 100644
--- a/vendor/github.com/kardianos/osext/osext_procfs.go
+++ b/vendor/github.com/kardianos/osext/osext_procfs.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build linux netbsd openbsd solaris dragonfly
+// +build linux netbsd openbsd solaris dragonfly aix
 
 package osext
 
diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go
index ab6ecde..95e7922 100644
--- a/vendor/github.com/pkg/sftp/attrs_unix.go
+++ b/vendor/github.com/pkg/sftp/attrs_unix.go
@@ -1,4 +1,4 @@
-// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris
+// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix
 // +build cgo
 
 package sftp
diff --git a/vendor/github.com/pkg/sftp/server_unix.go b/vendor/github.com/pkg/sftp/server_unix.go
index 8c3f0b4..169eee1 100644
--- a/vendor/github.com/pkg/sftp/server_unix.go
+++ b/vendor/github.com/pkg/sftp/server_unix.go
@@ -1,4 +1,4 @@
-// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris
+// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix
 // +build cgo
 
 package sftp
diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go
index 5728243..63d33b1 100644
--- a/vendor/github.com/spf13/afero/const_bsds.go
+++ b/vendor/github.com/spf13/afero/const_bsds.go
@@ -11,7 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build darwin openbsd freebsd netbsd dragonfly
+// +build darwin openbsd freebsd netbsd dragonfly aix
 
 package afero
 
diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go
index 968fc27..2b850e4 100644
--- a/vendor/github.com/spf13/afero/const_win_unix.go
+++ b/vendor/github.com/spf13/afero/const_win_unix.go
@@ -15,6 +15,7 @@
 // +build !freebsd
 // +build !dragonfly
 // +build !netbsd
+// +build !aix
 
 package afero
 
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go
new file mode 100644
index 0000000..69d737e
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go
@@ -0,0 +1,50 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package terminal
+
+import (
+	"syscall"
+)
+
+// State represents the state of the terminal.
+type State struct {
+	termios syscall.Termios
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+	var termios syscall.Termios
+	err := syscall.Tcgetattr(fd, &termios)
+	return err == nil
+}
+
+// MakeRaw put the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd int) (*State, error) {
+	var oldState State
+	if err := syscall.Tcgetattr(fd, &oldState.termios); err != nil {
+		return nil, err
+	}
+
+	newState := oldState.termios
+	newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
+	newState.Oflag &^= syscall.OPOST
+	newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
+	newState.Cflag &^= syscall.CSIZE | syscall.PARENB
+	newState.Cflag |= syscall.CS8
+
+	if err := syscall.Tcsetattr(fd, syscall.TCSANOW, &newState); err != nil {
+		return nil, err
+	}
+	return &oldState, nil
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func Restore(fd int, oldState *State) error {
+	return syscall.Tcsetattr(fd, syscall.TCSANOW, &oldState.termios)
+}
