Support for unorchestrated rootful Podman using OCI hook #1874

Open · wants to merge 1 commit into base: main
33 changes: 32 additions & 1 deletion Dockerfile.init
@@ -2,6 +2,29 @@
# Copyright 2021 Authors of KubeArmor

### Make compiler image

FROM golang:1.22-alpine3.20 AS builder
RUN apk --no-cache update
RUN apk add --no-cache git clang llvm make gcc protobuf
RUN apk add --no-cache linux-headers pkgconfig
RUN apk add --no-cache gpgme-dev
RUN apk add --no-cache btrfs-progs-dev
ARG GOARCH
ARG GOOS

WORKDIR /KubeArmor

COPY . .
WORKDIR /KubeArmor/KubeArmor

RUN go mod download

WORKDIR /KubeArmor/KubeArmor/deployHook
RUN CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GO111MODULE=on go build -o deployHook .

WORKDIR /KubeArmor/KubeArmor/hook
RUN CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GO111MODULE=on go build -tags 'containers_image_openpgp' -o hook .

FROM redhat/ubi9-minimal as kubearmor-init

ARG VERSION=latest
@@ -34,7 +57,15 @@ RUN groupadd --gid 1000 default \
COPY LICENSE /licenses/license.txt
COPY ./KubeArmor/BPF /KubeArmor/BPF/
COPY ./KubeArmor/build/compile.sh /KubeArmor/compile.sh
COPY --from=builder /KubeArmor/KubeArmor/hook/hook /hook
COPY --from=builder /KubeArmor/KubeArmor/deployHook/deployHook /KubeArmor/deployHook

# Copy the custom entrypoint script
COPY ./KubeArmor/build/entrypoint.sh /KubeArmor/entrypoint.sh
RUN chmod +x /KubeArmor/entrypoint.sh

RUN chown -R default:default /KubeArmor

USER 1000
ENTRYPOINT ["/KubeArmor/compile.sh"]

ENTRYPOINT ["/KubeArmor/entrypoint.sh"]
6 changes: 6 additions & 0 deletions KubeArmor/build/entrypoint.sh
@@ -0,0 +1,6 @@
#!/bin/bash
set -e

/KubeArmor/compile.sh

/KubeArmor/deployHook
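
The entrypoint above compiles the BPF programs and then runs deployHook, whose source is not part of this diff. Presumably it installs an OCI hook definition so that the container engine invokes the hook binary on container lifecycle events. A minimal sketch of such a step, assuming the standard OCI hooks schema (version 1.0.0) and the default hooks directory read by Podman and CRI-O; the hook path and stages are illustrative:

package main

import (
	"encoding/json"
	"log"
	"os"
)

// ociHook mirrors the standard OCI hooks configuration schema.
type ociHook struct {
	Version string `json:"version"`
	Hook    struct {
		Path string   `json:"path"`
		Args []string `json:"args,omitempty"`
	} `json:"hook"`
	When struct {
		Always bool `json:"always"`
	} `json:"when"`
	Stages []string `json:"stages"`
}

func main() {
	h := ociHook{Version: "1.0.0"}
	h.Hook.Path = "/usr/share/kubearmor/hook" // illustrative install path for the hook binary
	h.When.Always = true
	h.Stages = []string{"createRuntime", "poststop"} // fire on container create and teardown

	data, err := json.MarshalIndent(h, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	// default OCI hooks directory consulted by Podman and CRI-O
	if err := os.WriteFile("/usr/share/containers/oci/hooks.d/kubearmor.json", data, 0o644); err != nil {
		log.Fatal(err)
	}
}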
5 changes: 4 additions & 1 deletion KubeArmor/common/common.go
@@ -413,7 +413,7 @@ func IsK8sEnv() bool {
}

// ContainerRuntimeSocketKeys contains FIFO ordered keys of container runtimes
var ContainerRuntimeSocketKeys = []string{"docker", "containerd", "cri-o"}
var ContainerRuntimeSocketKeys = []string{"docker", "containerd", "cri-o", "podman"}

// ContainerRuntimeSocketMap Structure
var ContainerRuntimeSocketMap = map[string][]string{
@@ -432,6 +432,9 @@ var ContainerRuntimeSocketMap = map[string][]string{
"/var/run/crio/crio.sock",
"/run/crio/crio.sock",
},
"podman":{
"/run/podman/podman.sock",
},
}

// GetCRISocket Function
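GetCRISocket itself is unchanged by this diff; it presumably walks the FIFO-ordered keys above and probes each candidate path, so appending "podman" last means the Podman socket is only selected when no other runtime socket is present. A hedged sketch of that lookup, using the variables defined above and the standard library os package (the real function may differ):

// Sketch: return the first existing runtime socket, honoring the FIFO
// order of ContainerRuntimeSocketKeys. A non-empty ContainerRuntime
// narrows the search to that one runtime.
func GetCRISocket(ContainerRuntime string) string {
	for _, runtime := range ContainerRuntimeSocketKeys {
		if ContainerRuntime != "" && ContainerRuntime != runtime {
			continue
		}
		for _, candidate := range ContainerRuntimeSocketMap[runtime] {
			if _, err := os.Stat(candidate); err == nil {
				return candidate
			}
		}
	}
	return ""
}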
3 changes: 2 additions & 1 deletion KubeArmor/core/dockerHandler.go
@@ -15,6 +15,7 @@ import (

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"

"github.com/kubearmor/KubeArmor/KubeArmor/common"
@@ -266,7 +267,7 @@ func (dm *KubeArmorDaemon) GetAlreadyDeployedDockerContainers() {
}
}

if containerList, err := Docker.DockerClient.ContainerList(context.Background(), types.ContainerListOptions{}); err == nil {
if containerList, err := Docker.DockerClient.ContainerList(context.Background(), container.ListOptions{}); err == nil {
for _, dcontainer := range containerList {
// get container information from docker client
container, err := Docker.GetContainerInfo(dcontainer.ID, dm.OwnerInfo)
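The switch from types.ContainerListOptions to container.ListOptions tracks newer Docker SDKs, which moved the list options into the api/types/container package. A minimal standalone sketch of the updated call, with client setup added for completeness:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// container.ListOptions supersedes the deprecated types.ContainerListOptions
	containers, err := cli.ContainerList(context.Background(), container.ListOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:12], c.Image)
	}
}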
245 changes: 245 additions & 0 deletions KubeArmor/core/hook_handler.go
@@ -0,0 +1,245 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2022 Authors of KubeArmor

package core

import (
"encoding/json"
"errors"
"io"
"log"
"net"
"os"
"path/filepath"
"sync/atomic"

kl "github.com/kubearmor/KubeArmor/KubeArmor/common"
cfg "github.com/kubearmor/KubeArmor/KubeArmor/config"
"github.com/kubearmor/KubeArmor/KubeArmor/types"
)

const kubearmorDir = "/var/run/kubearmor"

// ContainerEngineHandler defines the interface a container engine must implement if it supports OCI hooks
type ContainerEngineHandler interface {
HandleCreateContainer(container types.Container)
HandleDeleteContainer(containerID string)
}

// PodmanHandler handles Podman container events delivered over the hook socket
type PodmanHandler struct {
daemon *KubeArmorDaemon
}

// NewPodmanHandler creates a PodmanHandler bound to the given daemon
func NewPodmanHandler(dm *KubeArmorDaemon) *PodmanHandler {
return &PodmanHandler{daemon: dm}
}

// HandleCreateContainer registers a newly created Podman container
func (p *PodmanHandler) HandleCreateContainer(container types.Container) {
p.daemon.UpdatePodmanContainer(container.ContainerID, container, "create")
}

// HandleDeleteContainer removes a destroyed Podman container
func (p *PodmanHandler) HandleDeleteContainer(containerID string) {
p.daemon.UpdatePodmanContainer(containerID, p.daemon.Containers[containerID], "destroy")
}

// CRIOHandler handles CRI-O container events delivered over the hook socket
type CRIOHandler struct {
daemon *KubeArmorDaemon
}

// NewCRIOHandler creates a CRIOHandler bound to the given daemon
func NewCRIOHandler(dm *KubeArmorDaemon) *CRIOHandler {
return &CRIOHandler{daemon: dm}
}

// HandleCreateContainer registers a newly created CRI-O container
func (c *CRIOHandler) HandleCreateContainer(container types.Container) {
c.daemon.handleContainerCreate(container)
}

// HandleDeleteContainer removes a deleted CRI-O container
func (c *CRIOHandler) HandleDeleteContainer(containerID string) {
c.daemon.handleContainerDelete(containerID)
}

// ListenToHook starts listening on a UNIX socket and waits for container hooks
// to report container events
func (dm *KubeArmorDaemon) ListenToHook() {
if err := os.MkdirAll(kubearmorDir, 0750); err != nil {
log.Fatal(err)
}

listenPath := filepath.Join(kubearmorDir, "ka.sock")
err := os.Remove(listenPath) // remove a stale socket left behind if KubeArmor previously crashed
if err != nil && !errors.Is(err, os.ErrNotExist) {
log.Fatal(err)
}

socket, err := net.Listen("unix", listenPath)
if err != nil {
log.Fatal(err)
}

defer socket.Close()
defer os.Remove(listenPath)
ready := &atomic.Bool{}

for {
conn, err := socket.Accept()
if err != nil {
log.Fatal(err)
}

go dm.handleConn(conn, ready)
}

}

// handleConn receives container details from container hooks.
func (dm *KubeArmorDaemon) handleConn(conn net.Conn, ready *atomic.Bool) {
// We must not accept new containers until every container created before
// KubeArmor started has been replayed. Otherwise a hook could report that a
// container was deleted before the process replaying pre-existing containers
// reports that it was created, leaving KubeArmor in an incorrect state.
defer conn.Close()
buf := make([]byte, 4096)

for {
n, err := conn.Read(buf)
if err == io.EOF {
return
}
if err != nil {
log.Fatal(err)
}

data := types.HookRequest{}

err = json.Unmarshal(buf[:n], &data)
if err != nil {
log.Fatal(err)
}

if data.Detached {
// we want KubeArmor to start accepting containers only after
// all pre-existing containers have been processed
defer ready.Store(true)
} else if !ready.Load() {
_, err = conn.Write([]byte("err"))
if err == io.EOF {
return
} else if err != nil {
log.Println(err)
return
}
continue
}
_, err = conn.Write([]byte("ok"))
if err == io.EOF {
return
} else if err != nil {
log.Println(err)
return
}

containerLabels, _ := kl.GetLabelsFromString(data.Container.Labels)
// determine which engine sent the event (only Podman and CRI-O support OCI hooks for now)
var handler ContainerEngineHandler
if containerLabels["containerType"] == "podman" {
handler = NewPodmanHandler(dm)
} else {
handler = NewCRIOHandler(dm)
}

// Handle the container create or delete event
if data.Operation == types.HookContainerCreate {
handler.HandleCreateContainer(data.Container)
} else {
handler.HandleDeleteContainer(data.Container.ContainerID)
}

}
}
func (dm *KubeArmorDaemon) handleContainerCreate(container types.Container) {
endpoint := types.EndPoint{}

dm.Logger.Printf("added %s", container.ContainerID)

dm.ContainersLock.Lock()
defer dm.ContainersLock.Unlock()
if _, ok := dm.Containers[container.ContainerID]; !ok {
dm.Containers[container.ContainerID] = container
} else if dm.Containers[container.ContainerID].PidNS == 0 && dm.Containers[container.ContainerID].MntNS == 0 {
c := dm.Containers[container.ContainerID]
c.MntNS = container.MntNS
c.PidNS = container.PidNS
c.AppArmorProfile = container.AppArmorProfile
dm.Containers[c.ContainerID] = c

dm.EndPointsLock.Lock()
for idx, endPoint := range dm.EndPoints {
if endPoint.NamespaceName == container.NamespaceName && endPoint.EndPointName == container.EndPointName && kl.ContainsElement(endPoint.Containers, container.ContainerID) {

// update apparmor profiles
if !kl.ContainsElement(endPoint.AppArmorProfiles, container.AppArmorProfile) {
dm.EndPoints[idx].AppArmorProfiles = append(dm.EndPoints[idx].AppArmorProfiles, container.AppArmorProfile)
}

if container.Privileged && dm.EndPoints[idx].PrivilegedContainers != nil {
dm.EndPoints[idx].PrivilegedContainers[container.ContainerName] = struct{}{}
}

endpoint = dm.EndPoints[idx]

break
}
}
dm.EndPointsLock.Unlock()
}

if len(dm.OwnerInfo) > 0 {
container.Owner = dm.OwnerInfo[container.EndPointName]
}

if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy {
dm.SystemMonitor.AddContainerIDToNsMap(container.ContainerID, container.NamespaceName, container.PidNS, container.MntNS)
dm.RuntimeEnforcer.RegisterContainer(container.ContainerID, container.PidNS, container.MntNS)

if len(endpoint.SecurityPolicies) > 0 { // struct can be empty or no policies registered for the endpoint yet
dm.Logger.UpdateSecurityPolicies("ADDED", endpoint)
if dm.RuntimeEnforcer != nil && endpoint.PolicyEnabled == types.KubeArmorPolicyEnabled {
// enforce security policies
dm.RuntimeEnforcer.UpdateSecurityPolicies(endpoint)
}
}
}
}
func (dm *KubeArmorDaemon) handleContainerDelete(containerID string) {
dm.ContainersLock.Lock()
container, ok := dm.Containers[containerID]
dm.Logger.Printf("deleted %s", containerID)
if !ok {
dm.ContainersLock.Unlock()
return
}
delete(dm.Containers, containerID)
dm.ContainersLock.Unlock()

dm.EndPointsLock.Lock()
for idx, endPoint := range dm.EndPoints {
if endPoint.NamespaceName == container.NamespaceName && endPoint.EndPointName == container.EndPointName && kl.ContainsElement(endPoint.Containers, container.ContainerID) {

// update apparmor profiles
for idxA, profile := range endPoint.AppArmorProfiles {
if profile == container.AppArmorProfile {
dm.EndPoints[idx].AppArmorProfiles = append(dm.EndPoints[idx].AppArmorProfiles[:idxA], dm.EndPoints[idx].AppArmorProfiles[idxA+1:]...)
break
}
}

break
}
}
dm.EndPointsLock.Unlock()

if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy {
// update NsMap
dm.SystemMonitor.DeleteContainerIDFromNsMap(containerID, container.NamespaceName, container.PidNS, container.MntNS)
dm.RuntimeEnforcer.UnregisterContainer(containerID)
}

}
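
For context, the hook binary on the other end of ka.sock is not shown here. It presumably dials the socket, sends a JSON-encoded types.HookRequest, and retries while KubeArmor answers "err", i.e. until the detached process has replayed all pre-existing containers. A hedged sketch of that client side, assuming the same imports and types as above plus net and time:

// sendHookRequest is a sketch of the hook-side protocol partner for
// handleConn: write one JSON request, then wait for "ok" before returning.
func sendHookRequest(req types.HookRequest) error {
	conn, err := net.Dial("unix", "/var/run/kubearmor/ka.sock")
	if err != nil {
		return err
	}
	defer conn.Close()

	data, err := json.Marshal(req)
	if err != nil {
		return err
	}

	reply := make([]byte, 8)
	for {
		if _, err := conn.Write(data); err != nil {
			return err
		}
		n, err := conn.Read(reply)
		if err != nil {
			return err
		}
		if string(reply[:n]) == "ok" {
			return nil // KubeArmor accepted the event
		}
		time.Sleep(100 * time.Millisecond) // got "err": daemon not ready yet, retry
	}
}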
2 changes: 2 additions & 0 deletions KubeArmor/core/kubeArmor.go
@@ -591,6 +591,8 @@ func KubeArmor() {
} else if strings.Contains(cfg.GlobalCfg.CRISocket, "cri-o") {
// monitor crio events
go dm.MonitorCrioEvents()
} else if strings.Contains(cfg.GlobalCfg.CRISocket, "podman") {
// monitor podman events via the OCI hook socket
go dm.ListenToHook()
} else {
dm.Logger.Warnf("Failed to monitor containers: %s is not a supported CRI socket.", cfg.GlobalCfg.CRISocket)
enableContainerPolicy = false