Feature: Could not save docker images locally
Solution: Create a new subcommand, "aleph container create"
AmozPay committed Dec 1, 2022
1 parent 5c2256c commit 9c547ab
Showing 7 changed files with 186 additions and 69 deletions.
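To illustrate the workflow this commit enables, the sketch below drives the new create subcommand and the new upload --from-created path through typer's test runner. It is only a sketch: the archive, volume and script paths are placeholders, the upload step additionally assumes a configured Aleph account/private key, and only the argument order is taken from the typer signatures in the diff below.

from typer.testing import CliRunner

from aleph_client.commands.container.cli_command import app

runner = CliRunner()

# 1. Build an Aleph-compatible container volume locally from a `docker save` archive.
result = runner.invoke(app, ["create", "alpine.tar", "docker-volume.squashfs"])
print(result.output)

# 2. Later, upload the pre-built volume as-is instead of rebuilding it on every upload.
result = runner.invoke(
    app,
    ["upload", "docker-volume.squashfs", "run.sh", "--from-created"],
)
print(result.output)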
100 changes: 64 additions & 36 deletions src/aleph_client/commands/container/cli_command.py
@@ -9,7 +9,10 @@
from base64 import b32encode, b16decode
from typing import Optional, Dict, List

from aleph_message.models import StoreMessage
from aleph_message.models import (
StoreMessage,
ProgramMessage
)

from aleph_client import synchronous
from aleph_client.account import _load_account, AccountFromPrivateKey
@@ -30,7 +33,10 @@
yes_no_input
)

from .save import save_tar


from aleph_client.commands.container.save import save_tar
from aleph_client.commands.container.utils import create_container_volume

logger = logging.getLogger(__name__)
app = typer.Typer()
@@ -52,7 +58,7 @@ def upload_file(
else StorageEnum.storage
)
logger.debug("Uploading file")
result = synchronous.create_store(
result: StoreMessage = synchronous.create_store(
account=account,
file_content=file_content,
storage_engine=storage_engine,
@@ -65,19 +71,38 @@
typer.echo(f"{json.dumps(result, indent=4)}")
return result

def MutuallyExclusiveBoolean():
marker = None
def callback(ctx: typer.Context, param: typer.CallbackParam, value: str):
# Remember the first option passed with a truthy value; reject any other one
nonlocal marker
if value is False:
return value
if marker is None:
marker = param.name
if param.name != marker:
raise typer.BadParameter(
f"{param.name} is mutually exclusive with {marker}")
return value
return callback

exclusivity_callback = MutuallyExclusiveBoolean()

@app.command()
def upload(
image: str = typer.Argument(..., help="Path to an image archive exported with docker save."),
path: str = typer.Argument(..., metavar="SCRIPT", help="A small script to start your container with parameters"),
from_remote: bool = typer.Option(False, "--from-remote", "-r", help="If --from-remote, IMAGE is a registry to pull the image from, e.g. library/alpine, library/ubuntu:latest"),
from_daemon: bool = typer.Option(False, "--from-daemon", "-d", help="If --from-daemon, IMAGE is an image in local docker daemon storage. You need docker installed for this command"),
from_remote: bool = typer.Option(False, "--from-remote", help="If --from-remote, IMAGE is a registry to pull the image from, e.g. library/alpine, library/ubuntu:latest", callback=exclusivity_callback),
from_daemon: bool = typer.Option(False, "--from-daemon", help="If --from-daemon, IMAGE is an image in local docker daemon storage. You need docker installed for this command", callback=exclusivity_callback),
from_created: bool = typer.Option(False, "--from-created", help="If --from-created, IMAGE is the path to a file created with 'aleph container create'", callback=exclusivity_callback),
channel: str = typer.Option(settings.DEFAULT_CHANNEL, help=help_strings.CHANNEL),
memory: int = typer.Option(settings.DEFAULT_VM_MEMORY, help="Maximum memory allocation on vm in MiB"),
vcpus: int = typer.Option(settings.DEFAULT_VM_VCPUS, help="Number of virtual cpus to allocate."),
timeout_seconds: float = typer.Option(settings.DEFAULT_VM_TIMEOUT, help="If vm is not called after [timeout_seconds] it will shutdown"),
private_key: Optional[str] = typer.Option(settings.PRIVATE_KEY_STRING, help=help_strings.PRIVATE_KEY),
private_key_file: Optional[Path] = typer.Option(settings.PRIVATE_KEY_FILE, help=help_strings.PRIVATE_KEY_FILE),
docker_mountpoint: Optional[Path] = typer.Option(settings.DEFAULT_DOCKER_VOLUME_MOUNTPOINT, "--docker-mountpoint", help="The path where the created docker image volume will be mounted"),
optimize: bool = typer.Option(True, help="Activate volume size optimization"),
print_messages: bool = typer.Option(False),
print_code_message: bool = typer.Option(False),
print_program_message: bool = typer.Option(False),
@@ -87,31 +112,11 @@ def upload(
Deploy a docker container on Aleph virtual machines.
Unless otherwise specified, you don't need docker on your machine to run this command.
"""
if from_remote or from_daemon:
raise NotImplementedError()
# echo(f"Downloading {image}")
# registry = Registry()
# tag = "latest"
# if ":" in image:
# l = image.split(":")
# tag = l[-1]
# image = l[0]
# print(tag)
# image_object = registry.pull_image(image, tag)
# manifest = registry.get_manifest_configuration(image, tag)
# image_archive = os.path.abspath(f"{str(uuid4())}.tar")
# image_object.write_filename(image_archive)
# image = image_archive
# print(manifest)
typer.echo("Preparing image for vm runtime")
docker_data_path = os.path.abspath("docker-data")
save_tar(image, docker_data_path, settings=settings.DOCKER_SETTINGS)
if not settings.CODE_USES_SQUASHFS:
typer.echo("The command mksquashfs must be installed!")
typer.Exit(2)
logger.debug("Creating squashfs archive...")
os.system(f"mksquashfs {docker_data_path} {docker_data_path}.squashfs -noappend")
docker_data_path = f"{docker_data_path}.squashfs"
docker_data_path=image
if not from_created:
docker_data_path = os.path.abspath("docker-data.squashfs")
create_container_volume(image, docker_data_path, from_remote, from_daemon, optimize, settings)
assert os.path.isfile(docker_data_path)
encoding = Encoding.squashfs
path = os.path.abspath(path)
@@ -140,16 +145,19 @@ def upload(
docker_upload_result: StoreMessage = upload_file(docker_data_path, account, channel, print_messages, print_code_message)
volumes.append({
"comment": "Docker container volume",
"mount": docker_mountpoint,
"ref": docker_upload_result["item_hash"],
"mount": str(docker_mountpoint),
"ref": str(docker_upload_result.item_hash),
"use_latest": True,
})

typer.echo(f"Docker image upload message address: {docker_upload_result.item_hash}")

program_result: StoreMessage = upload_file(path, account, channel, print_messages, print_code_message)

# Register the program
result = synchronous.create_program(
result: ProgramMessage = synchronous.create_program(
account=account,
program_ref=program_result["item_hash"],
program_ref=program_result.item_hash,
entrypoint=entrypoint,
runtime=settings.DEFAULT_DOCKER_RUNTIME_ID,
storage_engine=StorageEnum.storage,
@@ -161,25 +169,45 @@
volumes=volumes,
subscriptions=subscriptions,
environment_variables={
"DOCKER_MOUNTPOINT": docker_mountpoint
"DOCKER_MOUNTPOINT": str(docker_mountpoint)
}
)
logger.debug("Upload finished")
if print_messages or print_program_message:
typer.echo(f"{json.dumps(result, indent=4)}")

hash: str = result["item_hash"]
hash: str = result.item_hash
hash_base32 = b32encode(b16decode(hash.upper())).strip(b"=").lower().decode()


typer.echo(
f"Your program has been uploaded on Aleph .\n\n"
"Available on:\n"
f" {settings.VM_URL_PATH.format(hash=hash)}\n"
f" {settings.VM_URL_HOST.format(hash_base32=hash_base32)}\n"
"Visualise on:\n https://explorer.aleph.im/address/"
f"{result['chain']}/{result['sender']}/message/PROGRAM/{hash}\n"
f"{result.chain}/{result.sender}/message/PROGRAM/{hash}\n"
)

finally:
# Prevent aiohttp unclosed connector warning
asyncio.get_event_loop().run_until_complete(get_fallback_session().close())

@app.command()
def create(
image: str = typer.Argument(..., help="Path to an image archive exported with docker save."),
output: str = typer.Argument(..., help="The path where you want "),
from_remote: bool = typer.Option(False, "--from-remote", help="If --from-remote, IMAGE is a registry to pull the image from, e.g. library/alpine, library/ubuntu:latest", callback=exclusivity_callback),
from_daemon: bool = typer.Option(False, "--from-daemon", help="If --from-daemon, IMAGE is an image in local docker daemon storage. You need docker installed for this command", callback=exclusivity_callback),
optimize: bool = typer.Option(True, help="Activate volume size optimization"),
):
"""
Use a docker image to create an Aleph compatible image on your local machine.
You can later upload it with 'aleph container upload --from-created'
"""
try:
create_container_volume(image, output, from_remote, from_daemon, optimize, settings)
typer.echo(f"Container volume created at {output}")
except Exception as e:
print(e)
return
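The MutuallyExclusiveBoolean helper above shares a single closure across the --from-remote, --from-daemon and --from-created options, so passing more than one source flag is rejected during option parsing with typer.BadParameter, before any file is touched. A minimal check of that behaviour (placeholder paths, nothing is uploaded):

from typer.testing import CliRunner

from aleph_client.commands.container.cli_command import app

runner = CliRunner()
# Two source flags at once: the second callback raises typer.BadParameter,
# so the command exits with a usage error instead of running the upload.
result = runner.invoke(
    app, ["upload", "image.tar", "run.sh", "--from-remote", "--from-daemon"]
)
assert result.exit_code != 0
print(result.output)

One thing to keep in mind is that the closure's marker is never reset, so repeated invocations inside a single interpreter (for example in tests) share that state across calls.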
15 changes: 8 additions & 7 deletions src/aleph_client/commands/container/save.py
@@ -1,9 +1,9 @@
import sys
from .image import Image
from .storage_drivers import create_storage_driver
from aleph_client.commands.container.image import Image
from aleph_client.commands.container.storage_drivers import create_storage_driver
import os
from shutil import rmtree
from .docker_conf import docker_settings, DockerSettings
from aleph_client.commands.container.docker_conf import docker_settings, DockerSettings

dirs = {
"vfs": 0o710,
@@ -12,27 +12,27 @@
"swarm": 0o700,
"runtimes": 0o700,
"network": 0o750,
"containers": 0o710,
"trust": 0o700,
"volumes": 0o701,
"buildkit": 0o711,
"containers": 0o710,
"tmp": 0o700,
"containerd": 0o711,
}


def populate_dir(output_path: str):
print("populating")
path = os.path.abspath(output_path)
if os.path.exists(output_path) and os.path.isdir(output_path):
try:
rmtree(output_path)
except:
raise "" #TODO: handle error
raise "" # TODO: handle error
os.makedirs(output_path, 0o710)
for d, mode in dirs.items():
os.makedirs(os.path.join(path, d), mode)


def save_tar(archive_path: str, output_path: str, settings: DockerSettings):
archive_path = os.path.abspath(archive_path)
output_path = os.path.abspath(output_path)
@@ -42,5 +42,6 @@ def save_tar(archive_path: str, output_path: str, settings: DockerSettings):
driver = create_storage_driver(image, output_path, settings)
driver.create_file_architecture()


if __name__ == "__main__":
save_tar(sys.argv[1], sys.argv[2], docker_settings)
save_tar(sys.argv[1], sys.argv[2], docker_settings)
57 changes: 40 additions & 17 deletions src/aleph_client/commands/container/storage_drivers.py
@@ -1,5 +1,5 @@
import tarfile
from typing import Dict
from typing import Dict, List
from .image import Image
import os
import json
@@ -11,6 +11,7 @@
import gzip
from .docker_conf import DockerSettings, StorageDriverEnum


class IStorageDriver:
def create_file_architecture(self):
"""
Expand Down Expand Up @@ -70,6 +71,12 @@ def create_distribution(self, output_dir: str):
"""
raise NotImplementedError(f"You must implement this method")

def optimize(self, output_dir: str):
"""
Reduce the size of the volume generated in output_dir
by discarding layer data that is not needed at runtime.
"""
raise NotImplementedError(f"You must implement this method")


# Since aleph vms can be running with an unknown host configuration,
@@ -104,13 +111,16 @@ def create_imagedb(self, output_dir: str):
os.makedirs(os.path.join(output_dir, "imagedb"), 0o700)
os.makedirs(os.path.join(output_dir, "imagedb", "content"), 0o700)
os.makedirs(os.path.join(output_dir, "imagedb", "metadata"), 0o700)
os.makedirs(os.path.join(output_dir, "imagedb", "content", "sha256"), 0o700)
os.makedirs(os.path.join(output_dir, "imagedb", "metadata", "sha256"), 0o700)
os.makedirs(os.path.join(output_dir, "imagedb",
"content", "sha256"), 0o700)
os.makedirs(os.path.join(output_dir, "imagedb",
"metadata", "sha256"), 0o700)
# os.makedirs(os.path.join(metadata, self.image.image_digest))
content = os.path.join(output_dir, "imagedb", "content", "sha256")
path = os.path.join(content, self.image.image_digest)
with open(path, "w") as f:
f.write(json.dumps(self.image.config, separators=(',', ':'))) # This file must be dumped compactly in order to keep the correct sha256 digest
# This file must be dumped compactly in order to keep the correct sha256 digest
f.write(json.dumps(self.image.config, separators=(',', ':')))
os.chmod(path, 0o0600)
# with open(os.path.join(metadata, self.image.image_digest, "parent"), "w") as f:
# f.write(self.image.config['config']['Image'])
@@ -148,10 +158,10 @@ def save_layer_metadata(path: str, diff: str, cacheid: str, size: int, previous_
fd.write(previous_chain_id)
os.chmod(dest, 0o600)


def copy_layer(src: str, dest: str) -> None:
for folder in os.listdir(src):
subprocess.check_output(["cp", "-r", os.path.join(src, folder), dest])
subprocess.check_output(
["cp", "-r", os.path.join(src, folder), dest])

def compute_layer_size(tar_data_json_path: str) -> int:
size = 0
@@ -160,12 +170,15 @@ def compute_layer_size(tar_data_json_path: str) -> int:
"["
+ archive.read().decode().replace("}\n{", "},\n{")
+ "]"
) # fixes poor formatting from tar-split
) # fixes poor formatting from tar-split
for elem in data:
if "size" in elem.keys():
size =+ elem["size"]
size += elem["size"]
return size

def remove_unused_layers(layers_dir: str, keep: List[str]):
return

def extract_layer(path: str, archive_path: str, layerdb_subdir: str) -> int:
cwd = os.getcwd()
tmp_dir = tempfile.mkdtemp()
@@ -183,42 +196,47 @@ def extract_layer(path: str, archive_path: str, layerdb_subdir: str) -> int:
# Mandatory if one plans to export a docker image to a tar file
# https://github.com/vbatts/tar-split
if self.use_tarsplit:
tar_data_json = os.path.join(layerdb_subdir, "tar-split.json.gz")
os.system(f"tar-split disasm --output {tar_data_json} {tar_src} | tar -C . -x")
size = compute_layer_size(tar_data_json) # Differs from expected. Only messes with docker image size listing
tar_data_json = os.path.join(
layerdb_subdir, "tar-split.json.gz")
os.system(
f"tar-split disasm --output {tar_data_json} {tar_src} | tar -C . -x")
# Differs from expected. Only messes with docker image size listing
size = compute_layer_size(tar_data_json)
os.remove(tar_src)

# Also works, but won't be able to export images
else:
with tarfile.open(tar_src, "r") as tar:
os.remove(tar_src)
tar.extractall()
size=0
size = 0
os.rmdir(tmp_dir)
os.chdir(cwd)
return size

previous_cache_id = None
for i in range(0, len(self.image.chain_ids)):
chain_id = self.image.chain_ids[i]
layerdb_subdir = os.path.join(layerdb_path, chain_id.replace("sha256:", ""))
layerdb_subdir = os.path.join(
layerdb_path, chain_id.replace("sha256:", ""))
os.makedirs(layerdb_subdir, 0o700)
cache_id = (str(uuid4()) + str(uuid4())).replace("-", "")

layer_id = self.image.layers_ids[i]
current_layer_path = os.path.join(layers_dir, cache_id)
os.makedirs(current_layer_path, 0o700)


# Merge layers
# The last layer contains changes from all the previous ones
if previous_cache_id:
previous_layer_path = os.path.join(layers_dir, previous_cache_id)
previous_layer_path = os.path.join(
layers_dir, previous_cache_id)
copy_layer(previous_layer_path, current_layer_path)
if (self.optimize):
rmtree(previous_layer_path)
previous_cache_id = cache_id
size = extract_layer(current_layer_path, self.image.archive_path, layerdb_subdir)
size = extract_layer(current_layer_path,
self.image.archive_path, layerdb_subdir)
save_layer_metadata(
path=layerdb_subdir,
diff=self.image.diff_ids[i],
@@ -228,6 +246,11 @@ def extract_layer(path: str, archive_path: str, layerdb_subdir: str) -> int:
if i > 0
else None
)
if self.optimize:
layer_to_keep = os.path.join(
layers_dir, previous_cache_id
)
remove_unused_layers(layers_dir, layer_to_keep)


def create_storage_driver(
@@ -237,4 +260,4 @@
) -> IStorageDriver:
if settings.storage_driver.kind == StorageDriverEnum.VFS:
return Vfs(image, output_dir, settings)
raise NotImplementedError("Only vfs supported now")
raise NotImplementedError("Only vfs supported now")
2 changes: 2 additions & 0 deletions src/aleph_client/commands/container/test_data/Dockerfile
@@ -1,5 +1,7 @@
FROM alpine:latest

WORKDIR /app
RUN touch file.txt
RUN mkdir folder.d

CMD ["/bin/sh"]
Empty file modified src/aleph_client/commands/container/test_data/custom-dockerd (mode 100755 → 100644)
