
[taler-grid5k] 01/01: initial import


From: gnunet
Subject: [taler-grid5k] 01/01: initial import
Date: Sun, 29 Aug 2021 14:11:32 +0200

This is an automated email from the git hooks/post-receive script.

grothoff pushed a commit to branch master
in repository grid5k.

commit 4234a2882f0812be37721b6b7a58156260d52379
Author: Christian Grothoff <christian@grothoff.org>
AuthorDate: Sun Aug 29 14:11:21 2021 +0200

    initial import
---
 debian10-taler.yaml                                |   66 +
 default/from_scratch/x86_64/base.yaml              |  138 ++
 default/from_scratch/x86_64/debian-base.yaml       |   67 +
 default/from_scratch/x86_64/debian-testing.yaml    |   29 +
 default/steps/aliases/defaults.yaml                |  169 ++
 .../bootstrap/debian/prepare_autoinstall.yaml      |   11 +
 default/steps/bootstrap/download_installer.yaml    |   31 +
 default/steps/bootstrap/prepare_appliance.yaml     |   33 +
 default/steps/bootstrap/prepare_disk.yaml          |   10 +
 .../bootstrap/prepare_ssh_to_out_context.yaml      |   23 +
 default/steps/bootstrap/start_http_server.yaml     |   19 +
 default/steps/bootstrap/start_qemu.yaml            |  227 +++
 default/steps/checkpoints/simple.yaml              |   21 +
 default/steps/data/helpers/export_appliance.py     |  242 +++
 .../steps/data/helpers/netinstall_iso_finder.py    |  163 ++
 default/steps/data/helpers/simple_http_server.py   |  129 ++
 .../steps/data/preseed/debian-testing-preseed.cfg  |  322 +++
 default/steps/data/qemu-sendkeys.rb                |  121 ++
 .../steps/data/qemu-sendkeys/netinst-iso-debian    |    1 +
 default/steps/disable_checkpoint.yaml              |    3 +
 default/steps/enable_checkpoint.yaml               |    5 +
 default/steps/env/bashrc                           |   23 +
 default/steps/env/functions.sh                     |  201 ++
 default/steps/export/save_appliance_VM.yaml        |   23 +
 default/steps/setup/debian/clean_system.yaml       |   34 +
 default/steps/setup/debian/minimal_install.yaml    |    6 +
 grid5000/debian11-x64-common.yaml                  |   56 +
 grid5000/debian11-x64-min.yaml                     |   27 +
 grid5000/from_scratch/aarch64/base.yaml            |   25 +
 grid5000/from_scratch/aarch64/debian-base.yaml     |   59 +
 grid5000/from_scratch/aarch64/debian-bullseye.yaml |   24 +
 grid5000/from_scratch/base.yaml                    |  138 ++
 grid5000/from_scratch/debian-base.yaml             |   67 +
 grid5000/from_scratch/debian-bullseye.yaml         |   24 +
 grid5000/steps/aliases/defaults.yaml               |  169 ++
 .../bootstrap/debian/prepare_autoinstall.yaml      |   11 +
 grid5000/steps/bootstrap/download_installer.yaml   |   31 +
 grid5000/steps/bootstrap/prepare_appliance.yaml    |   33 +
 grid5000/steps/bootstrap/prepare_disk.yaml         |   10 +
 .../bootstrap/prepare_ssh_to_out_context.yaml      |   23 +
 grid5000/steps/bootstrap/start_http_server.yaml    |   19 +
 grid5000/steps/bootstrap/start_qemu.yaml           |  227 +++
 grid5000/steps/checkpoints/simple.yaml             |   21 +
 grid5000/steps/data/helpers/export_appliance.py    |  247 +++
 .../steps/data/helpers/netinstall_iso_finder.py    |  163 ++
 grid5000/steps/data/helpers/simple_http_server.py  |  129 ++
 .../steps/data/preseed/debian-bullseye-preseed.cfg |  461 +++++
 .../steps/data/preseed/debian-buster-preseed.cfg   |  443 +++++
 grid5000/steps/data/qemu-sendkeys.rb               |  121 ++
 .../steps/data/qemu-sendkeys/netinst-iso-debian    |    1 +
 grid5000/steps/data/setup/hiera/hiera.yaml         |   11 +
 .../steps/data/setup/hiera/hieradata/defaults.yaml |   12 +
 grid5000/steps/data/setup/puppet/manifests/base.pp |    6 +
 grid5000/steps/data/setup/puppet/manifests/big.pp  |    6 +
 grid5000/steps/data/setup/puppet/manifests/min.pp  |    6 +
 grid5000/steps/data/setup/puppet/manifests/nfs.pp  |    6 +
 grid5000/steps/data/setup/puppet/manifests/std.pp  |    6 +
 grid5000/steps/data/setup/puppet/manifests/xen.pp  |    6 +
 .../modules/env/files/base/cpufreq/cpufrequtils    |    1 +
 .../modules/env/files/base/ganglia/gmond.conf      |  336 ++++
 .../modules/env/files/base/infiniband/90-ib.rules  |    6 +
 .../modules/env/files/base/infiniband/openib.conf  |   39 +
 .../modules/env/files/base/infiniband/openibd      | 1610 +++++++++++++++
 .../env/files/base/infiniband/openibd.service      |   22 +
 .../puppet/modules/env/files/base/kexec/kexec      |   13 +
 .../puppet/modules/env/files/base/mx/ip_over_mx    |   13 +
 .../modules/env/files/base/ndctl/ndctl.preset      |    1 +
 .../modules/env/files/base/sshfs/40-fuse.rules     |    1 +
 .../env/files/base/tuning/limits-grid5000.conf     |    5 +
 .../env/files/base/tuning/sysctl-00-grid5000.conf  |    6 +
 .../env/files/base/userns/sysctl-00-userns.conf    |    2 +
 .../modules/env/files/big/amd_gpu/70-amdgpu.rules  |    4 +
 .../modules/env/files/big/kvm/60-qemu-system.rules |    1 +
 .../puppet/modules/env/files/big/kvm/create_tap    |   13 +
 .../puppet/modules/env/files/big/kvm/random_mac    |   38 +
 .../setup/puppet/modules/env/files/big/kvm/sudoers |    2 +
 .../puppet/modules/env/files/big/mic/85-mic.rules  |    2 +
 .../setup/puppet/modules/env/files/big/mic/fstab   |    2 +
 .../puppet/modules/env/files/big/mic/mic0.filelist |   41 +
 .../setup/puppet/modules/env/files/big/mic/mpss    |  191 ++
 .../puppet/modules/env/files/big/nvidia/cuda.conf  |    1 +
 .../env/files/big/nvidia/dcgm-exporter.service     |   16 +
 .../env/files/big/nvidia/ganglia-monitor.service   |   25 +
 .../env/files/big/nvidia/modpython-nvidia.conf     |   13 +
 .../files/big/nvidia/nvidia-persistenced.service   |   18 +
 .../env/files/big/nvidia/nvidia-smi.service        |   12 +
 .../env/files/min/apt/grid5000-archive-key.asc     |   52 +
 .../env/files/min/cpu_microcode/amd64-microcode    |   12 +
 .../env/files/min/cpu_microcode/intel-microcode    |   27 +
 .../modules/env/files/min/image_versioning/git_tag |    2 +
 .../env/files/min/image_versioning/postinst        |    3 +
 .../puppet/modules/env/files/min/locales/locale    |    3 +
 .../modules/env/files/min/locales/locale.gen       |    1 +
 .../env/files/min/network/g5k-update-host-name     |   16 +
 .../puppet/modules/env/files/min/network/hosts     |   10 +
 .../env/files/nfs/ldap/ca2019.grid5000.fr.cert     |   26 +
 .../puppet/modules/env/files/nfs/ldap/common-auth  |   13 +
 .../modules/env/files/nfs/ldap/common-password     |   34 +
 .../puppet/modules/env/files/nfs/ldap/ldap.conf    |   20 +
 .../modules/env/files/nfs/ldap/libnss-ldap.conf    |  324 +++
 .../puppet/modules/env/files/nfs/ldap/nscd.conf    |   81 +
 .../puppet/modules/env/files/nfs/ldap/nslcd.conf   |   36 +
 .../modules/env/files/nfs/ldap/nsswitch.conf       |   19 +
 .../env/files/nfs/openiscsi/55-openiscsi.rules     |    1 +
 .../modules/env/files/nfs/openiscsi/iscsidev.sh    |   17 +
 .../files/std/g5k-manager/g5k-disk-manager-backend |  284 +++
 .../g5k-manager/g5k-disk-manager-backend.service   |   11 +
 .../env/files/std/g5k-manager/g5k-pmem-manager     |  115 ++
 .../files/std/g5k-manager/g5k-pmem-manager.service |   12 +
 .../env/files/std/g5k-manager/lib/g5k-manager.rb   |   79 +
 .../env/files/std/g5k_generator/g5k_generator      |   38 +
 .../env/files/std/g5kchecks/g5k-checks.conf        |   39 +
 .../puppet/modules/env/files/std/lvm/lvm.conf      | 2096 ++++++++++++++++++++
 .../modules/env/files/std/net_access/iptables      |   31 +
 .../env/files/std/net_access/iptables.stretch      |   27 +
 .../modules/env/files/std/net_access/rsyslog.conf  |  113 ++
 .../env/files/std/net_access/syslog_iptables.conf  |    7 +
 .../files/std/nvidia_configure/nvidia-reset-mig    |   16 +
 .../std/nvidia_configure/nvidia-reset-mig.service  |   10 +
 .../modules/env/files/std/oar/batch_job_bashrc     |    6 +
 .../modules/env/files/std/oar/default_oar-node     |   53 +
 .../env/files/std/oar/default_oar-node_site        |   49 +
 .../env/files/std/oar/etc/security/access.conf     |   66 +
 .../modules/env/files/std/oar/oar_sshclient_config |    5 +
 .../env/files/std/oar/var/lib/oar/access.conf      |   66 +
 .../modules/env/files/std/sudo-g5k/id_rsa_sudo-g5k |   27 +
 .../env/files/std/sudo-g5k/id_rsa_sudo-g5k.pub     |    1 +
 .../data/setup/puppet/modules/env/files/version    |    2 +
 .../setup/puppet/modules/env/files/xen/xen/id_rsa  |   27 +
 .../puppet/modules/env/files/xen/xen/id_rsa.pub    |    1 +
 .../puppet/modules/env/files/xen/xen/random_mac    |   38 +
 .../setup/puppet/modules/env/files/xen/xen/xen-g5k |   28 +
 .../modules/env/files/xen/xen/xen-g5k.service      |    8 +
 .../modules/env/files/xen/xen/xend-config.sxp      |    5 +
 .../env/lib/facter/installed_kernelreleases.rb     |   12 +
 .../setup/puppet/modules/env/manifests/base.pp     |   53 +
 .../modules/env/manifests/base/add_ca_grid5000.pp  |   14 +
 .../env/manifests/base/configure_dhclient.pp       |   16 +
 .../manifests/base/configure_ip_over_infiniband.pp |   69 +
 .../modules/env/manifests/base/configure_kexec.pp  |   16 +
 .../env/manifests/base/configure_omnipath.pp       |   74 +
 .../env/manifests/base/disable_ndctl_monitor.pp    |   16 +
 .../env/manifests/base/disable_nvme_multipath.pp   |   21 +
 .../modules/env/manifests/base/do_not_clean_tmp.pp |   12 +
 .../enable_cpufreq_with_performance_governor.pp    |   16 +
 .../modules/env/manifests/base/enable_userns.pp    |   11 +
 .../env/manifests/base/increase_ssh_maxstartups.pp |   15 +
 .../manifests/base/install_and_disable_ganglia.pp  |   40 +
 .../env/manifests/base/tcp_tuning_for_10gbe.pp     |   17 +
 .../base/unlimited_memlock_for_infiniband.pp       |   17 +
 .../data/setup/puppet/modules/env/manifests/big.pp |   46 +
 .../modules/env/manifests/big/configure_amd_gpu.pp |   56 +
 .../env/manifests/big/configure_initramfs.pp       |   15 +
 .../modules/env/manifests/big/configure_kvm.pp     |   83 +
 .../env/manifests/big/configure_nvidia_gpu.pp      |   18 +
 .../env/manifests/big/configure_nvidia_gpu/cuda.pp |  126 ++
 .../manifests/big/configure_nvidia_gpu/drivers.pp  |  120 ++
 .../manifests/big/configure_nvidia_gpu/ganglia.pp  |   47 +
 .../manifests/big/configure_nvidia_gpu/modules.pp  |   25 +
 .../big/configure_nvidia_gpu/prometheus.pp         |   41 +
 .../manifests/big/configure_nvidia_gpu/services.pp |   15 +
 .../modules/env/manifests/big/configure_postfix.pp |   31 +
 .../modules/env/manifests/big/configure_sshfs.pp   |   13 +
 .../modules/env/manifests/big/install_beegfs.pp    |  112 ++
 .../env/manifests/big/install_g5k_jupyterlab.pp    |   19 +
 .../modules/env/manifests/big/install_openmpi.pp   |   58 +
 .../manifests/big/install_prometheus_exporters.pp  |    8 +
 .../modules/env/manifests/big/install_smartd.pp    |   26 +
 .../env/manifests/big/install_snmp_tools.pp        |   15 +
 .../manifests/big/prepare_kernel_module_build.pp   |   18 +
 .../modules/env/manifests/common/apt_pinning.pp    |   25 +
 .../modules/env/manifests/common/g5kpackages.pp    |   33 +
 .../env/manifests/common/software_versions.pp      |   55 +
 .../puppet/modules/env/manifests/commonpackages.pp |   21 +
 .../setup/puppet/modules/env/manifests/init.pp     |   48 +
 .../data/setup/puppet/modules/env/manifests/min.pp |   53 +
 .../env/manifests/min/add_image_version_in_etc.pp  |   18 +
 .../configure_kernel_and_blacklist_some_modules.pp |   18 +
 .../min/configure_network_and_install_drivers.pp   |   32 +
 .../modules/env/manifests/min/generate_etc_motd.pp |   26 +
 .../manifests/min/install_and_configure_locales.pp |   26 +
 .../env/manifests/min/install_and_configure_ssh.pp |   58 +
 .../env/manifests/min/install_cpu_microcode.pp     |   31 +
 .../env/manifests/min/install_metapackage.pp       |   45 +
 .../modules/env/manifests/min/install_tgz_g5k.pp   |   17 +
 .../modules/env/manifests/min/kernel/initramfs.pp  |    8 +
 .../modules/env/manifests/min/kernel/modules.pp    |   52 +
 .../modules/env/manifests/min/kernel/remove_old.pp |   17 +
 .../env/manifests/min/kernel/setup_links.pp        |   48 +
 .../modules/env/manifests/min/set_root_password.pp |   10 +
 .../manifests/min/set_timezone_to_europe_paris.pp  |    9 +
 .../data/setup/puppet/modules/env/manifests/nfs.pp |   31 +
 .../modules/env/manifests/nfs/configure_iscsi.pp   |   29 +
 .../modules/env/manifests/nfs/configure_ldap.pp    |   89 +
 .../env/manifests/nfs/configure_module_path.pp     |   23 +
 .../modules/env/manifests/nfs/configure_ntp.pp     |   40 +
 .../env/manifests/nfs/install_nfs_requirements.pp  |   18 +
 .../manifests/nfs/install_osirim_requirements.pp   |   42 +
 .../nfs/install_storage5k_requirements.pp          |    8 +
 .../data/setup/puppet/modules/env/manifests/std.pp |   58 +
 .../env/manifests/std/add_g5kcode_to_path.pp       |   16 +
 .../std/configure_g5kdiskmanagerbackend.pp         |   33 +
 .../env/manifests/std/configure_g5kmanager.pp      |   33 +
 .../env/manifests/std/configure_g5kpmemmanager.pp  |   32 +
 .../env/manifests/std/configure_oar_client.pp      |  247 +++
 .../env/manifests/std/configure_rsyslog_remote.pp  |   39 +
 .../setup/puppet/modules/env/manifests/std/dell.pp |  120 ++
 .../modules/env/manifests/std/dell/params.pp       |   16 +
 .../env/manifests/std/disable_lvm_pvscan.pp        |   19 +
 .../modules/env/manifests/std/g5k_generator.pp     |   26 +
 .../modules/env/manifests/std/install_g5kchecks.pp |   35 +
 .../env/manifests/std/install_g5ksubnets.pp        |    9 +
 .../env/manifests/std/install_hwraid_apt_source.pp |   38 +
 .../manifests/std/install_libguestfs_backport.pp   |   27 +
 .../modules/env/manifests/std/install_megacli.pp   |   11 +
 .../modules/env/manifests/std/install_sudog5k.pp   |   29 +
 .../puppet/modules/env/manifests/std/ipmitool.pp   |   18 +
 .../modules/env/manifests/std/nvidia_reset_mig.pp  |   21 +
 .../data/setup/puppet/modules/env/manifests/xen.pp |   13 +
 .../modules/env/manifests/xen/configure_xen.pp     |  236 +++
 .../modules/env/manifests/xen/install_grub.pp      |   14 +
 .../env/templates/base/omnipath/scibian.key.erb    |   51 +
 .../modules/env/templates/common/apt_pinning.erb   |    6 +
 .../puppet/modules/env/templates/min/motd.erb      |    8 +
 .../env/templates/nfs/ldap/common-account.erb      |   18 +
 .../modules/env/templates/nfs/ntp/ntp.conf.erb     |   63 +
 .../env/templates/std/dell/linux.dell.com.key.erb  |   68 +
 .../std/hwraid/hwraid.le-vert.net.key.erb          |   30 +
 grid5000/steps/disable_checkpoint.yaml             |    3 +
 grid5000/steps/enable_checkpoint.yaml              |    5 +
 grid5000/steps/env/bashrc                          |   23 +
 grid5000/steps/env/functions.sh                    |  203 ++
 .../steps/export/debian/clean_dhcp_leases.yaml     |    2 +
 grid5000/steps/export/do_qcow2_finish_works.yaml   |   44 +
 grid5000/steps/export/export_g5k.yaml              |   84 +
 grid5000/steps/export/export_vagrant_box.yaml      |   42 +
 grid5000/steps/export/save_appliance_VM.yaml       |   23 +
 grid5000/steps/setup/create_user.yaml              |   11 +
 grid5000/steps/setup/debian/clean_system.yaml      |   34 +
 .../setup/debian/clean_unnecessary_packages.yaml   |    9 +
 .../steps/setup/debian/configure_apt_sources.yaml  |   53 +
 grid5000/steps/setup/debian/configure_system.yaml  |   28 +
 grid5000/steps/setup/debian/install_packages.yaml  |    7 +
 grid5000/steps/setup/debian/minimal_install.yaml   |    6 +
 grid5000/steps/setup/debian/run_orchestrator.yaml  |   43 +
 .../steps/setup/debian/setup_orchestrator.yaml     |   24 +
 grid5000/steps/setup/debian/setup_vagrant_box.yaml |   77 +
 notes.txt                                          |    3 +
 steps/setup/#taler_install.yaml#                   |    7 +
 steps/setup/.#taler_install.yaml                   |    1 +
 steps/setup/taler_install.yaml                     |    7 +
 steps/setup/taler_install.yaml~                    |    2 +
 252 files changed, 15412 insertions(+)

diff --git a/debian10-taler.yaml b/debian10-taler.yaml
new file mode 100644
index 0000000..615cea5
--- /dev/null
+++ b/debian10-taler.yaml
@@ -0,0 +1,66 @@
+#==============================================================================
+# vim: softtabstop=2 shiftwidth=2 expandtab fenc=utf-8 cc=81 tw=80
+#==============================================================================
+#
+# DESCRIPTION: Customization of a Debian 10 image with GNU Taler repositories.
+#
+#==============================================================================
+# This recipe extends another. To look at the steps involved, run:
+#   kameleon dryrun debian10-taler.yaml
+# To see the variables that you can override, use the following command:
+#   kameleon info debian10-taler.yaml
+---
+extend: grid5000/debian11-x64-min.yaml
+
+global:
+  ### Uncomment and adapt the global variables below as needed
+
+  ## Export format to generate
+  # appliance_formats: qcow2 tar.zst
+
+  ## Environment description customization
+  ## Author
+  g5k_author: "grothoff@gnu.org"
+  ## Version
+  g5k_version: 2
+  ## Environment image path and compression
+  g5k_tgz_path: /home/grothoff/my_g5k_images/debian10-taler.tgz
+  # g5k_tar_compression: "zstd"
+  ## Environment postinstall path, compression, and script command
+  # g5k_postinst_path: server:///grid5000/postinstalls/g5k-postinstall.tgz
+  # g5k_postinst_compression: "gzip"
+  # g5k_postinst_script: g5k-postinstall --net debian
+  ## Environment kernel path and params
+  # g5k_kernel_path: "/vmlinuz"
+  # g5k_initrd_path: "/initrd.img"
+  # g5k_kernel_params: ""
+  ## Environment visibility
+  # g5k_visibility: "shared"
+  other_packages_no_clean: nginx postgresql-13 taler-exchange taler-auditor taler-merchant taler-exchange-offline taler-wallet-cli sudo
+
+  ## Other parameters can be changed, see kameleon info debian10-taler.yaml
+
+bootstrap:
+  ### The bootstrap section handles the initial installation of the
+  ## system (distribution installation). No modification should be needed here.
+  - "@base"
+
+setup:
+  ### The setup section is where customizations of the system take place.
+  ## We can request steps from the extended recipe to be executed
+  - "@base"
+  - taler_install
+  ## We add steps required by our customization after or before @base. Use
+  ## kameleon dryrun debian10-taler.yaml to see the resulting steps in the build.
+  ## The following is given as an example only; replace it with your steps.
+  - a_customization_step:
+    - microstep1:
+      - exec_in: echo "Hello world!"
+    - microstep2:
+      # This breakpoint will stop the build for inspecting the environment
+      - breakpoint
+
+export:
+  ### The export section handles the export of your customized Grid'5000
+  ## environment. No modification should be needed here.
+  - "@base"
diff --git a/default/from_scratch/x86_64/base.yaml b/default/from_scratch/x86_64/base.yaml
new file mode 100644
index 0000000..777fdc4
--- /dev/null
+++ b/default/from_scratch/x86_64/base.yaml
@@ -0,0 +1,138 @@
+#==============================================================================
+# vim: softtabstop=2 shiftwidth=2 expandtab fenc=utf-8 cc=81 tw=80
+#==============================================================================
+#
+# DESCRIPTION: Base recipe template
+#
+#==============================================================================
+---
+# Load qemu checkpoint
+checkpoint: simple.yaml
+# Loads some helpful aliases (these files are located in the steps/aliases/ directory)
+aliases: defaults.yaml
+
+# Custom shell environment (these files are located in the steps/env/ directory)
+env:
+  - bashrc
+  - functions.sh
+
+# Global variables used by the Kameleon engine and the steps
+global:
+  # Architecture for the target system
+  arch: x86_64
+  distrib: unknown
+  release: unknown
+  # Default hostname
+  hostname: kameleon-$${distrib}
+  # Default root password
+  root_password: kameleon
+
+  ## System variables. Required by kameleon engine
+  # Include specific steps
+  include_steps:
+    - $${distrib}/$${release}
+    - $${distrib}
+
+  # If qemu_iso_path is set, boot from an iso, retrieved from the following URL:
+  installer_iso_arch: x86_64
+  installer_iso_url:
+  # or give a helper script to find out the iso URL:
+  installer_iso_finder_helper:
+  installer_iso_finder_args:
+
+  # Otherwise, if qemu_kernel_path is set, boot from a kernel, initrd and
+  # cmdline fetched from the URL defined below, and used directly in qemu:
+  installer_kernel_url:
+  installer_initrd_url:
+  installer_cmdline:
+
+  ## GPG keyserver (Warning: not all servers are reliable)
+  gpg_keyserver: keyserver.ubuntu.com
+
+  ## QEMU options
+  qemu_enable_kvm: true
+  qemu_uefi: false
+  qemu_cpus: 2
+  qemu_memory_size: 768
+  qemu_monitor_socket: $${kameleon_cwd}/qemu_monitor.socket
+  qemu_arch: $${arch}
+  qemu_image_size: 10G
+  qemu_pidfile: $${kameleon_cwd}/qemu.pid
+  qemu_kernel_path: $${kameleon_cwd}/qemu_kernel
+  qemu_initrd_path: $${kameleon_cwd}/qemu_initrd
+  qemu_append_cmdline: $${installer_cmdline}
+  qemu_iso_path: $${kameleon_cwd}/qemu.iso
+
+  # rootfs options
+  disk_device: /dev/vda
+  rootfs: /rootfs
+  filesystem_type: ext4
+
+  # appliance options
+  image_disk: $${kameleon_cwd}/base_$${kameleon_recipe_name}
+  image_format: qcow2
+
+  # Allowed formats are: tar.gz, tar.bz2, tar.xz, tar.lzo, qcow, qcow2, qed, vdi, raw, vmdk
+  appliance_formats: tar.xz
+  appliance_filename: "$${kameleon_cwd}/$${kameleon_recipe_name}"
+  appliance_tar_excludes: >-
+    ./etc/fstab ./root/.bash_history ./root/kameleon_workdir ./root/.ssh
+    ./var/tmp/* ./tmp/* ./dev/* ./proc/* ./run/*
+    ./sys/* ./root/.rpmdb ./boot/extlinux ./boot/grub ./boot/grub2
+  zerofree: false
+
+  # GRUB
+  grub_cmdline_linux: console=tty0 console=ttyS0,115200
+
+  http_directory: $${kameleon_cwd}/http_dir
+  http_pid:  $${kameleon_cwd}/http.pid
+
+  ssh_config_file: $${kameleon_cwd}/ssh_config
+  local_ip: 10.0.2.2
+
+  out_context:
+    cmd: ssh -F $${ssh_config_file} $${kameleon_recipe_name} -t /bin/bash
+    workdir: /root/kameleon_workdir
+    proxy_cache: $${local_ip}
+
+  in_context:
+    cmd: ssh -F $${ssh_config_file} $${kameleon_recipe_name} -t /bin/bash
+    workdir: /root/kameleon_workdir
+    proxy_cache: $${local_ip}
+
+# Bootstrap the new system and create the 'in_context'
+bootstrap:
+  - enable_checkpoint
+  - download_installer
+  - prepare_disk
+  - prepare_autoinstall
+  - start_http_server
+  - start_qemu:
+    - force_vm_shutdown: false
+    - shutdown_vm_immediately: true
+    - vm_cleanup_section: bootstrap
+    - vm_expected_service:
+    - boot_timeout: 5
+  - prepare_ssh_to_out_context
+  - prepare_appliance
+  - start_qemu:
+    - force_vm_shutdown: true
+    - shutdown_vm_immediately: false
+    - vm_cleanup_section: setup
+    - vm_expected_service: ssh
+    - boot_timeout: 100
+    - qemu_iso_boot: false
+    - qemu_iso_path: ""
+    - qemu_kernel_path: ""
+    - qemu_sendkeys_commands: ""
+
+# Install and configuration steps
+setup:
+  - minimal_install
+  - clean_system
+
+# Export the generated appliance in the format of your choice
+export:
+  - disable_checkpoint
+  - save_appliance_VM:
+    - appliance_tar_compression_level: "9"
diff --git a/default/from_scratch/x86_64/debian-base.yaml b/default/from_scratch/x86_64/debian-base.yaml
new file mode 100644
index 0000000..447e57d
--- /dev/null
+++ b/default/from_scratch/x86_64/debian-base.yaml
@@ -0,0 +1,67 @@
+#==============================================================================
+# vim: softtabstop=2 shiftwidth=2 expandtab fenc=utf-8 cc=81 tw=80
+#==============================================================================
+#
+# DESCRIPTION: Debian generic recipe using the netinstall mechanism
+#
+# USAGE:
+#   Select directly in this recipe: see the usage example commented in the
+#   globals of this recipe
+#
+#   or, override the globals directly in CLI. For example:
+#
+#   kameleon build --global distrib:debian,release:wheezy
+#
+#   or extend this recipe with your own and override those variables in it.
+#
+#==============================================================================
+---
+extend: base.yaml
+
+global:
+  # Boilerplate values, so that `kameleon info' works with the recipe.
+  # For a specific version of Debian, please see the dedicated recipe, as this
+  # recipe is mainly meant to be extended.
+  distrib: debian
+  deb_arch: amd64
+  release: jessie
+  release_number: 8
+
+  # URL to retrieve packages from (sources.list)
+  deb_mirror_hostname: deb.debian.org
+  deb_mirror_directory: /debian
+  deb_mirror_uri: http://$${deb_mirror_hostname}$${deb_mirror_directory}
+  deb_security_hostname: security.debian.org
+  deb_security_directory: /debian
+  deb_components: main contrib non-free
+
+  # Install from the installer's iso
+  # The location of the Debian netinstall iso can be set manually or guessed
+  # using a url finder helper script
+  #installer_iso_filename: debian-$${release_number}-$${deb_arch}-netinst.iso
+  #installer_iso_location: archive
+  #installer_iso_release_version: 8.0.0
+  #installer_iso_url: http://cdimage.debian.org/cdimage/$${installer_iso_location}/$${installer_iso_release_version}/$${deb_arch}/iso-cd/$${installer_iso_filename}
+  installer_iso_url:
+  installer_iso_finder_helper: $${kameleon_data_dir}/helpers/netinstall_iso_finder.py
+  installer_iso_finder_args: $${distrib} $${release_number} $${deb_arch}
+  qemu_iso_path: $${kameleon_cwd}/$${distrib}.iso
+  # Or install from the netboot kernel and initrd directly
+  #installer_kernel_url: http://deb.debian.org/debian/dists/$${release}/main/installer-$${deb_arch}/current/images/netboot/debian-installer/$${deb_arch}/linux
+  #installer_initrd_url: http://deb.debian.org/debian/dists/$${release}/main/installer-$${deb_arch}/current/images/netboot/debian-installer/$${deb_arch}/initrd.gz
+  #installer_cmdline: "auto url=http://%LOCAL_IP%:%HTTP_PORT%/preseed.cfg"
+
+  base_preseed_path: $${kameleon_data_dir}/preseed/$${distrib}-$${release}-preseed.cfg
+  preseed_path: $${kameleon_cwd}/preseed.cfg
+
+  qemu_sendkeys_commands: $${kameleon_data_dir}/qemu-sendkeys/netinst-iso-$${distrib}
+
+
+bootstrap:
+  - "@base"
+
+setup:
+  - "@base"
+
+export:
+  - "@base"
diff --git a/default/from_scratch/x86_64/debian-testing.yaml b/default/from_scratch/x86_64/debian-testing.yaml
new file mode 100644
index 0000000..5c65ed8
--- /dev/null
+++ b/default/from_scratch/x86_64/debian-testing.yaml
@@ -0,0 +1,29 @@
+#==============================================================================
+# vim: softtabstop=2 shiftwidth=2 expandtab fenc=utf-8 cc=81 tw=80
+#==============================================================================
+#
+# DESCRIPTION: Debian testing recipe using the netinstall mechanism
+#
+#==============================================================================
+---
+extend: debian-base.yaml
+# Global variables used by the Kameleon engine and the steps
+global:
+  # Distribution
+  distrib: debian
+  release: testing
+  release_number: X
+  # This URL may be invalid when the testing distribution is in its early stage,
+  # i.e. after a recent release of a new Debian stable.
+  # In this case, it is expected for this recipe to NOT work.
+  # The debian-debootstrap-testing recipe may be preferred.
+  installer_iso_url: https://cdimage.debian.org/cdimage/weekly-builds/amd64/iso-cd/debian-testing-amd64-netinst.iso
+
+bootstrap:
+  - "@base"
+
+setup:
+  - "@base"
+
+export:
+  - "@base"
diff --git a/default/steps/aliases/defaults.yaml b/default/steps/aliases/defaults.yaml
new file mode 100644
index 0000000..6cf723b
--- /dev/null
+++ b/default/steps/aliases/defaults.yaml
@@ -0,0 +1,169 @@
+write_local:
+  - exec_local: |
+      mkdir -p $(dirname @1);
+      cat >@1 <<EOF_KAMELEON_INTERNAL
+      @2
+      EOF_KAMELEON_INTERNAL
+
+write_in:
+  - exec_in: |
+      mkdir -p $(dirname @1);
+      cat >@1 <<EOF_KAMELEON_INTERNAL
+      @2
+      EOF_KAMELEON_INTERNAL
+
+write_out:
+  - exec_out: |
+      mkdir -p $(dirname @1);
+      cat >@1 <<EOF_KAMELEON_INTERNAL
+      @2
+      EOF_KAMELEON_INTERNAL
+
+append_local:
+  - exec_local: |
+      mkdir -p $(dirname @1);
+      cat >>@1 <<EOF_KAMELEON_INTERNAL
+      @2
+      EOF_KAMELEON_INTERNAL
+
+append_in:
+  - exec_in: |
+      mkdir -p $(dirname @1);
+      cat >>@1 <<EOF_KAMELEON_INTERNAL
+      @2
+      EOF_KAMELEON_INTERNAL
+
+append_out:
+  - exec_out: |
+      mkdir -p $(dirname @1);
+      cat >>@1 <<EOF_KAMELEON_INTERNAL
+      @2
+      EOF_KAMELEON_INTERNAL
+
+write_raw_local:
+  - exec_local: |
+      mkdir -p $(dirname @1);
+      cat >@1 <<'EOF_KAMELEON_INTERNAL'
+      @2
+      EOF_KAMELEON_INTERNAL
+
+write_raw_in:
+  - exec_in: |
+      mkdir -p $(dirname @1);
+      cat >@1 <<'EOF_KAMELEON_INTERNAL'
+      @2
+      EOF_KAMELEON_INTERNAL
+
+write_raw_out:
+  - exec_out: |
+      mkdir -p $(dirname @1);
+      cat >@1 <<'EOF_KAMELEON_INTERNAL'
+      @2
+      EOF_KAMELEON_INTERNAL
+
+append_raw_local:
+  - exec_local: |
+      mkdir -p $(dirname @1);
+      cat >>@1 <<'EOF_KAMELEON_INTERNAL'
+      @2
+      EOF_KAMELEON_INTERNAL
+
+append_raw_in:
+  - exec_in: |
+      mkdir -p $(dirname @1);
+      cat >>@1 <<'EOF_KAMELEON_INTERNAL'
+      @2
+      EOF_KAMELEON_INTERNAL
+
+append_raw_out:
+  - exec_out: |
+      mkdir -p $(dirname @1);
+      cat >>@1 <<'EOF_KAMELEON_INTERNAL'
+      @2
+      EOF_KAMELEON_INTERNAL
+
+local2out:
+  - exec_out: |
+      mkdir -p $(dirname @2)
+  - pipe:
+      - exec_local: cat @1
+      - exec_out: cat > @2
+
+local2in:
+  - exec_in: mkdir -p $(dirname @2)
+  - pipe:
+      - exec_local: cat @1
+      - exec_in: cat > @2
+
+out2local:
+  - exec_local: mkdir -p $(dirname @2)
+  - pipe:
+      - exec_out: cat @1
+      - exec_local: cat > @2
+
+out2in:
+  - exec_in: mkdir -p $(dirname @2)
+  - pipe:
+      - exec_out: cat @1
+      - exec_in: cat > @2
+
+in2local:
+  - exec_local: mkdir -p $(dirname @2)
+  - pipe:
+      - exec_in: cat @1
+      - exec_local: cat > @2
+
+in2out:
+  - exec_out: mkdir -p $(dirname @2)
+  - pipe:
+      - exec_in: cat @1
+      - exec_out: cat > @2
+
+check_cmd_out:
+  - rescue:
+    - exec_out: command -V @1 2> /dev/null
+    - breakpoint: "@1 is missing from out_context"
+
+check_cmd_local:
+  - on_bootstrap_init:
+    - rescue:
+      - exec_local: command -V @1 2> /dev/null
+      - breakpoint: "@1 is missing from local_context"
+
+check_cmd_in:
+  - rescue:
+    - exec_in: command -V @1 2> /dev/null
+    - breakpoint: "@1 is missing from in_context"
+
+umount_out:
+  - exec_out: |
+      echo "try umount @1..." ; mountpoint -q "@1" && umount -f -l "@1" || true
+
+umount_local:
+  - exec_local: |
+      echo "try umount @1..." ; mountpoint -q "@1" && umount -f -l "@1" || true
+
+umount_in:
+  - exec_in: |
+      echo "try umount @1..." ; mountpoint -q "@1" && umount -f -l "@1" || true
+
+download_file_in:
+  - exec_in: __download "@1" "@2"
+
+download_file_out:
+  - exec_out: __download "@1" "@2"
+
+download_file_local:
+  - exec_local: __download "@1" "@2"
+
+download_recipe_build_local:
+  - exec_local: __download_recipe_build "@1" "@2" "@3" "@4" "@5" "@6" "@7"
+
+download_kadeploy_environment_image_local:
+  - exec_local: __download_kadeploy_environment_image "@1" "@2" "@3" "@4" "@5"
+
+apt-get_in:
+  - exec_in: DEBIAN_FRONTEND=noninteractive apt-get -y --force-yes @1 2>&1
+
+apt-get_out:
+  - exec_out: DEBIAN_FRONTEND=noninteractive apt-get -y --force-yes @1 2>&1
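
These aliases substitute their positional arguments (@1, @2, ...) into small
shell templates executed in the local, out, or in context. A sketch of what a
write_in invocation reduces to inside the guest shell; the target path and the
text are purely illustrative:

    # write_in with @1=/etc/motd and @2="built by kameleon" expands to roughly:
    mkdir -p $(dirname /etc/motd)
    cat >/etc/motd <<EOF_KAMELEON_INTERNAL
    built by kameleon
    EOF_KAMELEON_INTERNAL
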
diff --git a/default/steps/bootstrap/debian/prepare_autoinstall.yaml b/default/steps/bootstrap/debian/prepare_autoinstall.yaml
new file mode 100644
index 0000000..f737d20
--- /dev/null
+++ b/default/steps/bootstrap/debian/prepare_autoinstall.yaml
@@ -0,0 +1,11 @@
+- copy_autoinstall_script_to_http_directory:
+  - exec_local: mkdir -p $${http_directory}
+  - exec_local: cp $${base_preseed_path} $${http_directory}/preseed.cfg
+
+- customize_preseed:
+  - exec_local: sed -i -e 's|\(d-i passwd/root-password password \).*|\1$${root_password}|g' $${http_directory}/preseed.cfg
+  - exec_local: sed -i -e 's|\(d-i passwd/root-password-again password \).*|\1$${root_password}|g' $${http_directory}/preseed.cfg
+  - exec_local: sed -i -e 's|\(d-i mirror/http/hostname string \).*|\1$${deb_mirror_hostname}|g' $${http_directory}/preseed.cfg
+  - exec_local: sed -i -e 's|\(d-i mirror/http/directory string \).*|\1$${deb_mirror_directory}|g' $${http_directory}/preseed.cfg
+  - exec_local: sed -i -e 's|\(d-i apt-setup/security_host string \).*|\1$${deb_security_hostname}|g' $${http_directory}/preseed.cfg
+  - exec_local: sed -i -e 's|\(d-i apt-setup/security_path string \).*|\1$${deb_security_directory}|g' $${http_directory}/preseed.cfg
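
Each sed call above rewrites one debian-installer directive in the copied
preseed.cfg, keeping the key and replacing the value with the recipe variable.
A hedged illustration of the effect of the mirror hostname substitution
(the concrete values are made up):

    # before: d-i mirror/http/hostname string ftp.fr.debian.org
    # after:  d-i mirror/http/hostname string deb.debian.org
    sed -i -e 's|\(d-i mirror/http/hostname string \).*|\1deb.debian.org|g' preseed.cfg
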
diff --git a/default/steps/bootstrap/download_installer.yaml b/default/steps/bootstrap/download_installer.yaml
new file mode 100644
index 0000000..f15f58c
--- /dev/null
+++ b/default/steps/bootstrap/download_installer.yaml
@@ -0,0 +1,31 @@
+- download_installer:
+  - test:
+     - exec_local: test -n "$${installer_iso_url}" -o -n "$${installer_iso_finder_helper}"
+     - group:
+       - test:
+          - exec_local: test -z "$${installer_iso_url}"
+          - exec_local: |
+              echo "Looking for the netinstall iso URL for 
$${installer_iso_finder_args}"
+              DOWNLOAD_SRC_URL=$(python2 $${installer_iso_finder_helper} 
$${installer_iso_finder_args})
+       - download_file_local:
+         - $${installer_iso_url}
+         - $${qemu_iso_path}
+       - exec_local: unset DOWNLOAD_SRC_URL
+     - group:
+       - test:
+         - exec_local: test -n "$${installer_kernel_url}"
+         - download_file_local:
+           - $${installer_kernel_url}
+           - $${qemu_kernel_path}
+       - test:
+         - exec_local: test -n "$${installer_initrd_url}"
+         - download_file_local:
+           - $${installer_initrd_url}
+           - $${qemu_initrd_path}
+
+- delete_installer:
+  - on_checkpoint: skip
+  - on_export_clean:
+    - exec_local: rm -f $${qemu_iso_path}
+    - exec_local: rm -f $${qemu_kernel_path}
+    - exec_local: rm -f $${qemu_initrd_path}
diff --git a/default/steps/bootstrap/prepare_appliance.yaml b/default/steps/bootstrap/prepare_appliance.yaml
new file mode 100644
index 0000000..4f597c4
--- /dev/null
+++ b/default/steps/bootstrap/prepare_appliance.yaml
@@ -0,0 +1,33 @@
+- insecure_ssh_key: $${kameleon_cwd}/insecure_ssh_key
+
+- generate_ssh_keys:
+  - check_cmd_local: ssh-keygen
+  - exec_local: echo -e  'y\n' | ssh-keygen -q -t rsa -b 4096 -f $${insecure_ssh_key} -N ''
+  - exec_local: cat $${insecure_ssh_key}
+
+- inject_ssh_private_key:
+  - check_cmd_local: virt-customize
+  - exec_local: |
+      virt-customize \
+        -a $${image_disk}.$${image_format} \
+        --run-command 'mkdir -p /root/.ssh' \
+        --upload $${insecure_ssh_key}.pub:/root/.ssh/.kameleon_authorized_keys \
+        --run-command 'touch /root/.ssh/authorized_keys' \
+        --run-command 'cp /root/.ssh/authorized_keys /root/.ssh/authorized_keys.bak' \
+        --run-command 'cat /root/.ssh/.kameleon_authorized_keys >> /root/.ssh/authorized_keys' \
+        --run-command 'chmod 700 /root/.ssh' \
+        --run-command 'chmod -R go-rw /root/.ssh' \
+        --run-command 'chown -R root:root /root/.ssh'
+  - on_export_init:
+    - exec_local: |
+        virt-customize \
+          -a $${image_disk}.$${image_format} \
+          --run-command 'mv /root/.ssh/authorized_keys.bak /root/.ssh/authorized_keys' \
+          --delete /root/.ssh/.kameleon_authorized_keys
+
+- add_insecure_key_to_ssh_config:
+  - on_checkpoint: redo
+  - exec_local: |
+      cat <<EOF >> $${ssh_config_file}
+      IdentityFile $${insecure_ssh_key}
+      EOF
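
The step above injects a throwaway "insecure" key pair with virt-customize so
that later steps can SSH into the guest as root, then removes it again during
on_export_init. A hedged way to double-check the injection by hand (the image
name is illustrative):

    # show the authorized_keys entries that virt-customize appended inside the image
    virt-cat -a base_myrecipe.qcow2 /root/.ssh/authorized_keys
    # after on_export_init has run, .kameleon_authorized_keys should be gone again
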
diff --git a/default/steps/bootstrap/prepare_disk.yaml b/default/steps/bootstrap/prepare_disk.yaml
new file mode 100644
index 0000000..9c3dce4
--- /dev/null
+++ b/default/steps/bootstrap/prepare_disk.yaml
@@ -0,0 +1,10 @@
+- create_initial_image:
+  - check_cmd_local: qemu-img
+  - exec_local: |
+      rm -f $${image_disk}.$${image_format}
+      qemu-img create -f qcow2 $${image_disk}.$${image_format} $${qemu_image_size}
+
+- delete_initial_image:
+  - on_checkpoint: skip
+  - on_export_clean:
+    - exec_local: rm -f $${image_disk}.$${image_format}
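
prepare_disk only pre-creates an empty qcow2 of qemu_image_size for the
installer to partition. The equivalent manual commands, with an illustrative
file name:

    # create a sparse 10G qcow2 and check it; actual disk usage stays tiny
    qemu-img create -f qcow2 base_myrecipe.qcow2 10G
    qemu-img info base_myrecipe.qcow2
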
diff --git a/default/steps/bootstrap/prepare_ssh_to_out_context.yaml b/default/steps/bootstrap/prepare_ssh_to_out_context.yaml
new file mode 100644
index 0000000..172f7a4
--- /dev/null
+++ b/default/steps/bootstrap/prepare_ssh_to_out_context.yaml
@@ -0,0 +1,23 @@
+- select_empty_port:
+  - on_checkpoint: redo
+  - exec_local: |
+      # Find empty SSH forwarding port
+      SSH_FWD_PORT=$(__find_free_port 50000 60000)
+      echo "SSH forwarding port: $SSH_FWD_PORT"
+- prepare_ssh_config:
+  - on_checkpoint: redo
+  - write_local:
+    - $${ssh_config_file}
+    - |
+      Host $${kameleon_recipe_name}
+      HostName 127.0.0.1
+      Port ${SSH_FWD_PORT}
+      User root
+      UserKnownHostsFile /dev/null
+      StrictHostKeyChecking no
+      PasswordAuthentication no
+      IdentitiesOnly yes
+      LogLevel FATAL
+      ForwardAgent yes
+      Compression yes
+      Protocol 2
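
The generated ssh_config points at 127.0.0.1 on the forwarded port chosen in
select_empty_port, so later steps (or a curious user) can reach the guest as
root. A hedged manual check, with an illustrative build directory and recipe
name:

    # run a command in the VM through the user-mode network port forward
    ssh -F build/debian10-taler/ssh_config debian10-taler hostname
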
diff --git a/default/steps/bootstrap/start_http_server.yaml b/default/steps/bootstrap/start_http_server.yaml
new file mode 100644
index 0000000..59184c3
--- /dev/null
+++ b/default/steps/bootstrap/start_http_server.yaml
@@ -0,0 +1,19 @@
+- http_script: $${kameleon_data_dir}/helpers/simple_http_server.py
+
+- run_http_server:
+  - exec_local: |
+      HTTP_PORT=$(__find_free_port 8000 8100)
+      echo "HTTP port: $HTTP_PORT"
+      export HTTP_PORT
+  - exec_local: python2 $${http_script} --root $${http_directory} --bind 0.0.0.0 --port $HTTP_PORT --daemon --pid $${http_pid}
+  - on_bootstrap_clean:
+    - exec_local: |
+        if [ -f $${http_pid} ]; then
+          HTTP_PID=$(cat $${http_pid})
+          if ps -p $HTTP_PID > /dev/null; then
+              echo "Killing HTTP server (pid: $HTTP_PID)..."
+              kill -9 "$HTTP_PID"
+              rm -f $${http_pid}
+          fi
+          rm -f $${http_pid}
+        fi
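
The small HTTP server exists so the installer running inside qemu can fetch the
customized preseed over the user-mode network (10.0.2.2 from the guest's point
of view). A hedged check from the host, with an illustrative port number:

    # the guest requests http://10.0.2.2:$HTTP_PORT/preseed.cfg; from the host:
    curl -s http://127.0.0.1:8000/preseed.cfg | head
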
diff --git a/default/steps/bootstrap/start_qemu.yaml b/default/steps/bootstrap/start_qemu.yaml
new file mode 100644
index 0000000..4d47953
--- /dev/null
+++ b/default/steps/bootstrap/start_qemu.yaml
@@ -0,0 +1,227 @@
+# Require SSH_FWD_PORT bash environment variable to be set
+
+# This must be set if you want to boot an ISO image:
+- qemu_iso_path: ""
+- qemu_iso_boot: true
+# Else that can be set to boot from a kernel, initrd and cmdline:
+- qemu_kernel_path: ""
+- qemu_initrd_path: ""
+- qemu_append_cmdline: ""
+# Else boot from disk.
+
+- vm_expected_service: ssh
+- boot_timeout: 100
+- shutdown_timeout: 100
+- debug: false
+- telnet_port: ""
+- no_reboot: true
+- socat_monitor: socat - UNIX-CONNECT:$${qemu_monitor_socket}
+- qemu_sendkeys_script: $${kameleon_data_dir}/qemu-sendkeys.rb
+- qemu_sendkeys_commands: 
+- vm_expected_service: ssh
+- vm_cleanup_section: setup
+- shutdown_vm_immediately: false
+- force_vm_shutdown: true
+- qemu_enable_kvm: true
+- qemu_cpus: 2
+- qemu_memory_size: 768
+- qemu_monitor_socket: $${kameleon_cwd}/qemu_monitor.socket
+- qemu_arch: $${arch}
+- qemu_image_size: 10G
+- qemu_pidfile: $${kameleon_cwd}/qemu.pid
+- qemu_uefi: false
+- qemu_uefi_code_path: /usr/share/AAVMF/AAVMF_CODE.fd
+- qemu_uefi_vars_path: /usr/share/AAVMF/AAVMF_VARS.fd
+- qemu_netdev_user_options:
+- disk_cache: unsafe
+
+- start_vm:
+  - on_checkpoint: redo
+  - check_cmd_local: qemu-system-$${qemu_arch}
+  - check_cmd_local: socat
+  - on_bootstrap_clean:
+    - test:
+      - exec_local: test "$${shutdown_vm_immediately}" == "false" -a "$${vm_cleanup_section}" == "bootstrap"
+      - group: 
+        - exec_local: &1 |
+            if [ -f $${qemu_pidfile} ]; then
+              _QEMU_PID=$(< $${qemu_pidfile})
+              if ps -p $_QEMU_PID > /dev/null; then
+                if [ "$${force_vm_shutdown}" == "true" ]; then
+                  if [ -S $${qemu_monitor_socket} ]; then
+                    echo "Executing a graceful shutdown of the qemu VM via the 
monitor socket..."
+                    NEXT_WAIT_TIME=0
+                    echo system_powerdown | socat - 
UNIX-CONNECT:$${qemu_monitor_socket} || true
+                    while ps -p $_QEMU_PID > /dev/null && [ $NEXT_WAIT_TIME 
-lt $${shutdown_timeout} ];
+                    do
+                      sleep 1
+                      echo -en "\rWaiting for qemu virtual machine to 
shutdown...($(( $${shutdown_timeout} - 1 - NEXT_WAIT_TIME++ ))s)"
+                    done
+                  fi
+                else
+                  echo "Waiting for the VM to shutdown"
+                  echo "Run 'vncviewer :$VNC_PORT' to see what's happening in 
the VM"
+                  while ps -p $_QEMU_PID > /dev/null;
+                  do
+                    sleep 2
+                  done
+                fi
+              fi
+            fi
+        - exec_local: &2 |
+            if [ -f $${qemu_pidfile} ]; then
+              _QEMU_PID=$(< $${qemu_pidfile})
+              if ps -p $_QEMU_PID > /dev/null; then
+                if [ -S $${qemu_monitor_socket} ]; then
+                  echo "The graceful shutdown of the qemu VM should have 
failed (monitor socket is there)..."
+                fi
+                echo "Killing qemu (pid: $_QEMU_PID)."
+                kill -9 "$_QEMU_PID"
+              fi
+              rm -f $${qemu_pidfile}
+            fi
+            rm -f $${qemu_monitor_socket}
+  - on_setup_clean:
+    - test:
+      - exec_local: test "$${shutdown_vm_immediately}" == "false" -a "$${vm_cleanup_section}" == "setup"
+      - group:
+        - exec_local: *1
+        - exec_local: *2
+  - on_export_clean:
+    - test:
+      - exec_local: test "$${shutdown_vm_immediately}" == "false" -a "$${vm_cleanup_section}" == "export"
+      - group:
+        - exec_local: *1
+        - exec_local: *2
+  - exec_local: |
+      if [ "$${shutdown_vm_immediately}" == "true" ]; then
+        echo "Qemu VM shutdown: immediately"
+      else
+        echo "Qemu VM shutdown: in $${vm_cleanup_section} section cleaning"
+      fi
+  - exec_local: |
+      if [ -r $${qemu_pidfile} ] && pgrep -F $${qemu_pidfile} > /dev/null; then
+        echo "Qemu pid file found, with process running: killing it !" 1>&2
+        pkill -F $${qemu_pidfile}
+        sleep 0.5
+        if pgrep -F $${qemu_pidfile} > /dev/null; then
+          echo "Failed to kill qemu process." 1>&2
+          exit 1
+        fi
+      fi
+  - exec_local: |
+      echo "Starting qemu..."
+      if [ "$${qemu_enable_kvm}" == "true" ] && (/usr/sbin/kvm-ok > /dev/null 
|| egrep '(vmx|svm)' /proc/cpuinfo > /dev/null) ; then # print warning if 
/usr/sbin/kvm-ok is not installed
+        if [ "$${qemu_arch}" == "aarch64" ]; then
+          ENABLE_KVM="-enable-kvm -accel kvm -machine 
virt,gic-version=host,accel=kvm:tcg -cpu host"
+          #ENABLE_KVM="-global virtio-blk-pci.scsi=off -no-user-config 
-enable-fips -machine virt,gic-version=host,accel=kvm:tcg -cpu host -rtc 
driftfix=slew -object rng-random,filename=/dev/urandom,id=rng0 -device 
virtio-rng-pci,rng=rng0"
+        elif [ "$${qemu_arch}" == "ppc64" ]; then
+          ENABLE_KVM="-enable-kvm -accel kvm -machine pseries,accel=kvm:tcg 
-cpu host"
+        else #X86_64
+          ENABLE_KVM="-enable-kvm -cpu host"
+        fi
+        BOOT_TIMEOUT=$${boot_timeout}
+      else
+        echo "No KVM acceleration used"
+        BOOT_TIMEOUT=$(($${boot_timeout}*2))
+      fi
+      if [ -f "vm_state_to_load.txt" ]
+      then
+          SAVED_STATE="$(< vm_state_to_load.txt)"
+          LOADVM="-loadvm $SAVED_STATE"
+          rm -f vm_state_to_load.txt
+      fi
+      if [ "$${debug}" == "true" ]; then
+        VNC_OPT=""
+      else
+        # Find empty VNC port
+        VNC_PORT=$(( $(__find_free_port 5900 5910) - 5900 ))
+        echo "VNC port: $VNC_PORT"
+        VNC_OPT="-vnc :$VNC_PORT"
+      fi
+      if [ -n "$${telnet_port}" ]; then
+        SERIAL_TELNET="telnet:localhost:$${telnet_port},server"
+      fi
+      # Select disk
+      QEMU_DRIVES="-drive 
file=$${image_disk}.$${image_format},cache=$${disk_cache},media=disk,if=virtio,id=drive0"
+      QEMU_BOOT=
+      QEMU_APPEND_CMDLINE=
+      if [ "$${qemu_uefi}" == "true" ]; then
+        if [ ! -f $${kameleon_cwd}/qemu_uefi_vars.fd ]; then
+          cp $${qemu_uefi_vars_path} $${kameleon_cwd}/qemu_uefi_vars.fd
+        fi
+        QEMU_BOOT="-drive 
if=pflash,format=raw,readonly,file=$${qemu_uefi_code_path} -drive 
if=pflash,format=raw,file=$${kameleon_cwd}/qemu_uefi_vars.fd"
+      fi
+      if [ -n "$${qemu_iso_path}" ]; then
+        QEMU_DRIVES="-drive file=$${qemu_iso_path},readonly,media=cdrom 
$QEMU_DRIVES"
+        if [ "$${qemu_iso_boot}" == "true" ]; then
+          QEMU_BOOT="$QEMU_BOOT -boot order=d"
+        fi
+      elif [ -n "$${qemu_kernel_path}" ]; then
+        QEMU_BOOT="$QEMU_BOOT -kernel $${qemu_kernel_path}"
+        if [ -n "$${qemu_initrd_path}" ]; then
+          QEMU_BOOT="$QEMU_BOOT -initrd $${qemu_initrd_path}"
+        fi
+        if [ -n "$${qemu_append_cmdline}" ]; then
+          QEMU_APPEND_CMDLINE="$${qemu_append_cmdline}"
+          QEMU_APPEND_CMDLINE=${QEMU_APPEND_CMDLINE//%LOCAL_IP%/$${local_ip}}
+          QEMU_APPEND_CMDLINE=${QEMU_APPEND_CMDLINE//%HTTP_PORT%/$HTTP_PORT}
+        fi
+      fi
+      if [ -n "$${qemu_netdev_user_options}" ]; then
+        QEMU_NETDEV_USER_OPTIONS=",$${qemu_netdev_user_options}"
+      fi
+      if [ "$${no_reboot}" == "true" ]; then
+        NO_REBOOT="-no-reboot"
+      fi
+      if [ -n "${SSH_FWD_PORT}" ]; then
+        HOSTFWD=",hostfwd=tcp::${SSH_FWD_PORT}-:22"
+      fi
+      qemu-system-$${qemu_arch} $ENABLE_KVM -smp $${qemu_cpus} -m $${qemu_memory_size} -rtc base=localtime \
+        -net nic,model=virtio -net user${QEMU_NETDEV_USER_OPTIONS}${HOSTFWD} \
+        $QEMU_DRIVES \
+        -monitor unix:$${qemu_monitor_socket},server,nowait -pidfile $${qemu_pidfile} -daemonize \
+        $QEMU_BOOT ${QEMU_APPEND_CMDLINE:+-append "$QEMU_APPEND_CMDLINE"} $NO_REBOOT \
+        $VNC_OPT $SERIAL_TELNET \
+        $LOADVM
+  - exec_local: |
+      VM_AVAILABLE=0
+      if [ "$${vm_expected_service}" == "ssh" ]; then
+        TIMEOUT=$(( $(date +%s) + $BOOT_TIMEOUT ))
+        until timeout 5 ssh -q -F $${ssh_config_file} -o ConnectionAttempts=1 $${kameleon_recipe_name} -t true && VM_AVAILABLE=1 || [ $(date +%s) -gt $TIMEOUT ];
+        do
+          echo -en "\rWaiting for SSH to become available in VM for out_context...($(( TIMEOUT - $(date +%s) ))s)"
+          sleep 1
+        done
+        echo
+      else
+        TIMEOUT=$(( $(date +%s) + $BOOT_TIMEOUT ))
+        until timeout 1 [ $(date +%s) -gt $TIMEOUT ];
+        do
+          echo -en "\rWaiting for VM to become available : ($(( TIMEOUT - 
$(date +%s) ))s)"
+          sleep 1
+        done
+        echo
+        VM_AVAILABLE=1
+      fi
+  - rescue:
+    - exec_local: test $VM_AVAILABLE -eq 1
+    - breakpoint: |
+        Failed to get VM up and running (expected service: $${vm_expected_service}). Please verify the VM successfully booted with a vnc client.
+  - test:
+    - exec_local: test -e "$${qemu_sendkeys_commands}" -a -s "$${qemu_sendkeys_commands}"
+    - exec_local: |
+        echo "Sending keyboard commands to the VM: $${qemu_sendkeys_commands}"
+        echo "(Local httpd server url: http://$${local_ip}:$HTTP_PORT)"
+        ruby $${qemu_sendkeys_script} -d 0.05 "$(sed -e s/%LOCAL_IP%/$${local_ip}/g -e s/%HTTP_PORT%/$HTTP_PORT/g $${qemu_sendkeys_commands})" | $${socat_monitor} > /dev/null
+    - exec_local: echo "No keyboard commands to send"
+
+- shutdown_vm:
+  - on_checkpoint: redo
+  - on_clean:
+    - test:
+      - exec_local: test "$${shutdown_vm_immediately}" == "true"
+      - exec_local: *2
+  - test:
+    - exec_local: test "$${shutdown_vm_immediately}" == "true"
+    - exec_local: *1
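
Stripped of its conditionals, the command assembled by start_vm boils down to a
virtio disk, user-mode networking with an SSH host-forward, a monitor socket
for graceful shutdown, and an optional VNC display. A minimal sketch with
stand-in values (ports, sizes and file names are illustrative):

    qemu-system-x86_64 -enable-kvm -cpu host -smp 2 -m 768 \
      -net nic,model=virtio -net user,hostfwd=tcp::50022-:22 \
      -drive file=base_myrecipe.qcow2,cache=unsafe,media=disk,if=virtio,id=drive0 \
      -monitor unix:qemu_monitor.socket,server,nowait \
      -pidfile qemu.pid -daemonize -vnc :0
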
diff --git a/default/steps/checkpoints/simple.yaml b/default/steps/checkpoints/simple.yaml
new file mode 100644
index 0000000..dbd60df
--- /dev/null
+++ b/default/steps/checkpoints/simple.yaml
@@ -0,0 +1,21 @@
+enabled?:
+  - exec_local: test -f $${kameleon_cwd}/checkpoint_enabled
+
+create:
+  - exec_local: |
+      echo @microstep_id >> $${kameleon_cwd}/checkpoints.list
+
+apply:
+  - exec_local: |
+      touch $${kameleon_cwd}/checkpoints.list
+      grep -R @microstep_id $${kameleon_cwd}/checkpoints.list
+
+
+clear:
+  - exec_local: |
+      echo > $${kameleon_cwd}/checkpoints.list
+
+list:
+  - exec_local: |
+      touch $${kameleon_cwd}/checkpoints.list
+      cat $${kameleon_cwd}/checkpoints.list | uniq
diff --git a/default/steps/data/helpers/export_appliance.py b/default/steps/data/helpers/export_appliance.py
new file mode 100644
index 0000000..634b240
--- /dev/null
+++ b/default/steps/data/helpers/export_appliance.py
@@ -0,0 +1,242 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""Convert a disk image to many others formats with guestfish."""
+from __future__ import division, unicode_literals
+
+import os
+# import time
+import os.path as op
+import sys
+import subprocess
+import argparse
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+tar_formats = ('tar', 'tar.gz', 'tgz', 'tar.bz2', 'tbz', 'tar.xz', 'txz',
+               'tar.lzo', 'tzo')
+
+tar_options = ["--selinux", "--xattrs", "--xattrs-include=*", 
"--numeric-owner", "--one-file-system"] 
+
+disk_formats = ('qcow', 'qcow2', 'qed', 'vdi', 'raw', 'vmdk')
+
+
+def which(command):
+    """Locate a command.
+    Snippet from: http://stackoverflow.com/a/377028
+    """
+    def is_exe(fpath):
+        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+    fpath, fname = os.path.split(command)
+    if fpath:
+        if is_exe(command):
+            return command
+    else:
+        for path in os.environ["PATH"].split(os.pathsep):
+            path = path.strip('"')
+            exe_file = os.path.join(path, command)
+            if is_exe(exe_file):
+                return exe_file
+
+    raise ValueError("Command '%s' not found" % command)
+
+
+def tar_convert(disk, output, excludes, compression_level):
+    """Convert image to a tar rootfs archive."""
+    if compression_level in ("best", "fast"):
+        compression_level_opt = "--%s" % compression_level
+    else:
+        compression_level_opt = "-%s" % compression_level
+
+    compr = ""
+    if output.endswith(('tar.gz', 'tgz')):
+        try:
+            compr = "| %s %s" % (which("pigz"), compression_level_opt)
+        except:
+            compr = "| %s %s" % (which("gzip"), compression_level_opt)
+    elif output.endswith(('tar.bz2', 'tbz')):
+        compr = "| %s %s" % (which("bzip2"), compression_level_opt)
+    elif output.endswith(('tar.xz', 'txz')):
+        compr = "| {} {} -c --threads=0 -".format(
+            which("xz"), compression_level_opt)
+    elif output.endswith(('tar.lzo', 'tzo')):
+        compr = "| %s %s -c -" % (which("lzop"), compression_level_opt)
+
+    # NB: guestfish version >= 1.32 supports the special tar options, but not available in Debian stable (jessie): do not use for now
+    #tar_options_list = ["selinux:true", "acls:true", "xattrs:true",
+    #                    "numericowner:true",
+    #                    "excludes:\"%s\"" % ' '.join(excludes)]
+    #tar_options_str = ' '.join(tar_options_list)
+    #cmd = which("guestfish") + \
+    #    " --ro -i tar-out -a %s / - %s %s > %s"
+    #cmd = cmd % (disk, tar_options_str, compr, output)
+    #proc = subprocess.Popen(cmd_mount_tar, env=os.environ.copy(), shell=True)
+    #proc.communicate()
+    #if proc.returncode:
+    #    raise subprocess.CalledProcessError(proc.returncode, cmd)
+
+    tar_options_str = ' '.join(tar_options + ['--exclude="%s"' % s for s in excludes])
+    # Necessary to have quick access to /etc (bug 12240) and also good for reproducibility
+    tar_options_str += ' --sort=name'
+    directory = dir_path = os.path.dirname(os.path.realpath(disk))
+    cmds = [
+        which("mkdir") + " %s/.mnt" % directory,
+        which("guestmount") + " --ro -i -a %s %s/.mnt" % (disk, directory),
+        which("tar") + " -c %s -C %s/.mnt . %s > %s" % (tar_options_str, directory, compr, output)
+        ]
+    cmd_mount_tar = " && ".join(cmds)
+    proc = subprocess.Popen(cmd_mount_tar, env=os.environ.copy(), shell=True)
+    proc.communicate()
+    returncode_mount_tar = proc.returncode
+
+    # try to umount even if the previous command failed
+    cmds = [
+        which("guestunmount") + " %s/.mnt" % directory,
+        which("rmdir") + " %s/.mnt" % directory
+        ]
+    cmd_umount = " && ".join(cmds)
+    proc = subprocess.Popen(cmd_umount, env=os.environ.copy(), shell=True)
+    proc.communicate()
+    returncode_umount = proc.returncode
+
+    if returncode_mount_tar:
+        raise subprocess.CalledProcessError(returncode_mount_tar, cmd_mount_tar)
+    elif returncode_umount:
+        raise subprocess.CalledProcessError(returncode_umount, cmd_umount)
+
+
+def qemu_convert(disk, output_fmt, output_filename):
+    """Convert the disk image filename to disk image output_filename."""
+    binary = which("qemu-img")
+    cmd = [binary, "convert", "-O", output_fmt, disk, output_filename]
+    if output_fmt in ("qcow", "qcow2"):
+        cmd.insert(2, "-c")
+    proc = subprocess.Popen(cmd, env=os.environ.copy(), shell=False)
+    proc.communicate()
+    if proc.returncode:
+        raise subprocess.CalledProcessError(proc.returncode, ' '.join(cmd))
+
+
+def run_guestfish_script(disk, script, mount=""):
+    """
+    Run guestfish script.
+    Mount should be in ("read_only", "read_write", "ro", "rw")
+    """
+    args = [which("guestfish"), '-a', disk]
+    if mount in ("read_only", "read_write", "ro", "rw"):
+        args.append('-i')
+        if mount in ("read_only", "ro"):
+            args.append('--ro')
+        else:
+            args.append('--rw')
+    else:
+        script = "run\n%s" % script
+    proc = subprocess.Popen(args,
+                            stdin=subprocess.PIPE,
+                            env=os.environ.copy())
+    proc.communicate(input=script.encode('utf-8'))
+    if proc.returncode:
+        raise subprocess.CalledProcessError(proc.returncode, ' '.join(args))
+
+
+def guestfish_zerofree(filename):
+    """Fill free space with zero"""
+    logger.info(guestfish_zerofree.__doc__)
+    cmd = "virt-filesystems -a %s" % filename
+    fs = subprocess.check_output(cmd.encode('utf-8'),
+                                 stderr=subprocess.STDOUT,
+                                 shell=True,
+                                 env=os.environ.copy())
+    list_fs = fs.decode('utf-8').split()
+    logger.info('\n'.join(('  `--> %s' % i for i in list_fs)))
+    script = '\n'.join(('zerofree %s' % i for i in list_fs))
+    run_guestfish_script(filename, script, mount="read_only")
+
+
+def convert_disk_image(args):
+    """Convert disk to another format."""
+    filename = op.abspath(args.file.name)
+    output = op.abspath(args.output)
+
+    os.environ['LIBGUESTFS_CACHEDIR'] = os.getcwd()
+    if args.verbose:
+        os.environ['LIBGUESTFS_DEBUG'] = '1'
+
+    # sometimes guestfish fails because other virtualization tools are
+    # still running; use a test-and-retry loop to wait for availability
+    # attempts = 0
+    # while attempts < 3:
+    #    try:
+    #        logger.info("Waiting for virtualisation to be available...")
+    #        run_guestfish_script(filename, "cat /etc/hostname", mount='ro')
+    #        break
+    #    except:
+    #        attempts += 1
+    #        time.sleep(1)
+
+    if args.zerofree and (set(args.formats) & set(disk_formats)):
+        guestfish_zerofree(filename)
+
+    for fmt in args.formats:
+        if fmt in (tar_formats + disk_formats):
+            output_filename = "%s.%s" % (output, fmt)
+            if output_filename == filename:
+                continue
+            logger.info("Creating %s" % output_filename)
+            try:
+                if fmt in tar_formats:
+                    tar_convert(filename, output_filename,
+                                args.tar_excludes,
+                                args.tar_compression_level)
+                else:
+                    qemu_convert(filename, fmt, output_filename)
+            except ValueError as exp:
+                logger.error("Error: %s" % exp)
+
+
+if __name__ == '__main__':
+    allowed_formats = tar_formats + disk_formats
+    allowed_formats_help = 'Allowed values are ' + ', '.join(allowed_formats)
+
+    allowed_levels = ["%d" % i for i in range(1, 10)] + ["best", "fast"]
+    allowed_levels_helps = 'Allowed values are ' + ', '.join(allowed_levels)
+
+    parser = argparse.ArgumentParser(
+        description=sys.modules[__name__].__doc__,
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
+    parser.add_argument('file', action="store", type=argparse.FileType('r'),
+                        help='Disk image filename')
+    parser.add_argument('-F', '--formats', action="store", type=str, nargs='+',
+                        help='Output format. ' + allowed_formats_help,
+                        choices=allowed_formats, metavar='fmt', required=True)
+    parser.add_argument('-o', '--output', action="store", type=str,
+                        help='Output filename (without file extension)',
+                        required=True, metavar='filename')
+    parser.add_argument('--tar-compression-level', action="store", type=str,
+                        default="9", choices=allowed_levels, metavar='lvl',
+                        help="Compression level. " + allowed_levels_helps)
+    parser.add_argument('--tar-excludes', action="store", type=str, nargs='+',
+                        help="Files to excluded from archive",
+                        metavar='pattern', default=[])
+    parser.add_argument('--zerofree', action="store_true", default=False,
+                        help='Zero free unallocated blocks from ext2/3 '
+                             'file-systems before export to reduce image size')
+    parser.add_argument('--verbose', action="store_true", default=False,
+                        help='Enable very verbose messages')
+    log_format = '%(levelname)s: %(message)s'
+    level = logging.INFO
+    args = parser.parse_args()
+    if args.verbose:
+        level = logging.DEBUG
+
+    handler = logging.StreamHandler(sys.stdout)
+    handler.setLevel(level)
+    handler.setFormatter(logging.Formatter(log_format))
+
+    logger.setLevel(level)
+    logger.addHandler(handler)
+
+    convert_disk_image(args)
diff --git a/default/steps/data/helpers/netinstall_iso_finder.py b/default/steps/data/helpers/netinstall_iso_finder.py
new file mode 100644
index 0000000..b4a135b
--- /dev/null
+++ b/default/steps/data/helpers/netinstall_iso_finder.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""Find the latest netinstall iso for a Debian version and system 
architecture."""
+
+from HTMLParser import HTMLParser
+from urllib2 import urlopen
+from urlparse import urljoin
+import re
+import sys
+import argparse
+import logging
+
+logger = logging.getLogger(__name__)
+
+class LinkParser(HTMLParser):
+    """Retrieve links (a hrefs) from a text/html document"""
+    def __init__(self, url):
+        HTMLParser.__init__(self)
+        self.url = url
+        self.links = set()
+        response = urlopen(url)
+        contentType = response.info().get('Content-Type')
+        if not contentType:
+            return
+        logger.debug("url = " + url );
+        logger.debug("contentType = " + contentType );
+        if ';' in contentType:
+            (mediaType,charset) = contentType.split(";")
+            charset = charset.split("=")[1]
+        else:
+            mediaType = contentType
+            # ISO-8859-1 is no longer the default charset, see https://tools.ietf.org/html/rfc7231#appendix-B
+            # Let's use UTF-8.
+            charset = "utf-8"
+        if mediaType =='text/html':
+            htmlBytes = response.read()
+            htmlString = htmlBytes.decode(charset)
+            self.feed(htmlString)
+
+    def handle_starttag(self, tag, attrs):
+        if tag == 'a':
+            for (key, value) in attrs:
+                if key == 'href':
+                    new_url = urljoin(self.url,value)
+                    if re.match("^"+self.url, new_url):
+                        self.links.add(new_url)
+
+    def get_links(self):
+        """Returns all the collected links"""
+        return self.links
+
+
+def url_find(to_visit_url_set,visited_url_set,found_url_set):
+    """Recursively look for urls given a regex, a set of urls to visit, a set 
of already visited urls, a set of already found urls. Returns the set of found 
urls"""
+    logger.debug("Progress: to_visit:{} visited:{} 
found:{}".format(len(to_visit_url_set),len(visited_url_set),len(found_url_set)))
+    assert(len(to_visit_url_set.intersection(visited_url_set)) == 0)
+    assert(len(to_visit_url_set.intersection(found_url_set)) == 0)
+    if (len(to_visit_url_set) == 0):
+        return [visited_url_set,found_url_set]
+    else:
+        url = to_visit_url_set.pop()
+        visited_url_set.add(url)
+        if target_regex.match(url):
+            found_url_set.add(url)
+            return url_find(to_visit_url_set, visited_url_set, found_url_set)
+        else:
+            new_url_set = set([url for url in LinkParser(url).get_links() if (logger.debug(url) or True) and url_regex.match(url)])
+            new_url_set.difference_update(visited_url_set)
+            to_visit_url_set.update(new_url_set)
+            return url_find(to_visit_url_set, visited_url_set, found_url_set)
+
+def key_normalize(version_string):
+    """"
+    In order to perform a natural sorting, we normalize the version (X.Y.Z) as 
a unique integer with the following formula: X*100 + Y*10 + Z
+    For instance, it solves situations where "9.9.0" is greater than "9.9.11"
+    """
+    splitted_string = version_string.split('.')
+    assert(len(splitted_string) == 3)
+    return 
int(splitted_string[0])*100+int(splitted_string[1])*10+int(splitted_string[2])
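+# For example (illustrative values, not taken from any mirror listing):
+#   key_normalize("9.9.11") -> 1001
+#   key_normalize("10.2.1") -> 1021
+# so "9.9.11" sorts above "9.9.0" (which maps to 990), as intended.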
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument("distrib", metavar="DISTRIB", help="distribution")
+    parser.add_argument("version", metavar="VERSION", help="version")
+    parser.add_argument("arch", metavar="ARCH", help="architecture")
+    parser.add_argument("mirror", metavar="MIRROR", help="mirror", nargs="?")
+    parser.add_argument('--info', action="store_true", default=False, help='print info messages')
+    parser.add_argument('--debug', action="store_true", default=False, help='print debug messages')
+    args = parser.parse_args()
+
+    handler = logging.StreamHandler()
+    if args.debug:
+        logger.setLevel(logging.DEBUG)
+        handler.setLevel(logging.DEBUG)
+    elif args.info:
+        logger.setLevel(logging.INFO)
+        handler.setLevel(logging.INFO)
+    else:
+        logger.setLevel(logging.WARNING)
+        handler.setLevel(logging.WARNING)
+    handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
+    logger.addHandler(handler)
+
+    try:
+        visited = set([])
+        found = set([])
+        if (args.distrib.lower() == "debian"):
+            if args.mirror == None:
+                args.mirror = "http://cdimage.debian.org/";
+            if not re.match("^\d+$",args.version):
+                raise Exception("please give the Debian release number (e.g. 8 
for Jessie)")
+            if args.version == '10':
+                url_regex = 
re.compile("^"+args.mirror+"cdimage/release/(?:"+args.version+"\.\d+\.\d+/(?:"+args.arch+"/(?:iso-cd/(?:debian-"+args.version+"\.\d+\.\d+-"+args.arch+"-netinst\.iso)?)?)?)?$")
+            else:
+                url_regex = 
re.compile("^"+args.mirror+"cdimage/archive/(?:"+args.version+"\.\d+\.\d+/(?:"+args.arch+"/(?:iso-cd/(?:debian-"+args.version+"\.\d+\.\d+-"+args.arch+"-netinst\.iso)?)?)?)?$")
+            target_regex = re.compile("^.*-netinst\.iso$")
+            [visited,found] = url_find(set([args.mirror+"cdimage/"+v+"/" for v 
in ["release","archive"]]), set(), set())
+        elif (args.distrib.lower() == "ubuntu"):
+            if args.mirror == None:
+                args.mirror = "http://(?:archive|old-releases).ubuntu.com/"
+                servers = set(["http://"+s+".ubuntu.com/ubuntu/"; for s in 
["old-releases","archive"]])
+            else:
+                servers = set([args.mirror])
+            if not re.match("^\w+$",args.version):
+                raise Exception("please give the Ubuntu release name")
+            url_regex = 
re.compile("^"+args.mirror+"ubuntu/dists/(?:"+args.version+"(?:-updates)?/(?:main/(?:installer-"+args.arch+"/(?:current/(?:(?:legacy-)?images/(?:netboot/(?:mini\.iso)?)?)?)?)?)?)?$")
+            target_regex = re.compile("^.*/mini\.iso$")
+            [visited,found] = url_find(servers, set(), set())
+        elif (args.distrib.lower() == "centos"):
+            if args.mirror == None:
+                args.mirror = "http://mirror.in2p3.fr/linux/CentOS/";
+            if not re.match("^\d+$",args.version):
+                raise Exception("please give the CentOS release number (e.g. 7 
for CentOS-7)")
+            if args.version == '6':
+                url_regex = 
re.compile("^"+args.mirror+"(?:"+args.version+"/(?:isos/(?:"+args.arch+"/(?:CentOS-"+args.version+"(?:\.\d+)?-"+args.arch+"-netinstall\.iso)?)?)?)?$")
+                target_regex = 
re.compile("^.*CentOS-\d+(?:\.\d+)?-\w+-netinstall\.iso$")
+            elif args.version == '7':
+                url_regex = 
re.compile("^"+args.mirror+"(?:"+args.version+"/(?:isos/(?:"+args.arch+"/(?:CentOS-"+args.version+"-"+args.arch+"-NetInstall-\d+\.iso)?)?)?)?$")
+                target_regex = 
re.compile("^.*CentOS-\d+-\w+-NetInstall-\d+\.iso$")
+            else:
+                url_regex = 
re.compile("^"+args.mirror+"(?:"+args.version+"/(?:isos/(?:"+args.arch+"/(?:CentOS-"+args.version+"\.\d+\.\d+-"+args.arch+"-boot\.iso)?)?)?)?$")
+                target_regex = 
re.compile("^.*CentOS-\d+\.\d+\.\d+-\w+-boot\.iso$")
+            [visited,found] = url_find(set([args.mirror]), set(), set())
+        else:
+            raise Exception("this distribution is not supported")
+        logger.info("URL regex: "+url_regex.pattern)
+        logger.info("Target regex: "+target_regex.pattern)
+        logger.debug("Visited URLs:")
+        for url in visited:
+            logger.debug(url)
+        logger.info("Found URLs:")
+        for url in found:
+            logger.info(url)
+        if len(found) > 0:
+            if (args.distrib.lower() == "debian"):
+                print(sorted(found,key=lambda x:key_normalize(re.sub(r".*/debian-(\d+).(\d+).(\d+)-"+args.arch+"-netinst\.iso$",r"\1.\2.\3",x)),reverse=True)[0])
+            else:
+                print(sorted(found, reverse=False)[0])
+        else:
+            raise Exception("no url found")
+    except Exception as exc:
+        sys.stderr.write(u"Error: %s\n" % exc)
+        sys.exit(1)
diff --git a/default/steps/data/helpers/simple_http_server.py b/default/steps/data/helpers/simple_http_server.py
new file mode 100644
index 0000000..881343a
--- /dev/null
+++ b/default/steps/data/helpers/simple_http_server.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python2
+"""Simple HTTP server"""
+from __future__ import unicode_literals
+import atexit
+import os
+import sys
+import argparse
+
+
+class HTTPServerDaemon(object):
+
+    """A HTTP server daemon class."""
+
+    def __init__(self, root=os.getcwd()):
+        """ Initialize the object."""
+        self.root = root
+
+    def daemonize(self, pidfile):
+        """Deamonize class. UNIX double fork mechanism."""
+        try:
+            pid = os.fork()
+            if pid > 0:
+                # exit first parent
+                sys.exit(0)
+        except OSError as err:
+            sys.stderr.write('fork #1 failed: {0}\n'.format(err))
+            sys.exit(1)
+
+        # decouple from parent environment
+        os.chdir(self.root)
+        os.setsid()
+        os.umask(0)
+
+        # do second fork
+        try:
+            pid = os.fork()
+            if pid > 0:
+
+                # exit from second parent
+                sys.exit(0)
+        except OSError as err:
+            sys.stderr.write('fork #2 failed: {0}\n'.format(err))
+            sys.exit(1)
+
+        # redirect standard file descriptors
+        sys.stdout.flush()
+        sys.stderr.flush()
+        si = open(os.devnull, 'r')
+        so = open(os.devnull, 'a+')
+        se = open(os.devnull, 'a+')
+
+        os.dup2(si.fileno(), sys.stdin.fileno())
+        os.dup2(so.fileno(), sys.stdout.fileno())
+        os.dup2(se.fileno(), sys.stderr.fileno())
+
+        # Make sure pid file is removed if we quit
+        @atexit.register
+        def delpid():
+            os.remove(pidfile)
+
+        # write pidfile
+        pid = str(os.getpid())
+        with open(pidfile, 'w+') as f:
+            f.write(pid + '\n')
+
+    def start(self, pidfile, *args, **kwargs):
+        """Start the daemon."""
+        # Check for a pidfile to see if the daemon already runs
+        try:
+            with open(pidfile, 'r') as pf:
+
+                pid = int(pf.read().strip())
+        except IOError:
+            pid = None
+
+        if pid:
+            message = "pidfile {0} already exist. " + \
+                      "Daemon already running?\n"
+            sys.stderr.write(message.format(pidfile))
+            sys.exit(1)
+
+        # Start the daemon
+        self.daemonize(pidfile)
+        self.run(*args, **kwargs)
+
+    def run(self, host, port):
+        """ Run an HTTP server."""
+        if sys.version_info[0] == 3:
+            from http.server import HTTPServer, SimpleHTTPRequestHandler
+            httpd = HTTPServer((host, port), SimpleHTTPRequestHandler)
+        else:
+            import SimpleHTTPServer
+            import SocketServer
+            handler = SimpleHTTPServer.SimpleHTTPRequestHandler
+            httpd = SocketServer.TCPServer((host, port), handler)
+
+        print("Running on http://%s:%s/"; % (host, port))
+        os.chdir(self.root)
+        try:
+            httpd.serve_forever()
+        except KeyboardInterrupt:
+            sys.stderr.write(u"\nBye\n")
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description=sys.modules[__name__].__doc__,
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
+    parser.add_argument('--port', action="store", default=9090, type=int,
+                        help='Set the listening port')
+    parser.add_argument('--root', action="store", default=os.getcwd())
+    parser.add_argument('--bind', action="store", default="0.0.0.0",
+                        help='Set the binding address')
+    parser.add_argument('--daemon', action="store_true", default=False)
+    parser.add_argument('--pid', action="store")
+
+    try:
+        args = parser.parse_args()
+        http_server = HTTPServerDaemon(root=args.root)
+        if args.daemon:
+            if args.pid is None:
+                parser.error("Need to set a pid file")
+            http_server.start(args.pid, args.bind, args.port)
+        else:
+            http_server.run(args.bind, args.port)
+    except Exception as exc:
+        sys.stderr.write(u"\nError: %s\n" % exc)
+        sys.exit(1)
diff --git a/default/steps/data/preseed/debian-testing-preseed.cfg b/default/steps/data/preseed/debian-testing-preseed.cfg
new file mode 100644
index 0000000..5af0d99
--- /dev/null
+++ b/default/steps/data/preseed/debian-testing-preseed.cfg
@@ -0,0 +1,322 @@
+#### Contents of the preconfiguration file (for wheezy)
+### Localization
+# Locale sets language and country.
+d-i debian-installer/locale string en_US.UTF-8
+
+# Keyboard selection.
+#d-i keymap select us
+d-i keyboard-configuration/xkb-keymap select us
+
+### Network configuration
+# netcfg will choose an interface that has link if possible. This makes it
+# skip displaying a list if there is more than one interface.
+d-i netcfg/choose_interface select auto
+
+# To pick a particular interface instead:
+#d-i netcfg/choose_interface select eth1
+
+# If you have a slow dhcp server and the installer times out waiting for
+# it, this might be useful.
+#d-i netcfg/dhcp_timeout string 60
+
+# If you prefer to configure the network manually, uncomment this line and
+# the static network configuration below.
+#d-i netcfg/disable_dhcp boolean true
+
+# If you want the preconfiguration file to work on systems both with and
+# without a dhcp server, uncomment these lines and the static network
+# configuration below.
+#d-i netcfg/dhcp_failed note
+#d-i netcfg/dhcp_options select Configure network manually
+
+# Static network configuration.
+#d-i netcfg/get_nameservers string 192.168.1.1
+#d-i netcfg/get_ipaddress string 192.168.1.42
+#d-i netcfg/get_netmask string 255.255.255.0
+#d-i netcfg/get_gateway string 192.168.1.1
+#d-i netcfg/confirm_static boolean true
+
+# Any hostname and domain names assigned from dhcp take precedence over
+# values set here. However, setting the values still prevents the questions
+# from being shown, even if values come from dhcp.
+d-i netcfg/get_hostname string kameleon
+d-i netcfg/get_domain string kameleon
+
+# Disable that annoying WEP key dialog.
+d-i netcfg/wireless_wep string
+# The wacky dhcp hostname that some ISPs use as a password of sorts.
+#d-i netcfg/dhcp_hostname string radish
+
+# If non-free firmware is needed for the network or other hardware, you can
+# configure the installer to always try to load it, without prompting. Or
+# change to false to disable asking.
+#d-i hw-detect/load_firmware boolean true
+
+### Network console
+# Use the following settings if you wish to make use of the network-console
+# component for remote installation over SSH. This only makes sense if you
+# intend to perform the remainder of the installation manually.
+#d-i anna/choose_modules string network-console
+#d-i network-console/password password r00tme
+#d-i network-console/password-again password r00tme
+
+### Mirror settings
+# If you select ftp, the mirror/country string does not need to be set.
+#d-i mirror/protocol string ftp
+d-i mirror/country string manual
+d-i mirror/http/hostname string http.debian.net
+d-i mirror/http/directory string /debian
+d-i mirror/http/proxy string
+
+# Suite to install.
+d-i mirror/suite string testing
+# Suite to use for loading installer components (optional).
+d-i mirror/udeb/suite string unstable
+
+### Clock and time zone setup
+# Controls whether or not the hardware clock is set to UTC.
+d-i clock-setup/utc boolean true
+
+# You may set this to any valid setting for $TZ; see the contents of
+# /usr/share/zoneinfo/ for valid values.
+d-i time/zone string UTC
+
+# Controls whether to use NTP to set the clock during the install
+d-i clock-setup/ntp boolean true
+# NTP server to use. The default is almost always fine here.
+#d-i clock-setup/ntp-server string ntp.example.com
+
+### Partitioning
+# If the system has free space you can choose to only partition that space.
+#d-i partman-auto/init_automatically_partition select biggest_free
+
+# Alternatively, you can specify a disk to partition. The device name must
+# be given in traditional non-devfs format.
+# Note: A disk must be specified, unless the system has only one disk.
+# For example, to use the first SCSI/SATA hard disk:
+#d-i partman-auto/disk string /dev/sda
+# In addition, you'll need to specify the method to use.
+# The presently available methods are: "regular", "lvm" and "crypto"
+d-i partman-auto/method string regular
+
+# If one of the disks that are going to be automatically partitioned
+# contains an old LVM configuration, the user will normally receive a
+# warning. This can be preseeded away...
+d-i partman-lvm/device_remove_lvm boolean true
+# The same applies to pre-existing software RAID array:
+d-i partman-md/device_remove_md boolean true
+
+# And the same goes for the confirmation to write the lvm partitions.
+d-i partman-lvm/confirm boolean true
+d-i partman-lvm/confirm_nooverwrite boolean true
+
+
+d-i partman/choose_partition select finish
+d-i partman-auto-lvm/guided_size string max
+
+# You can choose one of the three predefined partitioning recipes:
+# - atomic: all files in one partition
+# - home:   separate /home partition
+# - multi:  separate /home, /usr, /var, and /tmp partitions
+d-i partman-auto/choose_recipe select atomic
+d-i partman/default_filesystem string ext4
+
+# Or provide a recipe of your own...
+# The recipe format is documented in the file devel/partman-auto-recipe.txt.
+# If you have a way to get a recipe file into the d-i environment, you can
+# just point at it.
+#d-i partman-auto/expert_recipe_file string /hd-media/recipe
+
+# If not, you can put an entire recipe into the preconfiguration file in one
+# (logical) line. This example creates a small /boot partition, suitable
+# swap, and uses the rest of the space for the root partition:
+#d-i partman-auto/expert_recipe string                         \
+#      boot-root ::                                            \
+#              40 50 100 ext3                                  \
+#                      $primary{ } $bootable{ }                \
+#                      method{ format } format{ }              \
+#                      use_filesystem{ } filesystem{ ext3 }    \
+#                      mountpoint{ /boot }                     \
+#              .                                               \
+#              500 10000 1000000000 ext3                       \
+#                      method{ format } format{ }              \
+#                      use_filesystem{ } filesystem{ ext3 }    \
+#                      mountpoint{ / }                         \
+#              .                                               \
+#              64 512 300% linux-swap                          \
+#                      method{ swap } format{ }                \
+#              .
+
+# The preseed line that "selects finish" needs to be in a certain order in your
+# preseed; the example-preseed does not follow this.
+#http://ubuntuforums.org/archive/index.php/t-1504045.html
+
+# This makes partman automatically partition without confirmation, provided
+# that you told it what to do using one of the methods above.
+d-i partman/confirm_write_new_label boolean true
+d-i partman/confirm boolean true
+d-i partman/confirm_nooverwrite boolean true
+
+
+### Base system installation
+# Select the initramfs generator used to generate the initrd for 2.6 kernels.
+#d-i base-installer/kernel/linux/initramfs-generators string yaird
+
+# The kernel image (meta) package to be installed; "none" can be used if no
+# kernel is to be installed.
+#d-i base-installer/kernel/image string linux-image-2.6-486
+
+### Account setup
+# Enable login to root account
+d-i passwd/root-login boolean true
+# Root password, either in clear text
+d-i passwd/root-password password kameleon
+d-i passwd/root-password-again password kameleon
+# or encrypted using an MD5 hash.
+#d-i passwd/root-password-crypted password [MD5 hash]
+
+# Skip creation of a normal user account.
+# d-i passwd/make-user boolean false
+
+# To create a normal user account.
+d-i passwd/user-fullname string Kameleon User
+d-i passwd/username string kameleon
+# Normal user's password, either in clear text
+d-i passwd/user-password password kameleon
+d-i passwd/user-password-again password kameleon
+# or encrypted using an MD5 hash.
+#d-i passwd/user-password-crypted password [MD5 hash]
+# Create the first user with the specified UID instead of the default.
+#d-i passwd/user-uid string 1010
+# d-i user-setup/encrypt-home boolean false
+# d-i user-setup/allow-password-weak boolean true
+
+# The user account will be added to some standard initial groups. To
+# override that, use this.
+d-i passwd/user-default-groups string audio cdrom video admin
+
+### Apt setup
+# You can choose to install non-free and contrib software.
+#d-i apt-setup/non-free boolean true
+#d-i apt-setup/contrib boolean true
+# Uncomment this if you don't want to use a network mirror.
+#d-i apt-setup/use_mirror boolean false
+# Select which update services to use; define the mirrors to be used.
+# Values shown below are the normal defaults.
+# FIXME: temporarily remove security repo while debian fixes the installer
+# (default value: d-i apt-setup/services-select multiselect security, volatile)
+d-i apt-setup/services-select multiselect
+#d-i apt-setup/security_host string security.debian.org
+#d-i apt-setup/volatile_host string volatile.debian.org
+
+# Scan another CD or DVD?
+d-i apt-setup/cdrom/set-first boolean false
+
+# By default the installer requires that repositories be authenticated
+# using a known gpg key. This setting can be used to disable that
+# authentication. Warning: Insecure, not recommended.
+#d-i debian-installer/allow_unauthenticated string true
+
+### Package selection
+tasksel tasksel/first multiselect none
+# If the desktop task is selected, install the kde and xfce desktops
+# instead of the default gnome desktop.
+#tasksel tasksel/desktop multiselect kde, xfce
+
+# Individual additional packages to install
+d-i pkgsel/include string openssh-server sudo rsync haveged
+
+# Whether to upgrade packages after debootstrap.
+# Allowed values: none, safe-upgrade, full-upgrade
+d-i pkgsel/upgrade select none
+
+# Some versions of the installer can report back on what software you have
+# installed, and what software you use. The default is not to report back,
+# but sending reports helps the project determine what software is most
+# popular and include it on CDs.
+popularity-contest popularity-contest/participate boolean false
+
+### Boot loader installation
+# Grub is the default boot loader (for x86). If you want lilo installed
+# instead, uncomment this:
+#d-i grub-installer/skip boolean true
+# To also skip installing lilo, and install no bootloader, uncomment this
+# too:
+#d-i lilo-installer/skip boolean true
+
+# This is fairly safe to set, it makes grub install automatically to the MBR
+# if no other operating system is detected on the machine.
+d-i grub-installer/only_debian boolean true
+
+# This one makes grub-installer install to the MBR if it also finds some other
+# OS, which is less safe as it might not be able to boot that other OS.
+d-i grub-installer/with_other_os boolean true
+
+# Alternatively, if you want to install to a location other than the mbr,
+# uncomment and edit these lines:
+#d-i grub-installer/only_debian boolean false
+#d-i grub-installer/with_other_os boolean false
+#d-i grub-installer/bootdev  string (hd0,0)
+# To install grub to multiple disks:
+#d-i grub-installer/bootdev  string (hd0,0) (hd1,0) (hd2,0)
+
+# Optional password for grub, either in clear text
+#d-i grub-installer/password password r00tme
+#d-i grub-installer/password-again password r00tme
+# or encrypted using an MD5 hash, see grub-md5-crypt(8).
+#d-i grub-installer/password-crypted password [MD5 hash]
+
+# GRUB install devices:
+# Choices: /dev/sda (21474 MB; VMware_Virtual_S), /dev/sda1 (21472 MB; VMware_Virtual_S)
+grub-pc     grub-pc/install_devices multiselect /dev/vda
+# Choices: Enter device manually, /dev/sda
+grub-installer  grub-installer/choose_bootdev   select  /dev/vda
+
+### Finishing up the installation
+# During installations from serial console, the regular virtual consoles
+# (VT1-VT6) are normally disabled in /etc/inittab. Uncomment the next
+# line to prevent this.
+#d-i finish-install/keep-consoles boolean true
+
+# Avoid that last message about the install being complete.
+d-i finish-install/reboot_in_progress note
+
+# This will prevent the installer from ejecting the CD during the reboot,
+# which is useful in some situations.
+d-i cdrom-detect/eject boolean false
+
+# This is how to make the installer shutdown when finished, but not
+# reboot into the installed system.
+#d-i debian-installer/exit/halt boolean true
+# This will power off the machine instead of just halting it.
+d-i debian-installer/exit/poweroff boolean true
+
+### Preseeding other packages
+# Depending on what software you choose to install, or if things go wrong
+# during the installation process, it's possible that other questions may
+# be asked. You can preseed those too, of course. To get a list of every
+# possible question that could be asked during an install, do an
+# installation, and then run these commands:
+#   debconf-get-selections --installer > file
+#   debconf-get-selections >> file
+
+
+#### Advanced options
+### Running custom commands during the installation
+# d-i preseeding is inherently not secure. Nothing in the installer checks
+# for attempts at buffer overflows or other exploits of the values of a
+# preconfiguration file like this one. Only use preconfiguration files from
+# trusted locations! To drive that home, and because it's generally useful,
+# here's a way to run any shell command you'd like inside the installer,
+# automatically.
+
+# This first command is run as early as possible, just after
+# preseeding is read.
+# Prevent packaged version of VirtualBox Guest Additions being installed:
+#d-i preseed/early_command string sed -i \
+#  '/in-target/idiscover(){/sbin/discover|grep -v VirtualBox;}' \
+#  /usr/lib/pre-pkgsel.d/20install-hwpackages
+
+# This command is run just before the install finishes, but when there is
+# still a usable /target directory. You can chroot to /target and use it
+# directly, or use the apt-install and in-target commands to easily install
+# packages and run commands in the target system.
+
diff --git a/default/steps/data/qemu-sendkeys.rb b/default/steps/data/qemu-sendkeys.rb
new file mode 100644
index 0000000..d1bcb0f
--- /dev/null
+++ b/default/steps/data/qemu-sendkeys.rb
@@ -0,0 +1,121 @@
+#!/usr/bin/env ruby
+# Translate a string to "sendkey" commands for QEMU.
+# Martin Vidner, MIT License
+
+# https://en.wikibooks.org/wiki/QEMU/Monitor#sendkey_keys
+# sendkey keys
+#
+# You can emulate keyboard events through the sendkey command. The syntax is:
+# sendkey keys. To get a list of keys, type sendkey [tab]. Examples:
+#
+#     sendkey a
+#     sendkey shift-a
+#     sendkey ctrl-u
+#     sendkey ctrl-alt-f1
+#
+# As of QEMU 0.12.5 there are:
+# shift     shift_r     alt     alt_r   altgr   altgr_r
+# ctrl  ctrl_r  menu    esc     1   2
+# 3     4   5   6   7   8
+# 9     0   minus   equal   backspace   tab
+# q     w   e   r   t   y
+# u     i   o   p   ret     a
+# s     d   f   g   h   j
+# k     l   z   x   c   v
+# b     n   m   comma   dot     slash
+# asterisk  spc     caps_lock   f1  f2  f3
+# f4    f5  f6  f7  f8  f9
+# f10   num_lock    scroll_lock     kp_divide   kp_multiply     kp_subtract
+# kp_add    kp_enter    kp_decimal  sysrq   kp_0    kp_1
+# kp_2  kp_3    kp_4    kp_5    kp_6    kp_7
+# kp_8  kp_9    <   f11     f12     print
+# home  pgup    pgdn    end     left    up
+# down  right   insert  delete
+
+require "optparse"
+
+# incomplete! only what I need now.
+KEYS = {
+  "%" => "shift-5",
+  "/" => "slash",
+  ":" => "shift-semicolon",
+  "=" => "equal",
+  "." => "dot",
+  " " => "spc",
+  "-" => "minus",
+  "_" => "shift-minus",
+  "*" => "asterisk",
+  "," => "comma",
+  "+" => "shift-equal",
+  "|" => "shift-backslash",
+  "\\" => "backslash",
+}
+
+class Main
+  attr_accessor :command
+  attr_accessor :delay_s
+  attr_accessor :keystring
+
+  def initialize
+    self.command = nil
+    self.delay_s = 0.1
+
+    OptionParser.new do |opts|
+      opts.banner = "Usage: sendkeys [-c command_to_pipe_to] STRING\n" +
+        "Where STRING can be 'ls<enter>ls<gt>/dev/null<enter>'"
+
+      opts.on("-c", "--command COMMAND",
+              "Pipe sendkeys to this commands, individually") do |v|
+        self.command = v
+      end
+      opts.on("-d", "--delay SECONDS", Float,
+              "Delay SECONDS after each key (default: 0.1)") do |v|
+        self.delay_s = v
+      end
+    end.parse!
+    self.keystring = ARGV[0]
+  end
+
+  def sendkey(qemu_key_name)
+    if qemu_key_name == "wait"
+      sleep 1
+    else
+      if qemu_key_name =~ /[A-Za-z]/ && qemu_key_name == qemu_key_name.upcase
+        key = "shift-#{qemu_key_name.downcase}"
+      else
+        key = qemu_key_name
+      end
+      qemu_cmd = "sendkey #{key}"
+      if command
+        system "echo '#{qemu_cmd}' | #{command}"
+      else
+        puts qemu_cmd
+        $stdout.flush             # important when we are piped
+      end
+      sleep delay_s
+    end
+  end
+
+  PATTERN = /
+              \G  # where last match ended
+              < [^>]+ >
+            |
+              \G
+              .
+            /x
+  def run
+    keystring.scan(PATTERN) do |match|
+      if match[0] == "<"
+        key_name = match.slice(1..-2)
+        sendkey case key_name
+                when "lt" then "shift-comma"
+                when "gt" then "shift-dot"
+                else key_name
+                end
+      else
+        sendkey KEYS.fetch(match, match)
+      end
+    end
+  end
+end
+
+Main.new.run
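+
+# Usage example (illustrative; the piping command is an assumption, not taken
+# from the recipes). The script's own banner documents the STRING syntax:
+#   ./qemu-sendkeys.rb -c "socat - UNIX-CONNECT:qemu_monitor.socket" 'ls<enter>'
+# Without -c, the generated "sendkey ..." monitor commands are printed on stdout.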
diff --git a/default/steps/data/qemu-sendkeys/netinst-iso-debian b/default/steps/data/qemu-sendkeys/netinst-iso-debian
new file mode 100644
index 0000000..7705a44
--- /dev/null
+++ b/default/steps/data/qemu-sendkeys/netinst-iso-debian
@@ -0,0 +1 @@
+<esc><wait>auto preseed/url=http://%LOCAL_IP%:%HTTP_PORT%/preseed.cfg<kp_enter>
diff --git a/default/steps/disable_checkpoint.yaml b/default/steps/disable_checkpoint.yaml
new file mode 100644
index 0000000..cb571da
--- /dev/null
+++ b/default/steps/disable_checkpoint.yaml
@@ -0,0 +1,3 @@
+- disable_checkpoint:
+  - on_checkpoint: redo
+  - exec_local: rm -f $${kameleon_cwd}/checkpoint_enabled
diff --git a/default/steps/enable_checkpoint.yaml b/default/steps/enable_checkpoint.yaml
new file mode 100644
index 0000000..8ac4751
--- /dev/null
+++ b/default/steps/enable_checkpoint.yaml
@@ -0,0 +1,5 @@
+- enable_checkpoint:
+  - on_checkpoint: redo
+  - on_bootstrap_init:
+    - exec_local: rm -f $${kameleon_cwd}/checkpoint_enabled
+  - exec_local: touch $${kameleon_cwd}/checkpoint_enabled
diff --git a/default/steps/env/bashrc b/default/steps/env/bashrc
new file mode 100644
index 0000000..6306e37
--- /dev/null
+++ b/default/steps/env/bashrc
@@ -0,0 +1,23 @@
+## aliases
+# If not running interactively, don't do anything
+export USER=${USER:-"root"}
+export HOME=${HOME:-"/root"}
+export PATH=/usr/bin:/usr/sbin:/bin:/sbin:$PATH
+export LC_ALL=${LC_ALL:-"POSIX"}
+
+export DEBIAN_FRONTEND=noninteractive
+
+if [ -t 1 ] ; then
+export TERM=xterm
+# for fast typing
+alias h='history'
+alias g='git status'
+alias l='ls -lah'
+alias ll='ls -lh'
+alias la='ls -Ah'
+
+# for human readable output
+alias ls='ls -h'
+alias df='df -h'
+alias du='du -h'
+fi
diff --git a/default/steps/env/functions.sh b/default/steps/env/functions.sh
new file mode 100644
index 0000000..1abcc38
--- /dev/null
+++ b/default/steps/env/functions.sh
@@ -0,0 +1,201 @@
+## functions
+
+function fail {
+    echo $@ 1>&2
+    false
+}
+
+export -f fail
+
+function __download {
+    local src=$1
+    local dst=$2
+    if [ -n "$DOWNLOAD_SRC_URL" ]; then
+        src="$DOWNLOAD_SRC_URL"
+    fi
+    if [ -z "$src" ]; then
+        fail "No URL to download from"
+    fi
+    # If dst is unset or a directory, infers dst pathname from src
+    if [ -z "$dst" -o "${dst: -1}" == "/" ]; then
+        dst="$dst${src##*/}"
+        dst="${dst%%\?*}"
+    fi
+    local dstdir=${dst%/*}
+    if [ -n "$dstdir" -a "$dstdir" != "$dst" ]; then
+        mkdir -p $dstdir
+    fi
+    echo -n "Downloading: $src..."
+    # Put cURL first because it accept URIs (like file://...)
+    if which curl >/dev/null; then
+        echo " (cURL)"
+        curl -S --fail -# -L --retry 999 --retry-max-time 0 "$src" -o "$dst" 2>&1
+    elif which wget >/dev/null; then
+        echo " (wget)"
+        wget --retry-connrefused --progress=bar:force "$src" -O "$dst" 2>&1
+    elif which python >/dev/null; then
+        echo " (python)"
+        python - <<EOF
+import sys
+import time
+if sys.version_info >= (3,):
+    import urllib.request as urllib
+else:
+    import urllib
+
+
+def reporthook(count, block_size, total_size):
+    global start_time
+    if count == 0:
+        start_time = time.time()
+        return
+    duration = time.time() - start_time
+    progress_size = float(count * block_size)
+    if duration != 0:
+        if total_size == -1:
+            total_size = block_size
+            percent = 'Unknown size, '
+        else:
+            percent = '%.0f%%, ' % float(count * block_size * 100 / total_size)
+        speed = int(progress_size / (1024 * duration))
+        sys.stdout.write('\r%s%.2f MB, %d KB/s, %d seconds passed'
+                         % (percent, progress_size / (1024 * 1024), speed, duration))
+        sys.stdout.flush()
+
+urllib.urlretrieve('$src', '$dst', reporthook=reporthook)
+print('\n')
+EOF
+        true
+    else
+        fail "No way to download $src"
+    fi
+}
+
+export -f __download
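+
+# Usage note (added, illustrative example values):
+#   __download http://deb.debian.org/debian/dists/bullseye/Release ./Release
+# If DOWNLOAD_SRC_URL is set in the environment it overrides the first
+# argument; a destination ending in "/" keeps the source file name.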
+
+function __download_recipe_build() {
+    set -e
+    local recipe=$1
+    local version=${2:-latest}
+    local do_checksum=${3:-true}
+    local do_checksign=${4:-false}
+    local do_cache=${5:-false}
+    local builds_url=${6:-http://kameleon.imag.fr/builds}
+    local dest_dir="${7:-$recipe}"
+    local dest=""
+    mkdir -p $dest_dir
+    pushd $dest_dir > /dev/null
+    echo "Downloading $recipe ($version):"
+    __download $builds_url/${recipe}_$version.manifest
+    if [ "$do_checksign" == "true" ]; then
+        __download $builds_url/${recipe}_$version.manifest.sign
+        gpg --verify ${recipe}_$version.manifest{.sign,} || fail "Cannot 
verify signature"
+    fi
+    for f in $(< ${recipe}_$version.manifest); do
+        if [[ $f =~ ^$recipe-cache_ ]] && [ "$do_cache" != "true" ]; then
+            continue
+        fi
+        if [[ $f =~ \.sha[[:digit:]]+sum$ ]]; then
+            if [ "$do_checksum" == "true" ]; then
+                __download $builds_url/$f
+                ${f##*.} -c $f || fail "Cannot verify checksum"
+                if [ "$do_checksign" == "true" ]; then
+                    __download $builds_url/$f.sign
+                    gpg --verify $f{.sign,} || fail "Cannot verify signature"
+                fi
+            fi
+        else
+            __download $builds_url/$f
+            echo -n "Link to version-less filename: "
+            dest=${f%_*}.tar.${f#*.tar.}
+            ln -fv $f $dest
+        fi
+    done
+    popd > /dev/null
+    export UPSTREAM_TARBALL="$dest_dir/$dest"
+    set +e
+}
+
+export -f __download_recipe_build
+
+function __download_kadeploy_environment_image() {
+    set -e
+    local kaenv_name=$1
+    local kaenv_user=$2
+    local kaenv_version=$3
+    local remote=$4
+    local dest_dir=${5:-$kaenv_name}
+    mkdir -p $dest_dir
+    echo "Retrieve image for Kadeploy environment $kaenv_name"
+    ${remote:+ssh $remote }which kaenv3 > /dev/null || fail "kaenv3 command not found (${remote:-localhost})"
+    # retrieve image[file], image[kind] and image[compression] from kaenv3
+    declare -A image
+    __kaenv() { local k=${2%%:*}; image[$k]=${2#*:}; }
+    mapfile -s 1 -t -c1 -C __kaenv < <(${remote:+ssh $remote }kaenv3${kaenv_user:+ -u $kaenv_user}${kaenv_version:+ --env-version $kaenv_version} -p $kaenv_name | grep -A3 -e '^image:' | sed -e 's/ //g')
+    [ -n "${image[file]}" ] || fail "Failed to retrieve environment $kaenv_name"
+    if [ "${image[compression]}" == "gzip" ]; then
+        image[compression]="gz"
+    elif [ "${image[compression]}" == "bzip2" ]; then
+        image[compression]="bz2"
+    fi
+    image[protocol]=${image[file]%%:*}
+    image[path]=${image[file]#*://}
+    image[filename]=${image[path]##*/}
+    local dest=$dest_dir/${image[filename]%%.*}.${image[kind]}.${image[compression]}
+    if [ "${image[kind]}" == "tar" ]; then
+        if [ "${image[protocol]}" == "http" -o "${image[protocol]}" == "https" ]; then
+            __download ${image[file]} $dest
+        else
+            if  [ "${image[protocol]}" == "server" ]; then
+                # If server:// => see if available locally (NFS) or fail, same as if local:// <=> ""
+                echo "Image is server side, try and fetch it from local file ${image[path]}"
+            fi
+            [ -r ${image[path]} ] || fail "Cannot retrieve ${image[file]}"
+            cp -v ${image[path]} $dest
+        fi
+    else # dd or whatever
+        fail "Image format${image[kind]:+ ${image[kind]}} is not supported"
+    fi
+    export UPSTREAM_TARBALL=$dest
+    set +e
+}
+
+export -f __download_kadeploy_environment_image
+
+function __find_linux_boot_device() {
+    local PDEVICE=`stat -c %04D /boot`
+    for file in $(find /dev -type b 2>/dev/null) ; do
+        local CURRENT_DEVICE=$(stat -c "%02t%02T" $file)
+        if [ $CURRENT_DEVICE = $PDEVICE ]; then
+            ROOTDEVICE="$file"
+            break;
+        fi
+    done
+    echo "$ROOTDEVICE"
+}
+
+export -f __find_linux_boot_device
+
+
+function __find_free_port() {
+  local begin_port=$1
+  local end_port=$2
+
+  local port=$begin_port
+  local ret=$(nc -z 127.0.0.1 $port && echo in use || echo free)
+  while [ $port -le $end_port ] && [ "$ret" == "in use" ]
+  do
+    local port=$[$port+1]
+    local ret=$(nc -z 127.0.0.1 $port && echo in use || echo free)
+  done
+
+  # manage loop exits
+  if [[ $port -gt $end_port ]]
+  then
+    fail "No free port available between $begin_port and $end_port"
+  fi
+
+  echo $port
+}
+
+export -f __find_free_port
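+
+# Usage example (added, illustrative; not wired into any step here):
+#   HTTP_PORT=$(__find_free_port 8000 8100)
+# scans 127.0.0.1 with nc until a free TCP port is found in the given range.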
diff --git a/default/steps/export/save_appliance_VM.yaml b/default/steps/export/save_appliance_VM.yaml
new file mode 100644
index 0000000..b064d02
--- /dev/null
+++ b/default/steps/export/save_appliance_VM.yaml
@@ -0,0 +1,23 @@
+#
+# Save Appliance from virtual machine
+#
+- export_appliance_script: $${kameleon_data_dir}/helpers/export_appliance.py
+
+# Zero free unallocated blocks from ext2/3 file-systems before export to
+# reduce image size
+- zerofree: true
+
+- save_appliance:
+  - check_cmd_local: python2
+  - exec_local: |
+      if [ "$${zerofree}" = "true" ]; then
+        EXPORT_OPTS="--zerofree"
+      else
+        EXPORT_OPTS=""
+      fi
+  - exec_local: |
+      python2 $${export_appliance_script} $${image_disk}.$${image_format} \
+        -o $${appliance_filename} \
+        --formats $${appliance_formats} \
+        --tar-compression-level $${appliance_tar_compression_level} \
+        --tar-excludes $${appliance_tar_excludes} $EXPORT_OPTS
diff --git a/default/steps/setup/debian/clean_system.yaml b/default/steps/setup/debian/clean_system.yaml
new file mode 100644
index 0000000..399c339
--- /dev/null
+++ b/default/steps/setup/debian/clean_system.yaml
@@ -0,0 +1,34 @@
+- enable_lighten: false
+
+- clean_user:
+  - on_setup_clean:
+    - exec_in: |
+        if id kameleon > /dev/null 2>&1; then
+          echo "Removing the kameleon user"
+          userdel -r kameleon 2> >(grep -v "userdel: kameleon mail spool (/var/mail/kameleon) not found" )
+        fi
+
+- clean_apt:
+  - on_setup_clean:
+    - apt-get_in: autoremove
+    - apt-get_in: autoclean
+    - apt-get_in: purge
+    - apt-get_in: clean
+    - exec_in: |
+        if [ $${enable_lighten} = true ]; then
+          rm -rf /var/lib/apt/lists/*
+          rm -rf /usr/share/locale/*
+          rm -rf /usr/share/man/*
+          rm -rf /usr/share/doc/*
+        fi
+
+- clean_network:
+  - on_setup_clean:
+    - exec_in: rm -rf /var/lib/dhcp/*
+
+- clean_udev:
+  - on_setup_clean:
+    - exec_in: rm -rf /etc/udev/rules.d/70-persistent-net.rules
+    - exec_in: rm -rf /dev/.udev/
+    - exec_in: touch /etc/udev/rules.d/70-persistent-net.rules
+    - exec_in: rm -rf /lib/udev/rules.d/75-persistent-net-generator.rules
\ No newline at end of file
diff --git a/default/steps/setup/debian/minimal_install.yaml b/default/steps/setup/debian/minimal_install.yaml
new file mode 100644
index 0000000..d1cdc69
--- /dev/null
+++ b/default/steps/setup/debian/minimal_install.yaml
@@ -0,0 +1,6 @@
+
+- set_root_password:
+  - exec_in: echo -n 'root:$${root_password}' | chpasswd
+
+- upgrade_system:
+  - apt-get_in: dist-upgrade
diff --git a/grid5000/debian11-x64-common.yaml b/grid5000/debian11-x64-common.yaml
new file mode 100644
index 0000000..b2af6d3
--- /dev/null
+++ b/grid5000/debian11-x64-common.yaml
@@ -0,0 +1,56 @@
+#==============================================================================
+# vim: softtabstop=2 shiftwidth=2 expandtab fenc=utf-8 cc=81 tw=80
+#==============================================================================
+#
+# DESCRIPTION: Debian bullseye (11) x64 common Grid'5000 environment
+#
+#==============================================================================
+---
+extend: from_scratch/debian-bullseye.yaml
+
+global:
+  # Export format to generate
+  appliance_formats: qcow2 tar.zst
+  # Output base filename
+  output: "$${kameleon_cwd}/$${kameleon_recipe_name}"
+  # Grid'5000 environment variant
+  g5k_variant: common
+  # Grid'5000 environment version
+  g5k_version: 1111111111
+  # Grid'5000 environment arch
+  g5k_image_arch: x64
+  # Grid'5000 kadeploy environment parameters
+  g5k_tar_path: server:///path/to/your/image
+  g5k_tar_compression: "zstd"
+  g5k_postinst_path: server:///grid5000/postinstalls/g5k-postinstall.tgz
+  g5k_postinst_compression: "gzip"
+  g5k_postinst_script: g5k-postinstall --net debian
+  g5k_kernel_params: ""
+  deb_backports: true
+  # grub-efi-amd64-bin has to be installed if we want to support both mbr and
+  # efi boot methods. This package can be installed in parallel with grub-pc
+  # (contrary to grub-efi-amd64, which conflicts with grub-pc)
+  packages: "grub-efi-amd64-bin"
+  # locales
+  locales: POSIX C en_US.UTF8
+  lang: en_US.UTF8
+  timezone: Europe/Paris
+  # puppet
+  puppetlabs_apt_version: 6.3.0
+
+bootstrap:
+  - "@base"
+
+setup:
+  - "@base"
+  - configure_system
+  - configure_apt_sources
+  - install_packages
+  - setup_orchestrator
+  - run_orchestrator
+
+export:
+  - clean_dhcp_leases
+  - "@base"
+  - do_qcow2_finish_works
+  - export_g5k
diff --git a/grid5000/debian11-x64-min.yaml b/grid5000/debian11-x64-min.yaml
new file mode 100644
index 0000000..969fd3b
--- /dev/null
+++ b/grid5000/debian11-x64-min.yaml
@@ -0,0 +1,27 @@
+#==============================================================================
+# vim: softtabstop=2 shiftwidth=2 expandtab fenc=utf-8 cc=81 tw=80
+#==============================================================================
+#
+# DESCRIPTION: Debian bullseye (11) x64 min Grid'5000 environment
+#
+#==============================================================================
+---
+extend: ./debian11-x64-common.yaml
+
+global:
+  # Grid'5000 environment variant
+  g5k_variant: min
+  # clean_unnecessary_packages step settings (packages debfoster must keep)
+  default_packages_no_clean: g5k-meta-packages-$${distrib}$${release_number}-$${g5k_variant} tgz-g5k gnupg linux-image-$${deb_arch} console-setup rsync locales firmware-bnx2 firmware-bnx2x firmware-qlogic
+  arch_packages_no_clean: grub-pc grub-efi-amd64-bin
+  other_packages_no_clean:
+
+bootstrap:
+  - "@base"
+
+setup:
+  - clean_unnecessary_packages
+  - "@base"
+
+export:
+  - "@base"
diff --git a/grid5000/from_scratch/aarch64/base.yaml b/grid5000/from_scratch/aarch64/base.yaml
new file mode 100644
index 0000000..3b6d58b
--- /dev/null
+++ b/grid5000/from_scratch/aarch64/base.yaml
@@ -0,0 +1,25 @@
+#==============================================================================
+# vim: softtabstop=2 shiftwidth=2 expandtab fenc=utf-8 cc=81 tw=80
+#==============================================================================
+#
+# DESCRIPTION: arm64 base recipe
+#
+#==============================================================================
+---
+extend: ../base.yaml
+# Global variables used by the Kameleon engine and the steps
+global:
+  arch: aarch64
+  installer_iso_arch: aarch64
+  qemu_arch: aarch64
+  qemu_enable_kvm: true
+  qemu_uefi: true
+
+bootstrap:
+  - "@base"
+
+setup:
+  - "@base"
+
+export:
+  - "@base"
diff --git a/grid5000/from_scratch/aarch64/debian-base.yaml b/grid5000/from_scratch/aarch64/debian-base.yaml
new file mode 100644
index 0000000..4450198
--- /dev/null
+++ b/grid5000/from_scratch/aarch64/debian-base.yaml
@@ -0,0 +1,59 @@
+#==============================================================================
+# vim: softtabstop=2 shiftwidth=2 expandtab fenc=utf-8 cc=81 tw=80
+#==============================================================================
+#
+# DESCRIPTION: Debian generic recipe using the netinstall mechanism
+#
+# USAGE:
+#   Select the distribution/release directly in this recipe: see the usage
+#   example commented in the global section of this recipe,
+#
+#   or override the globals directly on the CLI. For example:
+#
+#   kameleon build --global distrib:debian,release:wheezy
+#
+#   or extend this recipe with your own and override those variables in it.
+#
+#==============================================================================
+---
+extend: base.yaml
+
+global:
+  # Boilerplate values, so that `kameleon info' works with the recipe.
+  # For a specific version of Debian, please see the dedicated recipe, as this
+  # recipe is mainly meant as being extended.
+  distrib: debian
+  deb_arch: arm64
+  release: buster
+  release_number: 10
+
+  # URL to retrieve packages from (sources.list)
+  deb_mirror_hostname: deb.debian.org
+  deb_mirror_directory: /debian
+  deb_mirror_uri: http://$${deb_mirror_hostname}$${deb_mirror_directory}
+  deb_security_hostname: security.debian.org
+  deb_security_directory: /debian
+  deb_components: main contrib non-free
+
+  qemu_iso_path:
+  installer_iso_finder_helper:
+  # The Debian netinstall iso refuses to get keyboard keys via the qemu sendkey command on ARM64.
+  # Because of that, we cannot set the preseed when booting the iso image cdrom in qemu.
+  # As a consequence, we use the netboot installer instead and boot the kernel and initrd directly.
+  installer_kernel_url: $${deb_mirror_uri}/dists/$${release}/main/installer-$${deb_arch}/current/images/netboot/debian-installer/$${deb_arch}/linux
+  installer_initrd_url: $${deb_mirror_uri}/dists/$${release}/main/installer-$${deb_arch}/current/images/netboot/debian-installer/$${deb_arch}/initrd.gz
+  # The Debian arm64 netboot installer requires explicitly passing auto=true and priority=critical
+  # (the "auto" alias seems to be defined only for amd64).
+  # FIXME find the Debian documentation page which explains that.
+  installer_cmdline: "auto=true priority=critical url=http://%LOCAL_IP%:%HTTP_PORT%/preseed.cfg"
+  base_preseed_path: $${kameleon_data_dir}/preseed/$${distrib}-$${release}-preseed.cfg
+  preseed_path: $${kameleon_cwd}/preseed.cfg
+
+bootstrap:
+  - "@base"
+
+setup:
+  - "@base"
+
+export:
+  - "@base"
diff --git a/grid5000/from_scratch/aarch64/debian-bullseye.yaml b/grid5000/from_scratch/aarch64/debian-bullseye.yaml
new file mode 100644
index 0000000..a017aab
--- /dev/null
+++ b/grid5000/from_scratch/aarch64/debian-bullseye.yaml
@@ -0,0 +1,24 @@
+#==============================================================================
+# vim: softtabstop=2 shiftwidth=2 expandtab fenc=utf-8 cc=81 tw=80
+#==============================================================================
+#
+# DESCRIPTION: Debian 11 (Bullseye) recipe using the netinstall mechanism
+#
+#==============================================================================
+---
+extend: debian-base.yaml
+# Global variables used by the Kameleon engine and the steps
+global:
+  # Distribution
+  distrib: debian
+  release: bullseye
+  release_number: 11
+
+bootstrap:
+  - "@base"
+
+setup:
+  - "@base"
+
+export:
+  - "@base"
diff --git a/grid5000/from_scratch/base.yaml b/grid5000/from_scratch/base.yaml
new file mode 100644
index 0000000..8fddec3
--- /dev/null
+++ b/grid5000/from_scratch/base.yaml
@@ -0,0 +1,138 @@
+#==============================================================================
+# vim: softtabstop=2 shiftwidth=2 expandtab fenc=utf-8 cc=81 tw=80
+#==============================================================================
+#
+# DESCRIPTION: Base recipe template
+#
+#==============================================================================
+---
+# Load qemu checkpoint
+checkpoint: simple.yaml
+# Load some helpful aliases (these files are located in the steps/aliases/ directory)
+aliases: defaults.yaml
+
+# Custom shell environment (these files are located in the steps/env/ directory)
+env:
+  - bashrc
+  - functions.sh
+
+# Global variables used by the Kameleon engine and the steps
+global:
+  # Architecture for the target system
+  arch: x86_64
+  distrib: unknown
+  release: unknown
+  # Default hostname
+  hostname: kameleon-$${distrib}
+  # Default root password
+  root_password: kameleon
+
+  ## System variables. Required by kameleon engine
+  # Include specific steps
+  include_steps:
+    - $${distrib}/$${release}
+    - $${distrib}
+
+  # If qemu_iso_path is set, boot from an iso, retrieved from the following URL:
+  installer_iso_arch: x86_64
+  installer_iso_url:
+  # or give a helper script to find out the iso URL:
+  installer_iso_finder_helper:
+  installer_iso_finder_args:
+
+  # Otherwise, if qemu_kernel_path is set, boot from a kernel, initrd and
+  # cmdline fetched from the URL defined below, and used directly in qemu:
+  installer_kernel_url:
+  installer_initrd_url:
+  installer_cmdline:
+
+  ## GPG keyserver (Warning: not all servers are reliable)
+  gpg_keyserver: keyserver.ubuntu.com
+
+  ## QEMU options
+  qemu_enable_kvm: true
+  qemu_uefi: false
+  qemu_cpus: 8
+  qemu_memory_size: 16G
+  qemu_monitor_socket: $${kameleon_cwd}/qemu_monitor.socket
+  qemu_arch: $${arch}
+  qemu_image_size: 17G
+  qemu_pidfile: $${kameleon_cwd}/qemu.pid
+  qemu_kernel_path: $${kameleon_cwd}/qemu_kernel
+  qemu_initrd_path: $${kameleon_cwd}/qemu_initrd
+  qemu_append_cmdline: $${installer_cmdline}
+  qemu_iso_path: $${kameleon_cwd}/qemu.iso
+
+  # rootfs options
+  disk_device: /dev/vda
+  rootfs: /rootfs
+  filesystem_type: ext4
+
+  # appliance options
+  image_disk: $${kameleon_cwd}/base_$${kameleon_recipe_name}
+  image_format: qcow2
+
+  # Allowed formats are: tar.gz, tar.bz2, tar.xz, tar.lzo, qcow, qcow2, qed, vdi, raw, vmdk
+  appliance_formats: tar.xz
+  appliance_filename: "$${kameleon_cwd}/$${kameleon_recipe_name}"
+  appliance_tar_excludes: >-
+    ./etc/fstab ./root/.bash_history ./root/kameleon_workdir ./root/.ssh
+    ./var/tmp/* ./tmp/* ./dev/* ./proc/* ./run/*
+    ./sys/* ./root/.rpmdb ./boot/extlinux ./boot/grub ./boot/grub2
+  zerofree: false
+
+  # GRUB
+  grub_cmdline_linux: console=tty0 console=ttyS0,115200
+
+  http_directory: $${kameleon_cwd}/http_dir
+  http_pid:  $${kameleon_cwd}/http.pid
+
+  ssh_config_file: $${kameleon_cwd}/ssh_config
+  local_ip: 10.0.2.2
+
+  out_context:
+    cmd: ssh -F $${ssh_config_file} $${kameleon_recipe_name} -t /bin/bash
+    workdir: /root/kameleon_workdir
+    proxy_cache: $${local_ip}
+
+  in_context:
+    cmd: ssh -F $${ssh_config_file} $${kameleon_recipe_name} -t /bin/bash
+    workdir: /root/kameleon_workdir
+    proxy_cache: $${local_ip}
+
+# Bootstrap the new system and create the 'in_context'
+bootstrap:
+  - enable_checkpoint
+  - download_installer
+  - prepare_disk
+  - prepare_autoinstall
+  - start_http_server
+  - start_qemu:
+    - force_vm_shutdown: false
+    - shutdown_vm_immediately: true
+    - vm_cleanup_section: bootstrap
+    - vm_expected_service:
+    - boot_timeout: 5
+  - prepare_ssh_to_out_context
+  - prepare_appliance
+  - start_qemu:
+    - force_vm_shutdown: true
+    - shutdown_vm_immediately: false
+    - vm_cleanup_section: setup
+    - vm_expected_service: ssh
+    - boot_timeout: 100
+    - qemu_iso_boot: false
+    - qemu_iso_path: ""
+    - qemu_kernel_path: ""
+    - qemu_sendkeys_commands: ""
+
+# Install and configuration steps
+setup:
+  - minimal_install
+  - clean_system
+
+# Export the generated appliance in the format of your choice
+export:
+  - disable_checkpoint
+  - save_appliance_VM:
+    - appliance_tar_compression_level: "9"
diff --git a/grid5000/from_scratch/debian-base.yaml b/grid5000/from_scratch/debian-base.yaml
new file mode 100644
index 0000000..447e57d
--- /dev/null
+++ b/grid5000/from_scratch/debian-base.yaml
@@ -0,0 +1,67 @@
+#==============================================================================
+# vim: softtabstop=2 shiftwidth=2 expandtab fenc=utf-8 cc=81 tw=80
+#==============================================================================
+#
+# DESCRIPTION: Debian generic recipe using the netinstall mechanism
+#
+# USAGE:
+#   Select the distribution/release directly in this recipe: see the usage
+#   example commented in the global section of this recipe,
+#
+#   or override the globals directly on the CLI. For example:
+#
+#   kameleon build --global distrib:debian,release:wheezy
+#
+#   or extend this recipe with your own and override those variables in it.
+#
+#==============================================================================
+---
+extend: base.yaml
+
+global:
+  # Boilerplate values, so that `kameleon info' works with the recipe.
+  # For a specific version of Debian, please see the dedicated recipe, as this
+  # recipe is mainly meant to be extended.
+  distrib: debian
+  deb_arch: amd64
+  release: jessie
+  release_number: 8
+
+  # URL to retrieve packages from (sources.list)
+  deb_mirror_hostname: deb.debian.org
+  deb_mirror_directory: /debian
+  deb_mirror_uri: http://$${deb_mirror_hostname}$${deb_mirror_directory}
+  deb_security_hostname: security.debian.org
+  deb_security_directory: /debian
+  deb_components: main contrib non-free
+
+  # Install from the installer's iso
+  # The location of the Debian netinstall iso can be set manually or guessed
+  # using a url finder helper script
+  #installer_iso_filename: debian-$${release_number}-$${deb_arch}-netinst.iso
+  #installer_iso_location: archive
+  #installer_iso_release_version: 8.0.0
+  #installer_iso_url: http://cdimage.debian.org/cdimage/$${installer_iso_location}/$${installer_iso_release_version}/$${deb_arch}/iso-cd/$${installer_iso_filename}
+  installer_iso_url:
+  installer_iso_finder_helper: $${kameleon_data_dir}/helpers/netinstall_iso_finder.py
+  installer_iso_finder_args: $${distrib} $${release_number} $${deb_arch}
+  qemu_iso_path: $${kameleon_cwd}/$${distrib}.iso
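+  # For illustration, the finder helper can also be run by hand with the same
+  # arguments (distribution, release number, Debian architecture); it prints
+  # the selected netinstall iso URL, e.g.:
+  #   python2 $${kameleon_data_dir}/helpers/netinstall_iso_finder.py debian 11 amd64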
+  # Or install from the netboot kernel and initrd directly
+  #installer_kernel_url: http://deb.debian.org/debian/dists/$${release}/main/installer-$${deb_arch}/current/images/netboot/debian-installer/$${deb_arch}/linux
+  #installer_initrd_url: http://deb.debian.org/debian/dists/$${release}/main/installer-$${deb_arch}/current/images/netboot/debian-installer/$${deb_arch}/initrd.gz
+  #installer_cmdline: "auto url=http://%LOCAL_IP%:%HTTP_PORT%/preseed.cfg"
+
+  base_preseed_path: $${kameleon_data_dir}/preseed/$${distrib}-$${release}-preseed.cfg
+  preseed_path: $${kameleon_cwd}/preseed.cfg
+
+  qemu_sendkeys_commands: $${kameleon_data_dir}/qemu-sendkeys/netinst-iso-$${distrib}
+
+
+bootstrap:
+  - "@base"
+
+setup:
+  - "@base"
+
+export:
+  - "@base"
diff --git a/grid5000/from_scratch/debian-bullseye.yaml b/grid5000/from_scratch/debian-bullseye.yaml
new file mode 100644
index 0000000..a017aab
--- /dev/null
+++ b/grid5000/from_scratch/debian-bullseye.yaml
@@ -0,0 +1,24 @@
+#==============================================================================
+# vim: softtabstop=2 shiftwidth=2 expandtab fenc=utf-8 cc=81 tw=80
+#==============================================================================
+#
+# DESCRIPTION: Debian 11 (Bullseye) recipe using the netinstall mechanism
+#
+#==============================================================================
+---
+extend: debian-base.yaml
+# Global variables used by the Kameleon engine and the steps
+global:
+  # Distribution
+  distrib: debian
+  release: bullseye
+  release_number: 11
+
+bootstrap:
+  - "@base"
+
+setup:
+  - "@base"
+
+export:
+  - "@base"
diff --git a/grid5000/steps/aliases/defaults.yaml b/grid5000/steps/aliases/defaults.yaml
new file mode 100644
index 0000000..a55c52a
--- /dev/null
+++ b/grid5000/steps/aliases/defaults.yaml
@@ -0,0 +1,169 @@
+write_local:
+  - exec_local: |
+      mkdir -p $(dirname @1);
+      cat >@1 <<EOF_KAMELEON_INTERNAL
+      @2
+      EOF_KAMELEON_INTERNAL
+
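+# Usage sketch (for illustration): a step invokes an alias with its arguments
+# as a list, @1 being the target path and @2 the content, e.g. the ssh_config
+# written by prepare_ssh_to_out_context:
+#   - write_local:
+#     - $${ssh_config_file}
+#     - |
+#       Host $${kameleon_recipe_name}
+#       HostName 127.0.0.1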
+write_in:
+  - exec_in: |
+      mkdir -p $(dirname @1);
+      cat >@1 <<EOF_KAMELEON_INTERNAL
+      @2
+      EOF_KAMELEON_INTERNAL
+
+write_out:
+  - exec_out: |
+      mkdir -p $(dirname @1);
+      cat >@1 <<EOF_KAMELEON_INTERNAL
+      @2
+      EOF_KAMELEON_INTERNAL
+
+append_local:
+  - exec_local: |
+      mkdir -p $(dirname @1);
+      cat >>@1 <<EOF_KAMELEON_INTERNAL
+      @2
+      EOF_KAMELEON_INTERNAL
+
+append_in:
+  - exec_in: |
+      mkdir -p $(dirname @1);
+      cat >>@1 <<EOF_KAMELEON_INTERNAL
+      @2
+      EOF_KAMELEON_INTERNAL
+
+append_out:
+  - exec_out: |
+      mkdir -p $(dirname @1);
+      cat >>@1 <<EOF_KAMELEON_INTERNAL
+      @2
+      EOF_KAMELEON_INTERNAL
+
+write_raw_local:
+  - exec_local: |
+      mkdir -p $(dirname @1);
+      cat >@1 <<'EOF_KAMELEON_INTERNAL'
+      @2
+      EOF_KAMELEON_INTERNAL
+
+write_raw_in:
+  - exec_in: |
+      mkdir -p $(dirname @1);
+      cat >@1 <<'EOF_KAMELEON_INTERNAL'
+      @2
+      EOF_KAMELEON_INTERNAL
+
+write_raw_out:
+  - exec_out: |
+      mkdir -p $(dirname @1);
+      cat >@1 <<'EOF_KAMELEON_INTERNAL'
+      @2
+      EOF_KAMELEON_INTERNAL
+
+append_raw_local:
+  - exec_local: |
+      mkdir -p $(dirname @1);
+      cat >>@1 <<'EOF_KAMELEON_INTERNAL'
+      @2
+      EOF_KAMELEON_INTERNAL
+
+append_raw_in:
+  - exec_in: |
+      mkdir -p $(dirname @1);
+      cat >>@1 <<'EOF_KAMELEON_INTERNAL'
+      @2
+      EOF_KAMELEON_INTERNAL
+
+append_raw_out:
+  - exec_out: |
+      mkdir -p $(dirname @1);
+      cat >>@1 <<'EOF_KAMELEON_INTERNAL'
+      @2
+      EOF_KAMELEON_INTERNAL
+
+local2out:
+  - exec_out: |
+      mkdir -p $(dirname @2)
+  - pipe:
+      - exec_local: cat @1
+      - exec_out: cat > @2
+
+local2in:
+  - exec_in: mkdir -p $(dirname @2)
+  - pipe:
+      - exec_local: cat @1
+      - exec_in: cat > @2
+
+out2local:
+  - exec_local: mkdir -p $(dirname @2)
+  - pipe:
+      - exec_out: cat @1
+      - exec_local: cat > @2
+
+out2in:
+  - exec_in: mkdir -p $(dirname @2)
+  - pipe:
+      - exec_out: cat @1
+      - exec_in: cat > @2
+
+in2local:
+  - exec_local: mkdir -p $(dirname @2)
+  - pipe:
+      - exec_in: cat @1
+      - exec_local: cat > @2
+
+in2out:
+  - exec_out: mkdir -p $(dirname @2)
+  - pipe:
+      - exec_in: cat @1
+      - exec_out: cat > @2
+
+check_cmd_out:
+  - rescue:
+    - exec_out: command -V @1 2> /dev/null
+    - breakpoint: "@1 is missing from out_context"
+
+check_cmd_local:
+  - on_bootstrap_init:
+    - rescue:
+      - exec_local: command -V @1 2> /dev/null
+      - breakpoint: "@1 is missing from local_context"
+
+check_cmd_in:
+  - rescue:
+    - exec_in: command -V @1 2> /dev/null
+    - breakpoint: "@1 is missing from in_context"
+
+umount_out:
+  - exec_out: |
+      echo "try umount @1..." ; mountpoint -q "@1" && umount -f -l "@1" || true
+
+umount_local:
+  - exec_local: |
+      echo "try umount @1..." ; mountpoint -q "@1" && umount -f -l "@1" || true
+
+umount_in:
+  - exec_in: |
+      echo "try umount @1..." ; mountpoint -q "@1" && umount -f -l "@1" || true
+
+download_file_in:
+  - exec_in: __download "@1" "@2"
+
+download_file_out:
+  - exec_out: __download "@1" "@2"
+
+download_file_local:
+  - exec_local: __download "@1" "@2"
+
+download_recipe_build_local:
+  - exec_local: __download_recipe_build "@1" "@2" "@3" "@4" "@5" "@6" "@7"
+
+download_grid5000_image_local:
+  - exec_local: __download_grid5000_image "@1" "@2" "@3" "@4" "@5"
+
+apt-get_in:
+  - exec_in: DEBIAN_FRONTEND=noninteractive apt-get -y --force-yes @1 2>&1
+
+apt-get_out:
+  - exec_out: DEBIAN_FRONTEND=noninteractive apt-get -y --force-yes @1 2>&1
diff --git a/grid5000/steps/bootstrap/debian/prepare_autoinstall.yaml b/grid5000/steps/bootstrap/debian/prepare_autoinstall.yaml
new file mode 100644
index 0000000..f737d20
--- /dev/null
+++ b/grid5000/steps/bootstrap/debian/prepare_autoinstall.yaml
@@ -0,0 +1,11 @@
+- copy_autoinstall_script_to_http_directory:
+  - exec_local: mkdir -p $${http_directory}
+  - exec_local: cp $${base_preseed_path} $${http_directory}/preseed.cfg
+
+- customize_preseed:
+  - exec_local: sed -i -e 's|\(d-i passwd/root-password password \).*|\1$${root_password}|g' $${http_directory}/preseed.cfg
+  - exec_local: sed -i -e 's|\(d-i passwd/root-password-again password \).*|\1$${root_password}|g' $${http_directory}/preseed.cfg
+  - exec_local: sed -i -e 's|\(d-i mirror/http/hostname string \).*|\1$${deb_mirror_hostname}|g' $${http_directory}/preseed.cfg
+  - exec_local: sed -i -e 's|\(d-i mirror/http/directory string \).*|\1$${deb_mirror_directory}|g' $${http_directory}/preseed.cfg
+  - exec_local: sed -i -e 's|\(d-i apt-setup/security_host string \).*|\1$${deb_security_hostname}|g' $${http_directory}/preseed.cfg
+  - exec_local: sed -i -e 's|\(d-i apt-setup/security_path string \).*|\1$${deb_security_directory}|g' $${http_directory}/preseed.cfg
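+  # For illustration, with the bullseye preseed shipped in this repository the
+  # first substitution rewrites the line
+  #   d-i passwd/root-password password kameleon
+  # so that "kameleon" is replaced by the value of $${root_password}.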
diff --git a/grid5000/steps/bootstrap/download_installer.yaml b/grid5000/steps/bootstrap/download_installer.yaml
new file mode 100644
index 0000000..f15f58c
--- /dev/null
+++ b/grid5000/steps/bootstrap/download_installer.yaml
@@ -0,0 +1,31 @@
+- download_installer:
+  - test:
+     - exec_local: test -n "$${installer_iso_url}" -o -n "$${installer_iso_finder_helper}"
+     - group:
+       - test:
+          - exec_local: test -z "$${installer_iso_url}"
+          - exec_local: |
+              echo "Looking for the netinstall iso URL for $${installer_iso_finder_args}"
+              DOWNLOAD_SRC_URL=$(python2 $${installer_iso_finder_helper} $${installer_iso_finder_args})
+       - download_file_local:
+         - $${installer_iso_url}
+         - $${qemu_iso_path}
+       - exec_local: unset DOWNLOAD_SRC_URL
+     - group:
+       - test:
+         - exec_local: test -n "$${installer_kernel_url}"
+         - download_file_local:
+           - $${installer_kernel_url}
+           - $${qemu_kernel_path}
+       - test:
+         - exec_local: test -n "$${installer_initrd_url}"
+         - download_file_local:
+           - $${installer_initrd_url}
+           - $${qemu_initrd_path}
+
+- delete_installer:
+  - on_checkpoint: skip
+  - on_export_clean:
+    - exec_local: rm -f $${qemu_iso_path}
+    - exec_local: rm -f $${qemu_kernel_path}
+    - exec_local: rm -f $${qemu_initrd_path}
diff --git a/grid5000/steps/bootstrap/prepare_appliance.yaml b/grid5000/steps/bootstrap/prepare_appliance.yaml
new file mode 100644
index 0000000..4f597c4
--- /dev/null
+++ b/grid5000/steps/bootstrap/prepare_appliance.yaml
@@ -0,0 +1,33 @@
+- insecure_ssh_key: $${kameleon_cwd}/insecure_ssh_key
+
+- generate_ssh_keys:
+  - check_cmd_local: ssh-keygen
+  - exec_local: echo -e  'y\n' | ssh-keygen -q -t rsa -b 4096 -f $${insecure_ssh_key} -N ''
+  - exec_local: cat $${insecure_ssh_key}
+
+- inject_ssh_private_key:
+  - check_cmd_local: virt-customize
+  - exec_local: |
+      virt-customize \
+        -a $${image_disk}.$${image_format} \
+        --run-command 'mkdir -p /root/.ssh' \
+        --upload $${insecure_ssh_key}.pub:/root/.ssh/.kameleon_authorized_keys \
+        --run-command 'touch /root/.ssh/authorized_keys' \
+        --run-command 'cp /root/.ssh/authorized_keys /root/.ssh/authorized_keys.bak' \
+        --run-command 'cat /root/.ssh/.kameleon_authorized_keys >> /root/.ssh/authorized_keys' \
+        --run-command 'chmod 700 /root/.ssh' \
+        --run-command 'chmod -R go-rw /root/.ssh' \
+        --run-command 'chown -R root:root /root/.ssh'
+  - on_export_init:
+    - exec_local: |
+        virt-customize \
+          -a $${image_disk}.$${image_format} \
+          --run-command 'mv /root/.ssh/authorized_keys.bak /root/.ssh/authorized_keys' \
+          --delete /root/.ssh/.kameleon_authorized_keys
+
+- add_insecure_key_to_ssh_config:
+  - on_checkpoint: redo
+  - exec_local: |
+      cat <<EOF >> $${ssh_config_file}
+      IdentityFile $${insecure_ssh_key}
+      EOF
diff --git a/grid5000/steps/bootstrap/prepare_disk.yaml b/grid5000/steps/bootstrap/prepare_disk.yaml
new file mode 100644
index 0000000..9c3dce4
--- /dev/null
+++ b/grid5000/steps/bootstrap/prepare_disk.yaml
@@ -0,0 +1,10 @@
+- create_initial_image:
+  - check_cmd_local: qemu-img
+  - exec_local: |
+      rm -f $${image_disk}.$${image_format}
+      qemu-img create -f qcow2 $${image_disk}.$${image_format} $${qemu_image_size}
+
+- delete_initial_image:
+  - on_checkpoint: skip
+  - on_export_clean:
+    - exec_local: rm -f $${image_disk}.$${image_format}
diff --git a/grid5000/steps/bootstrap/prepare_ssh_to_out_context.yaml b/grid5000/steps/bootstrap/prepare_ssh_to_out_context.yaml
new file mode 100644
index 0000000..172f7a4
--- /dev/null
+++ b/grid5000/steps/bootstrap/prepare_ssh_to_out_context.yaml
@@ -0,0 +1,23 @@
+- select_empty_port:
+  - on_checkpoint: redo
+  - exec_local: |
+      # Find empty SSH forwarding port
+      SSH_FWD_PORT=$(__find_free_port 50000 60000)
+      echo "SSH forwarding port: $SSH_FWD_PORT"
+- prepare_ssh_config:
+  - on_checkpoint: redo
+  - write_local:
+    - $${ssh_config_file}
+    - |
+      Host $${kameleon_recipe_name}
+      HostName 127.0.0.1
+      Port ${SSH_FWD_PORT}
+      User root
+      UserKnownHostsFile /dev/null
+      StrictHostKeyChecking no
+      PasswordAuthentication no
+      IdentitiesOnly yes
+      LogLevel FATAL
+      ForwardAgent yes
+      Compression yes
+      Protocol 2
diff --git a/grid5000/steps/bootstrap/start_http_server.yaml b/grid5000/steps/bootstrap/start_http_server.yaml
new file mode 100644
index 0000000..59184c3
--- /dev/null
+++ b/grid5000/steps/bootstrap/start_http_server.yaml
@@ -0,0 +1,19 @@
+- http_script: $${kameleon_data_dir}/helpers/simple_http_server.py
+
+- run_http_server:
+  - exec_local: |
+      HTTP_PORT=$(__find_free_port 8000 8100)
+      echo "HTTP port: $HTTP_PORT"
+      export HTTP_PORT
+  - exec_local: python2 $${http_script} --root $${http_directory} --bind 0.0.0.0 --port $HTTP_PORT --daemon --pid $${http_pid}
+  - on_bootstrap_clean:
+    - exec_local: |
+        if [ -f $${http_pid} ]; then
+          HTTP_PID=$(cat $${http_pid})
+          if ps -p $HTTP_PID > /dev/null; then
+              echo "Killing HTTP server (pid: $HTTP_PID)..."
+              kill -9 "$HTTP_PID"
+              rm -f $${http_pid}
+          fi
+          rm -f $${http_pid}
+        fi
diff --git a/grid5000/steps/bootstrap/start_qemu.yaml b/grid5000/steps/bootstrap/start_qemu.yaml
new file mode 100644
index 0000000..35e0206
--- /dev/null
+++ b/grid5000/steps/bootstrap/start_qemu.yaml
@@ -0,0 +1,227 @@
+# Require SSH_FWD_PORT bash environment variable to be set
+
+# This must be set if you want to boot an ISO image:
+- qemu_iso_path: ""
+- qemu_iso_boot: true
+# Else that can be set to boot from a kernel, initrd and cmdline:
+- qemu_kernel_path: ""
+- qemu_initrd_path: ""
+- qemu_append_cmdline: ""
+# Else boot from disk.
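+# For illustration, the base recipes in this repository select the boot mode by
+# overriding these variables when including the step, e.g. for the second run,
+# which boots from the installed disk:
+#   - start_qemu:
+#     - qemu_iso_boot: false
+#     - qemu_iso_path: ""
+#     - qemu_kernel_path: ""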
+
+- vm_expected_service: ssh
+- boot_timeout: 100
+- shutdown_timeout: 100
+- debug: false
+- telnet_port: ""
+- no_reboot: true
+- socat_monitor: socat - UNIX-CONNECT:$${qemu_monitor_socket}
+- qemu_sendkeys_script: $${kameleon_data_dir}/qemu-sendkeys.rb
+- qemu_sendkeys_commands: 
+- vm_expected_service: ssh
+- vm_cleanup_section: setup
+- shutdown_vm_immediately: false
+- force_vm_shutdown: true
+- qemu_enable_kvm: true
+- qemu_cpus: 2
+- qemu_memory_size: 1024
+- qemu_monitor_socket: $${kameleon_cwd}/qemu_monitor.socket
+- qemu_arch: $${arch}
+- qemu_image_size: 10G
+- qemu_pidfile: $${kameleon_cwd}/qemu.pid
+- qemu_uefi: false
+- qemu_uefi_code_path: /usr/share/AAVMF/AAVMF_CODE.fd
+- qemu_uefi_vars_path: /usr/share/AAVMF/AAVMF_VARS.fd
+- qemu_netdev_user_options:
+- disk_cache: unsafe
+
+- start_vm:
+  - on_checkpoint: redo
+  - check_cmd_local: qemu-system-$${qemu_arch}
+  - check_cmd_local: socat
+  - on_bootstrap_clean:
+    - test:
+      - exec_local: test "$${shutdown_vm_immediately}" == "false" -a "$${vm_cleanup_section}" == "bootstrap"
+      - group: 
+        - exec_local: &1 |
+            if [ -f $${qemu_pidfile} ]; then
+              _QEMU_PID=$(< $${qemu_pidfile})
+              if ps -p $_QEMU_PID > /dev/null; then
+                if [ "$${force_vm_shutdown}" == "true" ]; then
+                  if [ -S $${qemu_monitor_socket} ]; then
+                    echo "Executing a graceful shutdown of the qemu VM via the monitor socket..."
+                    NEXT_WAIT_TIME=0
+                    echo system_powerdown | socat - UNIX-CONNECT:$${qemu_monitor_socket} || true
+                    while ps -p $_QEMU_PID > /dev/null && [ $NEXT_WAIT_TIME -lt $${shutdown_timeout} ];
+                    do
+                      sleep 1
+                      echo -en "\rWaiting for qemu virtual machine to shutdown...($(( $${shutdown_timeout} - 1 - NEXT_WAIT_TIME++ ))s)"
+                    done
+                    done
+                  fi
+                else
+                  echo "Waiting for the VM to shutdown"
+                  echo "Run 'vncviewer :$VNC_PORT' to see what's happening in the VM"
+                  while ps -p $_QEMU_PID > /dev/null;
+                  do
+                    sleep 2
+                  done
+                fi
+              fi
+            fi
+        - exec_local: &2 |
+            if [ -f $${qemu_pidfile} ]; then
+              _QEMU_PID=$(< $${qemu_pidfile})
+              if ps -p $_QEMU_PID > /dev/null; then
+                if [ -S $${qemu_monitor_socket} ]; then
+                  echo "The graceful shutdown of the qemu VM should have failed (monitor socket is there)..."
+                fi
+                echo "Killing qemu (pid: $_QEMU_PID)."
+                kill -9 "$_QEMU_PID"
+              fi
+              rm -f $${qemu_pidfile}
+            fi
+            rm -f $${qemu_monitor_socket}
+  - on_setup_clean:
+    - test:
+      - exec_local: test "$${shutdown_vm_immediately}" == "false" -a "$${vm_cleanup_section}" == "setup"
+      - group:
+        - exec_local: *1
+        - exec_local: *2
+  - on_export_clean:
+    - test:
+      - exec_local: test "$${shutdown_vm_immediately}" == "false" -a "$${vm_cleanup_section}" == "export"
+      - group:
+        - exec_local: *1
+        - exec_local: *2
+  - exec_local: |
+      if [ "$${shutdown_vm_immediately}" == "true" ]; then
+        echo "Qemu VM shutdown: immediately"
+      else
+        echo "Qemu VM shutdown: in $${vm_cleanup_section} section cleaning"
+      fi
+  - exec_local: |
+      if [ -r $${qemu_pidfile} ] && pgrep -F $${qemu_pidfile} > /dev/null; then
+        echo "Qemu pid file found, with process running: killing it !" 1>&2
+        pkill -F $${qemu_pidfile}
+        sleep 0.5
+        if pgrep -F $${qemu_pidfile} > /dev/null; then
+          echo "Failed to kill qemu process." 1>&2
+          exit 1
+        fi
+      fi
+  - exec_local: |
+      echo "Starting qemu..."
+      if [ "$${qemu_enable_kvm}" == "true" ] && (/usr/sbin/kvm-ok > /dev/null || egrep '(vmx|svm)' /proc/cpuinfo > /dev/null) ; then # print warning if /usr/sbin/kvm-ok is not installed
+        if [ "$${qemu_arch}" == "aarch64" ]; then
+          ENABLE_KVM="-enable-kvm -accel kvm -machine virt,gic-version=host,accel=kvm:tcg -cpu host"
+          #ENABLE_KVM="-global virtio-blk-pci.scsi=off -no-user-config -enable-fips -machine virt,gic-version=host,accel=kvm:tcg -cpu host -rtc driftfix=slew -object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0"
+        elif [ "$${qemu_arch}" == "ppc64" ]; then
+          ENABLE_KVM="-enable-kvm -accel kvm -machine pseries,accel=kvm:tcg -cpu host"
+        else #X86_64
+          ENABLE_KVM="-enable-kvm -cpu host"
+        fi
+        BOOT_TIMEOUT=$${boot_timeout}
+      else
+        echo "No KVM acceleration used"
+        BOOT_TIMEOUT=$(($${boot_timeout}*2))
+      fi
+      if [ -f "vm_state_to_load.txt" ]
+      then
+          SAVED_STATE="$(< vm_state_to_load.txt)"
+          LOADVM="-loadvm $SAVED_STATE"
+          rm -f vm_state_to_load.txt
+      fi
+      if [ "$${debug}" == "true" ]; then
+        VNC_OPT=""
+      else
+        # Find empty VNC port
+        VNC_PORT=$(( $(__find_free_port 5900 5910) - 5900 ))
+        echo "VNC port: $VNC_PORT"
+        VNC_OPT="-vnc :$VNC_PORT"
+      fi
+      if [ -n "$${telnet_port}" ]; then
+        SERIAL_TELNET="telnet:localhost:$${telnet_port},server"
+      fi
+      # Select disk
+      QEMU_DRIVES="-drive file=$${image_disk}.$${image_format},cache=$${disk_cache},media=disk,if=virtio,id=drive0"
+      QEMU_BOOT=
+      QEMU_APPEND_CMDLINE=
+      if [ "$${qemu_uefi}" == "true" ]; then
+        if [ ! -f $${kameleon_cwd}/qemu_uefi_vars.fd ]; then
+          cp $${qemu_uefi_vars_path} $${kameleon_cwd}/qemu_uefi_vars.fd
+        fi
+        QEMU_BOOT="-drive if=pflash,format=raw,readonly,file=$${qemu_uefi_code_path} -drive if=pflash,format=raw,file=$${kameleon_cwd}/qemu_uefi_vars.fd"
+      fi
+      if [ -n "$${qemu_iso_path}" ]; then
+        QEMU_DRIVES="-drive file=$${qemu_iso_path},readonly,media=cdrom $QEMU_DRIVES"
+        if [ "$${qemu_iso_boot}" == "true" ]; then
+          QEMU_BOOT="$QEMU_BOOT -boot order=d"
+        fi
+      elif [ -n "$${qemu_kernel_path}" ]; then
+        QEMU_BOOT="$QEMU_BOOT -kernel $${qemu_kernel_path}"
+        if [ -n "$${qemu_initrd_path}" ]; then
+          QEMU_BOOT="$QEMU_BOOT -initrd $${qemu_initrd_path}"
+        fi
+        if [ -n "$${qemu_append_cmdline}" ]; then
+          QEMU_APPEND_CMDLINE="$${qemu_append_cmdline}"
+          QEMU_APPEND_CMDLINE=${QEMU_APPEND_CMDLINE//%LOCAL_IP%/$${local_ip}}
+          QEMU_APPEND_CMDLINE=${QEMU_APPEND_CMDLINE//%HTTP_PORT%/$HTTP_PORT}
+        fi
+      fi
+      if [ -n "$${qemu_netdev_user_options}" ]; then
+        QEMU_NETDEV_USER_OPTIONS=",$${qemu_netdev_user_options}"
+      fi
+      if [ "$${no_reboot}" == "true" ]; then
+        NO_REBOOT="-no-reboot"
+      fi
+      if [ -n "${SSH_FWD_PORT}" ]; then
+        HOSTFWD=",hostfwd=tcp::${SSH_FWD_PORT}-:22"
+      fi
+      qemu-system-$${qemu_arch} $ENABLE_KVM -smp $${qemu_cpus} -m $${qemu_memory_size} -rtc base=localtime \
+        -net nic,model=virtio -net user${QEMU_NETDEV_USER_OPTIONS}${HOSTFWD} \
+        $QEMU_DRIVES \
+        -monitor unix:$${qemu_monitor_socket},server,nowait -pidfile $${qemu_pidfile} -daemonize \
+        $QEMU_BOOT ${QEMU_APPEND_CMDLINE:+-append "$QEMU_APPEND_CMDLINE"} $NO_REBOOT \
+        $VNC_OPT $SERIAL_TELNET \
+        $LOADVM
+  - exec_local: |
+      VM_AVAILABLE=0
+      if [ "$${vm_expected_service}" == "ssh" ]; then
+        TIMEOUT=$(( $(date +%s) + $BOOT_TIMEOUT ))
+        until timeout 5 ssh -q -F $${ssh_config_file} -o ConnectionAttempts=1 $${kameleon_recipe_name} -t true && VM_AVAILABLE=1 || [ $(date +%s) -gt $TIMEOUT ];
+        do
+          echo -en "\rWaiting for SSH to become available in VM for out_context...($(( TIMEOUT - $(date +%s) ))s)"
+          sleep 1
+        done
+        echo
+      else
+        TIMEOUT=$(( $(date +%s) + $BOOT_TIMEOUT ))
+        until timeout 1 [ $(date +%s) -gt $TIMEOUT ];
+        do
+          echo -en "\rWaiting for VM to become available : ($(( TIMEOUT - $(date +%s) ))s)"
+          sleep 1
+        done
+        echo
+        VM_AVAILABLE=1
+      fi
+  - rescue:
+    - exec_local: test $VM_AVAILABLE -eq 1
+    - breakpoint: |
+        Failed to get VM up and running (expected service: $${vm_expected_service}). Please verify the VM successfully booted with a vnc client.
+  - test:
+    - exec_local: test -e "$${qemu_sendkeys_commands}" -a -s "$${qemu_sendkeys_commands}"
+    - exec_local: |
+        echo "Sending keyboard commands to the VM: $${qemu_sendkeys_commands}"
+        echo "(Local httpd server url: http://$${local_ip}:$HTTP_PORT)"
+        ruby $${qemu_sendkeys_script} -d 0.05 "$(sed -e s/%LOCAL_IP%/$${local_ip}/g -e s/%HTTP_PORT%/$HTTP_PORT/g $${qemu_sendkeys_commands})" | $${socat_monitor} > /dev/null
+    - exec_local: echo "No keyboard commands to send"
+
+- shutdown_vm:
+  - on_checkpoint: redo
+  - on_clean:
+    - test:
+      - exec_local: test "$${shutdown_vm_immediately}" == "true"
+      - exec_local: *2
+  - test:
+    - exec_local: test "$${shutdown_vm_immediately}" == "true"
+    - exec_local: *1
diff --git a/grid5000/steps/checkpoints/simple.yaml b/grid5000/steps/checkpoints/simple.yaml
new file mode 100644
index 0000000..dbd60df
--- /dev/null
+++ b/grid5000/steps/checkpoints/simple.yaml
@@ -0,0 +1,21 @@
+enabled?:
+  - exec_local: test -f $${kameleon_cwd}/checkpoint_enabled
+
+create:
+  - exec_local: |
+      echo @microstep_id >> $${kameleon_cwd}/checkpoints.list
+
+apply:
+  - exec_local: |
+      touch $${kameleon_cwd}/checkpoints.list
+      grep -R @microstep_id $${kameleon_cwd}/checkpoints.list
+
+
+clear:
+  - exec_local: |
+      echo > $${kameleon_cwd}/checkpoints.list
+
+list:
+  - exec_local: |
+      touch $${kameleon_cwd}/checkpoints.list
+      cat $${kameleon_cwd}/checkpoints.list | uniq
diff --git a/grid5000/steps/data/helpers/export_appliance.py b/grid5000/steps/data/helpers/export_appliance.py
new file mode 100644
index 0000000..450ef47
--- /dev/null
+++ b/grid5000/steps/data/helpers/export_appliance.py
@@ -0,0 +1,247 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""Convert a disk image to many others formats with guestfish."""
+from __future__ import division, unicode_literals
+
+import os
+# import time
+import os.path as op
+import sys
+import subprocess
+import argparse
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+tar_formats = ('tar', 'tar.gz', 'tgz', 'tar.bz2', 'tbz', 'tar.xz', 'txz',
+               'tar.lzo', 'tzo', 'tar.zst', 'tzst')
+
+tar_options = ["--selinux", "--xattrs", "--xattrs-include=*", "--numeric-owner", "--one-file-system"]
+
+disk_formats = ('qcow', 'qcow2', 'qed', 'vdi', 'raw', 'vmdk')
+
+
+def which(command):
+    """Locate a command.
+    Snippet from: http://stackoverflow.com/a/377028
+    """
+    def is_exe(fpath):
+        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+    fpath, fname = os.path.split(command)
+    if fpath:
+        if is_exe(command):
+            return command
+    else:
+        for path in os.environ["PATH"].split(os.pathsep):
+            path = path.strip('"')
+            exe_file = os.path.join(path, command)
+            if is_exe(exe_file):
+                return exe_file
+
+    raise ValueError("Command '%s' not found" % command)
+
+
+def tar_convert(disk, output, excludes, compression_level):
+    """Convert image to a tar rootfs archive."""
+    if compression_level in ("best", "fast"):
+        compression_level_opt = "--%s" % compression_level
+    else:
+        compression_level_opt = "-%s" % compression_level
+
+    compr = ""
+    if output.endswith(('tar.gz', 'tgz')):
+        try:
+            compr = "| %s %s" % (which("pigz"), compression_level_opt)
+        except:
+            compr = "| %s %s" % (which("gzip"), compression_level_opt)
+    elif output.endswith(('tar.bz2', 'tbz')):
+        compr = "| %s %s" % (which("bzip2"), compression_level_opt)
+    elif output.endswith(('tar.xz', 'txz')):
+        compr = "| {} {} -c --threads=0 -".format(
+            which("xz"), compression_level_opt)
+    elif output.endswith(('tar.lzo', 'tzo')):
+        compr = "| %s %s -c -" % (which("lzop"), compression_level_opt)
+    elif output.endswith(('tar.zst', 'tzst')):
+        try:
+            compr = "| %s %s" % (which("zstdmt"), compression_level_opt)
+        except:
+            compr = "| %s -T0 %s" % (which("zstd"), compression_level_opt)
+
+    # NB: guestfish version >= 1.32 supports the special tar options, but not available in Debian stable (jessie): do not use for now
+    #tar_options_list = ["selinux:true", "acls:true", "xattrs:true",
+    #                    "numericowner:true",
+    #                    "excludes:\"%s\"" % ' '.join(excludes)]
+    #tar_options_str = ' '.join(tar_options_list)
+    #cmd = which("guestfish") + \
+    #    " --ro -i tar-out -a %s / - %s %s > %s"
+    #cmd = cmd % (disk, tar_options_str, compr, output)
+    #proc = subprocess.Popen(cmd_mount_tar, env=os.environ.copy(), shell=True)
+    #proc.communicate()
+    #if proc.returncode:
+    #    raise subprocess.CalledProcessError(proc.returncode, cmd)
+
+    tar_options_str = ' '.join(tar_options + ['--exclude="%s"' % s for s in excludes])
+    # Necessary to have quick access to /etc (bug 12240) and also good for reproducibility
+    tar_options_str += ' --sort=name'
+    directory = dir_path = os.path.dirname(os.path.realpath(disk))
+    cmds = [
+        which("mkdir") + " %s/.mnt" % directory,
+        which("guestmount") + " --ro -i -a %s %s/.mnt" % (disk, directory),
+        which("tar") + " -c %s -C %s/.mnt . %s > %s" % (tar_options_str, directory, compr, output)
+        ]
+    cmd_mount_tar = " && ".join(cmds)
+    proc = subprocess.Popen(cmd_mount_tar, env=os.environ.copy(), shell=True)
+    proc.communicate()
+    returncode_mount_tar = proc.returncode
+
+    # try to umount even if the previous command failed
+    cmds = [
+        which("guestunmount") + " %s/.mnt" % directory,
+        which("rmdir") + " %s/.mnt" % directory
+        ]
+    cmd_umount = " && ".join(cmds)
+    proc = subprocess.Popen(cmd_umount, env=os.environ.copy(), shell=True)
+    proc.communicate()
+    returncode_umount = proc.returncode
+
+    if returncode_mount_tar:
+        raise subprocess.CalledProcessError(returncode_mount_tar, cmd_mount_tar)
+    elif returncode_umount:
+        raise subprocess.CalledProcessError(returncode_umount, cmd_umount)
+
+
+def qemu_convert(disk, output_fmt, output_filename):
+    """Convert the disk image filename to disk image output_filename."""
+    binary = which("qemu-img")
+    cmd = [binary, "convert", "-O", output_fmt, disk, output_filename]
+    if output_fmt in ("qcow", "qcow2"):
+        cmd.insert(2, "-c")
+    proc = subprocess.Popen(cmd, env=os.environ.copy(), shell=False)
+    proc.communicate()
+    if proc.returncode:
+        raise subprocess.CalledProcessError(proc.returncode, ' '.join(cmd))
+
+
+def run_guestfish_script(disk, script, mount=""):
+    """
+    Run guestfish script.
+    Mount should be in ("read_only", "read_write", "ro", "rw")
+    """
+    args = [which("guestfish"), '-a', disk]
+    if mount in ("read_only", "read_write", "ro", "rw"):
+        args.append('-i')
+        if mount in ("read_only", "ro"):
+            args.append('--ro')
+        else:
+            args.append('--rw')
+    else:
+        script = "run\n%s" % script
+    proc = subprocess.Popen(args,
+                            stdin=subprocess.PIPE,
+                            env=os.environ.copy())
+    proc.communicate(input=script.encode('utf-8'))
+    if proc.returncode:
+        raise subprocess.CalledProcessError(proc.returncode, ' '.join(args))
+
+
+def guestfish_zerofree(filename):
+    """Fill free space with zero"""
+    logger.info(guestfish_zerofree.__doc__)
+    cmd = "virt-filesystems -a %s" % filename
+    fs = subprocess.check_output(cmd.encode('utf-8'),
+                                 stderr=subprocess.STDOUT,
+                                 shell=True,
+                                 env=os.environ.copy())
+    list_fs = fs.decode('utf-8').split()
+    logger.info('\n'.join(('  `--> %s' % i for i in list_fs)))
+    script = '\n'.join(('zerofree %s' % i for i in list_fs))
+    run_guestfish_script(filename, script, mount="read_only")
+
+
+def convert_disk_image(args):
+    """Convert disk to another format."""
+    filename = op.abspath(args.file.name)
+    output = op.abspath(args.output)
+
+    os.environ['LIBGUESTFS_CACHEDIR'] = os.getcwd()
+    if args.verbose:
+        os.environ['LIBGUESTFS_DEBUG'] = '1'
+
+    # sometimes guestfish fails because other virtualization tools are still
+    # running; use a test-and-retry loop to wait for availability
+    # attempts = 0
+    # while attempts < 3:
+    #    try:
+    #        logger.info("Waiting for virtualisation to be available...")
+    #        run_guestfish_script(filename, "cat /etc/hostname", mount='ro')
+    #        break
+    #    except:
+    #        attempts += 1
+    #        time.sleep(1)
+
+    if args.zerofree and (set(args.formats) & set(disk_formats)):
+        guestfish_zerofree(filename)
+
+    for fmt in args.formats:
+        if fmt in (tar_formats + disk_formats):
+            output_filename = "%s.%s" % (output, fmt)
+            if output_filename == filename:
+                continue
+            logger.info("Creating %s" % output_filename)
+            try:
+                if fmt in tar_formats:
+                    tar_convert(filename, output_filename,
+                                args.tar_excludes,
+                                args.tar_compression_level)
+                else:
+                    qemu_convert(filename, fmt, output_filename)
+            except ValueError as exp:
+                logger.error("Error: %s" % exp)
+
+
+if __name__ == '__main__':
+    allowed_formats = tar_formats + disk_formats
+    allowed_formats_help = 'Allowed values are ' + ', '.join(allowed_formats)
+
+    allowed_levels = ["%d" % i for i in range(1, 10)] + ["best", "fast"]
+    allowed_levels_helps = 'Allowed values are ' + ', '.join(allowed_levels)
+
+    parser = argparse.ArgumentParser(
+        description=sys.modules[__name__].__doc__,
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
+    parser.add_argument('file', action="store", type=argparse.FileType('r'),
+                        help='Disk image filename')
+    parser.add_argument('-F', '--formats', action="store", type=str, nargs='+',
+                        help='Output format. ' + allowed_formats_help,
+                        choices=allowed_formats, metavar='fmt', required=True)
+    parser.add_argument('-o', '--output', action="store", type=str,
+                        help='Output filename (without file extension)',
+                        required=True, metavar='filename')
+    parser.add_argument('--tar-compression-level', action="store", type=str,
+                        default="9", choices=allowed_levels, metavar='lvl',
+                        help="Compression level. " + allowed_levels_helps)
+    parser.add_argument('--tar-excludes', action="store", type=str, nargs='+',
+                        help="Files to exclude from the archive",
+                        metavar='pattern', default=[])
+    parser.add_argument('--zerofree', action="store_true", default=False,
+                        help='Zero free unallocated blocks from ext2/3 '
+                             'file-systems before export to reduce image size')
+    parser.add_argument('--verbose', action="store_true", default=False,
+                        help='Enable very verbose messages')
+    log_format = '%(levelname)s: %(message)s'
+    level = logging.INFO
+    args = parser.parse_args()
+    if args.verbose:
+        level = logging.DEBUG
+
+    handler = logging.StreamHandler(sys.stdout)
+    handler.setLevel(level)
+    handler.setFormatter(logging.Formatter(log_format))
+
+    logger.setLevel(level)
+    logger.addHandler(handler)
+
+    convert_disk_image(args)
diff --git a/grid5000/steps/data/helpers/netinstall_iso_finder.py b/grid5000/steps/data/helpers/netinstall_iso_finder.py
new file mode 100644
index 0000000..aa9a2e6
--- /dev/null
+++ b/grid5000/steps/data/helpers/netinstall_iso_finder.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""Find the latest netinstall iso for a Debian version and system architecture."""
+
+from HTMLParser import HTMLParser
+from urllib2 import urlopen
+from urlparse import urljoin
+import re
+import sys
+import argparse
+import logging
+
+logger = logging.getLogger(__name__)
+
+class LinkParser(HTMLParser):
+    """Retrieve links (a hrefs) from a text/html document"""
+    def __init__(self, url):
+        HTMLParser.__init__(self)
+        self.url = url
+        self.links = set()
+        response = urlopen(url)
+        contentType = response.info().get('Content-Type')
+        if not contentType:
+            return
+        logger.debug("url = " + url );
+        logger.debug("contentType = " + contentType );
+        if ';' in contentType:
+            (mediaType,charset) = contentType.split(";")
+            charset = charset.split("=")[1]
+        else:
+            mediaType = contentType
+            # ISO-8859-1 is no longer the default charset, see https://tools.ietf.org/html/rfc7231#appendix-B
+            # Let's use UTF-8.
+            charset = "utf-8"
+        if mediaType =='text/html':
+            htmlBytes = response.read()
+            htmlString = htmlBytes.decode(charset)
+            self.feed(htmlString)
+
+    def handle_starttag(self, tag, attrs):
+        if tag == 'a':
+            for (key, value) in attrs:
+                if key == 'href':
+                    new_url = urljoin(self.url,value)
+                    if re.match("^"+self.url, new_url):
+                        self.links.add(new_url)
+
+    def get_links(self):
+        """Returns all the collected links"""
+        return self.links
+
+
+def url_find(to_visit_url_set,visited_url_set,found_url_set):
+    """Recursively look for urls given a regex, a set of urls to visit, a set of already visited urls, a set of already found urls. Returns the set of found urls"""
+    logger.debug("Progress: to_visit:{} visited:{} found:{}".format(len(to_visit_url_set),len(visited_url_set),len(found_url_set)))
+    assert(len(to_visit_url_set.intersection(visited_url_set)) == 0)
+    assert(len(to_visit_url_set.intersection(found_url_set)) == 0)
+    if (len(to_visit_url_set) == 0):
+        return [visited_url_set,found_url_set]
+    else:
+        url = to_visit_url_set.pop()
+        visited_url_set.add(url)
+        if target_regex.match(url):
+            found_url_set.add(url)
+            return url_find(to_visit_url_set, visited_url_set, found_url_set)
+        else:
+            new_url_set = set([url for url in LinkParser(url).get_links() if (logger.debug(url) or True) and url_regex.match(url)])
+            new_url_set.difference_update(visited_url_set)
+            to_visit_url_set.update(new_url_set)
+            return url_find(to_visit_url_set, visited_url_set, found_url_set)
+
+def key_normalize(version_string):
+    """
+    In order to perform a natural sorting, we normalize the version (X.Y.Z) as a unique integer with the following formula: X*100 + Y*10 + Z
+    For instance, it solves situations where "9.9.0" is greater than "9.9.11"
+    """
+    splitted_string = version_string.split('.')
+    assert(len(splitted_string) == 3)
+    return int(splitted_string[0])*100+int(splitted_string[1])*10+int(splitted_string[2])
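+# For illustration: key_normalize("9.9.11") == 9*100 + 9*10 + 11 == 1001, which
+# sorts after key_normalize("9.9.0") == 990.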
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument("distrib", metavar="DISTRIB", help="distribution")
+    parser.add_argument("version", metavar="VERSION", help="version")
+    parser.add_argument("arch", metavar="ARCH", help="architecture")
+    parser.add_argument("mirror", metavar="MIRROR", help="mirror", nargs="?")
+    parser.add_argument('--info', action="store_true", default=False, help='print info messages')
+    parser.add_argument('--debug', action="store_true", default=False, help='print debug messages')
+    args = parser.parse_args()
+
+    handler = logging.StreamHandler()
+    if args.debug:
+        logger.setLevel(logging.DEBUG)
+        handler.setLevel(logging.DEBUG)
+    elif args.info:
+        logger.setLevel(logging.INFO)
+        handler.setLevel(logging.INFO)
+    else:
+        logger.setLevel(logging.WARNING)
+        handler.setLevel(logging.WARNING)
+    handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
+    logger.addHandler(handler)
+
+    try:
+        visited = set([])
+        found = set([])
+        if (args.distrib.lower() == "debian"):
+            if args.mirror == None:
+                args.mirror = "http://cdimage.debian.org/"
+            if not re.match("^\d+$",args.version):
+                raise Exception("please give the Debian release number (e.g. 8 for Jessie)")
+            if args.version == '11':
+                url_regex = re.compile("^"+args.mirror+"cdimage/release/(?:"+args.version+"\.\d+\.\d+/(?:"+args.arch+"/(?:iso-cd/(?:debian-"+args.version+"\.\d+\.\d+-"+args.arch+"-netinst\.iso)?)?)?)?$")
+            else:
+                url_regex = re.compile("^"+args.mirror+"cdimage/archive/(?:"+args.version+"\.\d+\.\d+/(?:"+args.arch+"/(?:iso-cd/(?:debian-"+args.version+"\.\d+\.\d+-"+args.arch+"-netinst\.iso)?)?)?)?$")
+            target_regex = re.compile("^.*-netinst\.iso$")
+            [visited,found] = url_find(set([args.mirror+"cdimage/"+v+"/" for v in ["release","archive"]]), set(), set())
+        elif (args.distrib.lower() == "ubuntu"):
+            if args.mirror == None:
+                args.mirror = "http://(?:archive|old-releases).ubuntu.com/"
+                servers = set(["http://"+s+".ubuntu.com/ubuntu/" for s in ["old-releases","archive"]])
+            else:
+                servers = set([args.mirror])
+            if not re.match("^\w+$",args.version):
+                raise Exception("please give the Ubuntu release name")
+            url_regex = re.compile("^"+args.mirror+"ubuntu/dists/(?:"+args.version+"(?:-updates)?/(?:main/(?:installer-"+args.arch+"/(?:current/(?:(?:legacy-)?images/(?:netboot/(?:mini\.iso)?)?)?)?)?)?)?$")
+            target_regex = re.compile("^.*/mini\.iso$")
+            [visited,found] = url_find(servers, set(), set())
+        elif (args.distrib.lower() == "centos"):
+            if args.mirror == None:
+                args.mirror = "http://mirror.in2p3.fr/linux/CentOS/"
+            if not re.match("^\d+$",args.version):
+                raise Exception("please give the CentOS release number (e.g. 7 for CentOS-7)")
+            if args.version == '6':
+                url_regex = re.compile("^"+args.mirror+"(?:"+args.version+"/(?:isos/(?:"+args.arch+"/(?:CentOS-"+args.version+"(?:\.\d+)?-"+args.arch+"-netinstall\.iso)?)?)?)?$")
+                target_regex = re.compile("^.*CentOS-\d+(?:\.\d+)?-\w+-netinstall\.iso$")
+            elif args.version == '7':
+                url_regex = re.compile("^"+args.mirror+"(?:"+args.version+"/(?:isos/(?:"+args.arch+"/(?:CentOS-"+args.version+"-"+args.arch+"-NetInstall-\d+\.iso)?)?)?)?$")
+                target_regex = re.compile("^.*CentOS-\d+-\w+-NetInstall-\d+\.iso$")
+            else:
+                url_regex = re.compile("^"+args.mirror+"(?:"+args.version+"/(?:isos/(?:"+args.arch+"/(?:CentOS-"+args.version+"\.\d+\.\d+-"+args.arch+"-boot\.iso)?)?)?)?$")
+                target_regex = re.compile("^.*CentOS-\d+\.\d+\.\d+-\w+-boot\.iso$")
+            [visited,found] = url_find(set([args.mirror]), set(), set())
+        else:
+            raise Exception("this distribution is not supported")
+        logger.info("URL regex: "+url_regex.pattern)
+        logger.info("Target regex: "+target_regex.pattern)
+        logger.debug("Visited URLs:")
+        for url in visited:
+            logger.debug(url)
+        logger.info("Found URLs:")
+        for url in found:
+            logger.info(url)
+        if len(found) > 0:
+            if (args.distrib.lower() == "debian"):
+                print(sorted(found,key=lambda x:key_normalize(re.sub(r".*/debian-(\d+).(\d+).(\d+)-"+args.arch+"-netinst\.iso$",r"\1.\2.\3",x)),reverse=True)[0])
+            else:
+                print(sorted(found, reverse=False)[0])
+        else:
+            raise Exception("no url found")
+    except Exception as exc:
+        sys.stderr.write(u"Error: %s\n" % exc)
+        sys.exit(1)
diff --git a/grid5000/steps/data/helpers/simple_http_server.py b/grid5000/steps/data/helpers/simple_http_server.py
new file mode 100644
index 0000000..881343a
--- /dev/null
+++ b/grid5000/steps/data/helpers/simple_http_server.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python2
+"""Simple HTTP server"""
+from __future__ import unicode_literals
+import atexit
+import os
+import sys
+import argparse
+
+
+class HTTPServerDaemon(object):
+
+    """An HTTP server daemon class."""
+
+    def __init__(self, root=os.getcwd()):
+        """ Initialize the object."""
+        self.root = root
+
+    def daemonize(self, pidfile):
+        """Daemonize the process using the UNIX double-fork mechanism."""
+        try:
+            pid = os.fork()
+            if pid > 0:
+                # exit first parent
+                sys.exit(0)
+        except OSError as err:
+            sys.stderr.write('fork #1 failed: {0}\n'.format(err))
+            sys.exit(1)
+
+        # decouple from parent environment
+        os.chdir(self.root)
+        os.setsid()
+        os.umask(0)
+
+        # do second fork
+        try:
+            pid = os.fork()
+            if pid > 0:
+
+                # exit from second parent
+                sys.exit(0)
+        except OSError as err:
+            sys.stderr.write('fork #2 failed: {0}\n'.format(err))
+            sys.exit(1)
+
+        # redirect standard file descriptors
+        sys.stdout.flush()
+        sys.stderr.flush()
+        si = open(os.devnull, 'r')
+        so = open(os.devnull, 'a+')
+        se = open(os.devnull, 'a+')
+
+        os.dup2(si.fileno(), sys.stdin.fileno())
+        os.dup2(so.fileno(), sys.stdout.fileno())
+        os.dup2(se.fileno(), sys.stderr.fileno())
+
+        # Make sure pid file is removed if we quit
+        @atexit.register
+        def delpid():
+            os.remove(pidfile)
+
+        # write pidfile
+        pid = str(os.getpid())
+        with open(pidfile, 'w+') as f:
+            f.write(pid + '\n')
+
+    def start(self, pidfile, *args, **kwargs):
+        """Start the daemon."""
+        # Check for a pidfile to see if the daemon already runs
+        try:
+            with open(pidfile, 'r') as pf:
+
+                pid = int(pf.read().strip())
+        except IOError:
+            pid = None
+
+        if pid:
+            message = "pidfile {0} already exists. " + \
+                      "Daemon already running?\n"
+            sys.stderr.write(message.format(pidfile))
+            sys.exit(1)
+
+        # Start the daemon
+        self.daemonize(pidfile)
+        self.run(*args, **kwargs)
+
+    def run(self, host, port):
+        """ Run an HTTP server."""
+        if sys.version_info[0] == 3:
+            from http.server import HTTPServer, SimpleHTTPRequestHandler
+            httpd = HTTPServer((host, port), SimpleHTTPRequestHandler)
+        else:
+            import SimpleHTTPServer
+            import SocketServer
+            handler = SimpleHTTPServer.SimpleHTTPRequestHandler
+            httpd = SocketServer.TCPServer((host, port), handler)
+
+        print("Running on http://%s:%s/" % (host, port))
+        os.chdir(self.root)
+        try:
+            httpd.serve_forever()
+        except KeyboardInterrupt:
+            sys.stderr.write(u"\nBye\n")
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description=sys.modules[__name__].__doc__,
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
+    parser.add_argument('--port', action="store", default=9090, type=int,
+                        help='Set the listening port')
+    parser.add_argument('--root', action="store", default=os.getcwd())
+    parser.add_argument('--bind', action="store", default="0.0.0.0",
+                        help='Set the binding address')
+    parser.add_argument('--daemon', action="store_true", default=False)
+    parser.add_argument('--pid', action="store")
+
+    try:
+        args = parser.parse_args()
+        http_server = HTTPServerDaemon(root=args.root)
+        if args.daemon:
+            if args.pid is None:
+                parser.error("Need to set a pid file")
+            http_server.start(args.pid, args.bind, args.port)
+        else:
+            http_server.run(args.bind, args.port)
+    except Exception as exc:
+        sys.stderr.write(u"\nError: %s\n" % exc)
+        sys.exit(1)
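+# For illustration, the start_http_server step in this repository launches this
+# script roughly as:
+#   python2 simple_http_server.py --root <http_directory> --bind 0.0.0.0 \
+#     --port <free port> --daemon --pid <pid file>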
diff --git a/grid5000/steps/data/preseed/debian-bullseye-preseed.cfg b/grid5000/steps/data/preseed/debian-bullseye-preseed.cfg
new file mode 100644
index 0000000..6956585
--- /dev/null
+++ b/grid5000/steps/data/preseed/debian-bullseye-preseed.cfg
@@ -0,0 +1,461 @@
+#_preseed_V1
+#### Contents of the preconfiguration file (for bullseye)
+### Localization
+# Preseeding only locale sets language, country and locale.
+d-i debian-installer/locale string en_US.UTF-8
+
+# The values can also be preseeded individually for greater flexibility.
+#d-i debian-installer/language string en
+#d-i debian-installer/country string NL
+#d-i debian-installer/locale string en_GB.UTF-8
+# Optionally specify additional locales to be generated.
+#d-i localechooser/supported-locales multiselect en_US.UTF-8, nl_NL.UTF-8
+
+# Keyboard selection.
+d-i keyboard-configuration/xkb-keymap select us
+# d-i keyboard-configuration/toggle select No toggling
+
+### Network configuration
+# Disable network configuration entirely. This is useful for cdrom
+# installations on non-networked devices where the network questions,
+# warning and long timeouts are a nuisance.
+#d-i netcfg/enable boolean false
+
+# netcfg will choose an interface that has link if possible. This makes it
+# skip displaying a list if there is more than one interface.
+d-i netcfg/choose_interface select auto
+
+# To pick a particular interface instead:
+#d-i netcfg/choose_interface select eth1
+
+# To set a different link detection timeout (default is 3 seconds).
+# Values are interpreted as seconds.
+#d-i netcfg/link_wait_timeout string 10
+
+# If you have a slow dhcp server and the installer times out waiting for
+# it, this might be useful.
+#d-i netcfg/dhcp_timeout string 60
+#d-i netcfg/dhcpv6_timeout string 60
+
+# If you prefer to configure the network manually, uncomment this line and
+# the static network configuration below.
+#d-i netcfg/disable_autoconfig boolean true
+
+# If you want the preconfiguration file to work on systems both with and
+# without a dhcp server, uncomment these lines and the static network
+# configuration below.
+#d-i netcfg/dhcp_failed note
+#d-i netcfg/dhcp_options select Configure network manually
+
+# Static network configuration.
+#
+# IPv4 example
+#d-i netcfg/get_ipaddress string 192.168.1.42
+#d-i netcfg/get_netmask string 255.255.255.0
+#d-i netcfg/get_gateway string 192.168.1.1
+#d-i netcfg/get_nameservers string 192.168.1.1
+#d-i netcfg/confirm_static boolean true
+#
+# IPv6 example
+#d-i netcfg/get_ipaddress string fc00::2
+#d-i netcfg/get_netmask string ffff:ffff:ffff:ffff::
+#d-i netcfg/get_gateway string fc00::1
+#d-i netcfg/get_nameservers string fc00::1
+#d-i netcfg/confirm_static boolean true
+
+# Any hostname and domain names assigned from dhcp take precedence over
+# values set here. However, setting the values still prevents the questions
+# from being shown, even if values come from dhcp.
+d-i netcfg/get_hostname string kameleon
+d-i netcfg/get_domain string kameleon
+
+# If you want to force a hostname, regardless of what either the DHCP
+# server returns or what the reverse DNS entry for the IP is, uncomment
+# and adjust the following line.
+#d-i netcfg/hostname string somehost
+
+# Disable that annoying WEP key dialog.
+d-i netcfg/wireless_wep string
+# The wacky dhcp hostname that some ISPs use as a password of sorts.
+#d-i netcfg/dhcp_hostname string radish
+
+# If non-free firmware is needed for the network or other hardware, you can
+# configure the installer to always try to load it, without prompting. Or
+# change to false to disable asking.
+#d-i hw-detect/load_firmware boolean true
+
+### Network console
+# Use the following settings if you wish to make use of the network-console
+# component for remote installation over SSH. This only makes sense if you
+# intend to perform the remainder of the installation manually.
+#d-i anna/choose_modules string network-console
+#d-i network-console/authorized_keys_url string http://10.0.0.1/openssh-key
+#d-i network-console/password password r00tme
+#d-i network-console/password-again password r00tme
+
+### Mirror settings
+# If you select ftp, the mirror/country string does not need to be set.
+#d-i mirror/protocol string ftp
+d-i mirror/country string manual
+d-i mirror/http/hostname string http.fr.debian.org
+d-i mirror/http/directory string /debian
+d-i mirror/http/proxy string
+
+# Suite to install.
+#d-i mirror/suite string testing
+# Suite to use for loading installer components (optional).
+#d-i mirror/udeb/suite string testing
+
+### Account setup
+# Skip creation of a root account (normal user account will be able to
+# use sudo).
+#d-i passwd/root-login boolean false
+# Alternatively, to skip creation of a normal user account.
+#d-i passwd/make-user boolean false
+# Enable login to root account
+d-i passwd/root-login boolean true
+
+# Root password, either in clear text
+d-i passwd/root-password password kameleon
+d-i passwd/root-password-again password kameleon
+# or encrypted using a crypt(3)  hash.
+#d-i passwd/root-password-crypted password [crypt(3) hash]
+
+# To create a normal user account.
+d-i passwd/user-fullname string Kameleon User
+d-i passwd/username string kameleon
+# Normal user's password, either in clear text
+d-i passwd/user-password password kameleon
+d-i passwd/user-password-again password kameleon
+# or encrypted using a crypt(3) hash.
+#d-i passwd/user-password-crypted password [crypt(3) hash]
+# Create the first user with the specified UID instead of the default.
+#d-i passwd/user-uid string 1010
+
+# The user account will be added to some standard initial groups. To
+# override that, use this.
+#d-i passwd/user-default-groups string audio cdrom video
+d-i passwd/user-default-groups string audio cdrom video admin
+
+### Clock and time zone setup
+# Controls whether or not the hardware clock is set to UTC.
+d-i clock-setup/utc boolean true
+
+# You may set this to any valid setting for $TZ; see the contents of
+# /usr/share/zoneinfo/ for valid values.
+d-i time/zone string UTC
+
+# Controls whether to use NTP to set the clock during the install
+d-i clock-setup/ntp boolean true
+# NTP server to use. The default is almost always fine here.
+#d-i clock-setup/ntp-server string ntp.example.com
+
+### Partitioning
+## Partitioning example
+# If the system has free space you can choose to only partition that space.
+# This is only honoured if partman-auto/method (below) is not set.
+#d-i partman-auto/init_automatically_partition select biggest_free
+
+# Alternatively, you may specify a disk to partition. If the system has only
+# one disk the installer will default to using that, but otherwise the device
+# name must be given in traditional, non-devfs format (so e.g. /dev/sda
+# and not e.g. /dev/discs/disc0/disc).
+# For example, to use the first SCSI/SATA hard disk:
+#d-i partman-auto/disk string /dev/sda
+# In addition, you'll need to specify the method to use.
+# The presently available methods are:
+# - regular: use the usual partition types for your architecture
+# - lvm:     use LVM to partition the disk
+# - crypto:  use LVM within an encrypted partition
+d-i partman-auto/method string regular
+
+# If one of the disks that are going to be automatically partitioned
+# contains an old LVM configuration, the user will normally receive a
+# warning. This can be preseeded away...
+d-i partman-lvm/device_remove_lvm boolean true
+# The same applies to pre-existing software RAID array:
+d-i partman-md/device_remove_md boolean true
+# And the same goes for the confirmation to write the lvm partitions.
+d-i partman-lvm/confirm boolean true
+d-i partman-lvm/confirm_nooverwrite boolean true
+
+# You can choose one of the three predefined partitioning recipes:
+# - atomic: all files in one partition
+# - home:   separate /home partition
+# - multi:  separate /home, /var, and /tmp partitions
+d-i partman-auto/choose_recipe select atomic
+
+# Or provide a recipe of your own...
+# If you have a way to get a recipe file into the d-i environment, you can
+# just point at it.
+#d-i partman-auto/expert_recipe_file string /hd-media/recipe
+
+# If not, you can put an entire recipe into the preconfiguration file in one
+# (logical) line. This example creates a small /boot partition, suitable
+# swap, and uses the rest of the space for the root partition:
+#d-i partman-auto/expert_recipe string                         \
+#      boot-root ::                                            \
+#              40 50 100 ext3                                  \
+#                      $primary{ } $bootable{ }                \
+#                      method{ format } format{ }              \
+#                      use_filesystem{ } filesystem{ ext3 }    \
+#                      mountpoint{ /boot }                     \
+#              .                                               \
+#              500 10000 1000000000 ext3                       \
+#                      method{ format } format{ }              \
+#                      use_filesystem{ } filesystem{ ext3 }    \
+#                      mountpoint{ / }                         \
+#              .                                               \
+#              64 512 300% linux-swap                          \
+#                      method{ swap } format{ }                \
+#              .
+
+# The full recipe format is documented in the file partman-auto-recipe.txt
+# included in the 'debian-installer' package or available from D-I source
+# repository. This also documents how to specify settings such as file
+# system labels, volume group names and which physical devices to include
+# in a volume group.
+
+## Partitioning for EFI
+# If your system needs an EFI partition you could add something like
+# this to the recipe above, as the first element in the recipe:
+#               538 538 1075 free                              \
+#                      $iflabel{ gpt }                         \
+#                      $reusemethod{ }                         \
+#                      method{ efi }                           \
+#                      format{ }                               \
+#               .                                              \
+#
+# The fragment above is for the amd64 architecture; the details may be
+# different on other architectures. The 'partman-auto' package in the
+# D-I source repository may have an example you can follow.
+
+# This makes partman automatically partition without confirmation, provided
+# that you told it what to do using one of the methods above.
+d-i partman-partitioning/confirm_write_new_label boolean true
+d-i partman/choose_partition select finish
+d-i partman/confirm boolean true
+d-i partman/confirm_nooverwrite boolean true
+
+# Force UEFI booting ('BIOS compatibility' will be lost). Default: false.
+#d-i partman-efi/non_efi_system boolean true
+# Ensure the partition table is GPT - this is required for EFI
+#d-i partman-partitioning/choose_label string gpt
+#d-i partman-partitioning/default_label string gpt
+
+# When disk encryption is enabled, skip wiping the partitions beforehand.
+#d-i partman-auto-crypto/erase_disks boolean false
+
+## Partitioning using RAID
+# The method should be set to "raid".
+#d-i partman-auto/method string raid
+# Specify the disks to be partitioned. They will all get the same layout,
+# so this will only work if the disks are the same size.
+#d-i partman-auto/disk string /dev/sda /dev/sdb
+
+# Next you need to specify the physical partitions that will be used. 
+#d-i partman-auto/expert_recipe string \
+#      multiraid ::                                         \
+#              1000 5000 4000 raid                          \
+#                      $primary{ } method{ raid }           \
+#              .                                            \
+#              64 512 300% raid                             \
+#                      method{ raid }                       \
+#              .                                            \
+#              500 10000 1000000000 raid                    \
+#                      method{ raid }                       \
+#              .
+
+# Last you need to specify how the previously defined partitions will be
+# used in the RAID setup. Remember to use the correct partition numbers
+# for logical partitions. RAID levels 0, 1, 5, 6 and 10 are supported;
+# devices are separated using "#".
+# Parameters are:
+# <raidtype> <devcount> <sparecount> <fstype> <mountpoint> \
+#          <devices> <sparedevices>
+
+#d-i partman-auto-raid/recipe string \
+#    1 2 0 ext3 /                    \
+#          /dev/sda1#/dev/sdb1       \
+#    .                               \
+#    1 2 0 swap -                    \
+#          /dev/sda5#/dev/sdb5       \
+#    .                               \
+#    0 2 0 ext3 /home                \
+#          /dev/sda6#/dev/sdb6       \
+#    .
+
+# For additional information see the file partman-auto-raid-recipe.txt
+# included in the 'debian-installer' package or available from D-I source
+# repository.
+
+# This makes partman automatically partition without confirmation.
+d-i partman-md/confirm boolean true
+d-i partman-partitioning/confirm_write_new_label boolean true
+d-i partman/choose_partition select finish
+d-i partman/confirm boolean true
+d-i partman/confirm_nooverwrite boolean true
+
+## Controlling how partitions are mounted
+# The default is to mount by UUID, but you can also choose "traditional" to
+# use traditional device names, or "label" to try filesystem labels before
+# falling back to UUIDs.
+#d-i partman/mount_style select uuid
+
+### Base system installation
+# Configure APT to not install recommended packages by default. Use of this
+# option can result in an incomplete system and should only be used by very
+# experienced users.
+#d-i base-installer/install-recommends boolean false
+
+# The kernel image (meta) package to be installed; "none" can be used if no
+# kernel is to be installed.
+#d-i base-installer/kernel/image string linux-image-686
+
+### Apt setup
+# You can choose to install non-free and contrib software.
+#d-i apt-setup/non-free boolean true
+#d-i apt-setup/contrib boolean true
+# Uncomment this if you don't want to use a network mirror.
+#d-i apt-setup/use_mirror boolean false
+# Select which update services to use; define the mirrors to be used.
+# Values shown below are the normal defaults.
+#d-i apt-setup/services-select multiselect security, updates
+#d-i apt-setup/security_host string security.debian.org
+
+# Additional repositories, local[0-9] available
+#d-i apt-setup/local0/repository string \
+#       http://local.server/debian stable main
+#d-i apt-setup/local0/comment string local server
+# Enable deb-src lines
+#d-i apt-setup/local0/source boolean true
+# URL to the public key of the local repository; you must provide a key or
+# apt will complain about the unauthenticated repository and so the
+# sources.list line will be left commented out.
+#d-i apt-setup/local0/key string http://local.server/key
+# If the provided key file ends in ".asc" the key file needs to be an
+# ASCII-armoured PGP key, if it ends in ".gpg" it needs to use the
+# "GPG key public keyring" format, the "keybox database" format is
+# currently not supported.
+
+# By default the installer requires that repositories be authenticated
+# using a known gpg key. This setting can be used to disable that
+# authentication. Warning: Insecure, not recommended.
+#d-i debian-installer/allow_unauthenticated boolean true
+
+# Uncomment this to add multiarch configuration for i386
+#d-i apt-setup/multiarch string i386
+
+
+### Package selection
+#tasksel tasksel/first multiselect standard, web-server, kde-desktop
+tasksel tasksel/first multiselect none
+
+# Individual additional packages to install
+#d-i pkgsel/include string openssh-server build-essential
+d-i pkgsel/include string openssh-server sudo rsync haveged
+# Whether to upgrade packages after debootstrap.
+# Allowed values: none, safe-upgrade, full-upgrade
+d-i pkgsel/upgrade select none
+
+# Some versions of the installer can report back on what software you have
+# installed, and what software you use. The default is not to report back,
+# but sending reports helps the project determine what software is most
+# popular and should be included on the first CD/DVD.
+popularity-contest popularity-contest/participate boolean false
+
+### Boot loader installation
+# Grub is the boot loader (for x86).
+
+# This is fairly safe to set, it makes grub install automatically to the UEFI
+# partition/boot record if no other operating system is detected on the machine.
+d-i grub-installer/only_debian boolean true
+
+# This one makes grub-installer install to the UEFI partition/boot record, if
+# it also finds some other OS, which is less safe as it might not be able to
+# boot that other OS.
+d-i grub-installer/with_other_os boolean true
+
+# Due notably to potential USB sticks, the location of the primary drive can
+# not be determined safely in general, so this needs to be specified:
+#d-i grub-installer/bootdev  string /dev/sda
+# To install to the primary device (assuming it is not a USB stick):
+#d-i grub-installer/bootdev  string default
+
+# Alternatively, if you want to install to a location other than the UEFI
+# partition/boot record, uncomment and edit these lines:
+#d-i grub-installer/only_debian boolean false
+#d-i grub-installer/with_other_os boolean false
+#d-i grub-installer/bootdev  string (hd0,1)
+# To install grub to multiple disks:
+#d-i grub-installer/bootdev  string (hd0,1) (hd1,1) (hd2,1)
+
+# Optional password for grub, either in clear text
+#d-i grub-installer/password password r00tme
+#d-i grub-installer/password-again password r00tme
+# or encrypted using an MD5 hash, see grub-md5-crypt(8).
+#d-i grub-installer/password-crypted password [MD5 hash]
+
+# Use the following option to add additional boot parameters for the
+# installed system (if supported by the bootloader installer).
+# Note: options passed to the installer will be added automatically.
+#d-i debian-installer/add-kernel-opts string nousb
+
+# GRUB install devices:
+# Choices: /dev/sda (21474 MB; VMware_Virtual_S), /dev/sda1 (21472 MB; VMware_Virtual_S)
+grub-pc     grub-pc/install_devices multiselect /dev/vda
+# Choices: Enter device manually, /dev/sda
+grub-installer  grub-installer/choose_bootdev   select  /dev/vda
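+# Note: /dev/vda is the virtio disk seen inside the QEMU VM used to build this
+# image; the "Choices" lines above were presumably captured on a VMware host
+# and do not apply here.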
+
+### Finishing up the installation
+# During installations from serial console, the regular virtual consoles
+# (VT1-VT6) are normally disabled in /etc/inittab. Uncomment the next
+# line to prevent this.
+#d-i finish-install/keep-consoles boolean true
+
+# Avoid that last message about the install being complete.
+d-i finish-install/reboot_in_progress note
+
+# This will prevent the installer from ejecting the CD during the reboot,
+# which is useful in some situations.
+d-i cdrom-detect/eject boolean false
+
+# This is how to make the installer shutdown when finished, but not
+# reboot into the installed system.
+#d-i debian-installer/exit/halt boolean true
+# This will power off the machine instead of just halting it.
+d-i debian-installer/exit/poweroff boolean true
+
+### Preseeding other packages
+# Depending on what software you choose to install, or if things go wrong
+# during the installation process, it's possible that other questions may
+# be asked. You can preseed those too, of course. To get a list of every
+# possible question that could be asked during an install, do an
+# installation, and then run these commands:
+#   debconf-get-selections --installer > file
+#   debconf-get-selections >> file
+
+
+#### Advanced options
+### Running custom commands during the installation
+# d-i preseeding is inherently not secure. Nothing in the installer checks
+# for attempts at buffer overflows or other exploits of the values of a
+# preconfiguration file like this one. Only use preconfiguration files from
+# trusted locations! To drive that home, and because it's generally useful,
+# here's a way to run any shell command you'd like inside the installer,
+# automatically.
+
+# This first command is run as early as possible, just after
+# preseeding is read.
+#d-i preseed/early_command string anna-install some-udeb
+# This command is run immediately before the partitioner starts. It may be
+# useful to apply dynamic partitioner preseeding that depends on the state
+# of the disks (which may not be visible when preseed/early_command runs).
+#d-i partman/early_command \
+#       string debconf-set partman-auto/disk "$(list-devices disk | head -n1)"
+# This command is run just before the install finishes, but when there is
+# still a usable /target directory. You can chroot to /target and use it
+# directly, or use the apt-install and in-target commands to easily install
+# packages and run commands in the target system.
+#d-i preseed/late_command string apt-install zsh; in-target chsh -s /bin/zsh
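+# As a further (purely illustrative, commented-out) sketch, a late_command
+# could also seed an SSH key for root in the target system; the key below is
+# a placeholder, not part of this recipe:
+#d-i preseed/late_command string in-target mkdir -p /root/.ssh; \
+#    in-target sh -c "echo 'ssh-ed25519 AAAA...placeholder' >> /root/.ssh/authorized_keys"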
+
diff --git a/grid5000/steps/data/preseed/debian-buster-preseed.cfg b/grid5000/steps/data/preseed/debian-buster-preseed.cfg
new file mode 100644
index 0000000..434efb0
--- /dev/null
+++ b/grid5000/steps/data/preseed/debian-buster-preseed.cfg
@@ -0,0 +1,443 @@
+#### Contents of the preconfiguration file (for buster)
+### Localization
+# Preseeding only locale sets language, country and locale.
+d-i debian-installer/locale string en_US.UTF-8
+
+# The values can also be preseeded individually for greater flexibility.
+#d-i debian-installer/language string en
+#d-i debian-installer/country string NL
+#d-i debian-installer/locale string en_GB.UTF-8
+# Optionally specify additional locales to be generated.
+#d-i localechooser/supported-locales multiselect en_US.UTF-8, nl_NL.UTF-8
+
+# Keyboard selection.
+d-i keyboard-configuration/xkb-keymap select us
+# d-i keyboard-configuration/toggle select No toggling
+
+### Network configuration
+# Disable network configuration entirely. This is useful for cdrom
+# installations on non-networked devices where the network questions,
+# warning and long timeouts are a nuisance.
+#d-i netcfg/enable boolean false
+
+# netcfg will choose an interface that has link if possible. This makes it
+# skip displaying a list if there is more than one interface.
+d-i netcfg/choose_interface select auto
+
+# To pick a particular interface instead:
+#d-i netcfg/choose_interface select eth1
+
+# To set a different link detection timeout (default is 3 seconds).
+# Values are interpreted as seconds.
+#d-i netcfg/link_wait_timeout string 10
+
+# If you have a slow dhcp server and the installer times out waiting for
+# it, this might be useful.
+#d-i netcfg/dhcp_timeout string 60
+#d-i netcfg/dhcpv6_timeout string 60
+
+# If you prefer to configure the network manually, uncomment this line and
+# the static network configuration below.
+#d-i netcfg/disable_autoconfig boolean true
+
+# If you want the preconfiguration file to work on systems both with and
+# without a dhcp server, uncomment these lines and the static network
+# configuration below.
+#d-i netcfg/dhcp_failed note
+#d-i netcfg/dhcp_options select Configure network manually
+
+# Static network configuration.
+#
+# IPv4 example
+#d-i netcfg/get_ipaddress string 192.168.1.42
+#d-i netcfg/get_netmask string 255.255.255.0
+#d-i netcfg/get_gateway string 192.168.1.1
+#d-i netcfg/get_nameservers string 192.168.1.1
+#d-i netcfg/confirm_static boolean true
+#
+# IPv6 example
+#d-i netcfg/get_ipaddress string fc00::2
+#d-i netcfg/get_netmask string ffff:ffff:ffff:ffff::
+#d-i netcfg/get_gateway string fc00::1
+#d-i netcfg/get_nameservers string fc00::1
+#d-i netcfg/confirm_static boolean true
+
+# Any hostname and domain names assigned from dhcp take precedence over
+# values set here. However, setting the values still prevents the questions
+# from being shown, even if values come from dhcp.
+d-i netcfg/get_hostname string kameleon
+d-i netcfg/get_domain string kameleon
+
+# If you want to force a hostname, regardless of what either the DHCP
+# server returns or what the reverse DNS entry for the IP is, uncomment
+# and adjust the following line.
+#d-i netcfg/hostname string somehost
+
+# Disable that annoying WEP key dialog.
+d-i netcfg/wireless_wep string
+# The wacky dhcp hostname that some ISPs use as a password of sorts.
+#d-i netcfg/dhcp_hostname string radish
+
+# If non-free firmware is needed for the network or other hardware, you can
+# configure the installer to always try to load it, without prompting. Or
+# change to false to disable asking.
+#d-i hw-detect/load_firmware boolean true
+
+### Network console
+# Use the following settings if you wish to make use of the network-console
+# component for remote installation over SSH. This only makes sense if you
+# intend to perform the remainder of the installation manually.
+#d-i anna/choose_modules string network-console
+#d-i network-console/authorized_keys_url string http://10.0.0.1/openssh-key
+#d-i network-console/password password r00tme
+#d-i network-console/password-again password r00tme
+
+### Mirror settings
+# If you select ftp, the mirror/country string does not need to be set.
+#d-i mirror/protocol string ftp
+d-i mirror/country string manual
+d-i mirror/http/hostname string http.fr.debian.org
+d-i mirror/http/directory string /debian
+d-i mirror/http/proxy string
+
+# Suite to install.
+#d-i mirror/suite string testing
+# Suite to use for loading installer components (optional).
+#d-i mirror/udeb/suite string testing
+
+### Account setup
+# Skip creation of a root account (normal user account will be able to
+# use sudo).
+#d-i passwd/root-login boolean false
+# Alternatively, to skip creation of a normal user account.
+#d-i passwd/make-user boolean false
+# Enable login to root account
+d-i passwd/root-login boolean true
+
+# Root password, either in clear text
+d-i passwd/root-password password kameleon
+d-i passwd/root-password-again password kameleon
+# or encrypted using a crypt(3)  hash.
+#d-i passwd/root-password-crypted password [crypt(3) hash]
+
+# To create a normal user account.
+d-i passwd/user-fullname string Kameleon User
+d-i passwd/username string kameleon
+# Normal user's password, either in clear text
+d-i passwd/user-password password kameleon
+d-i passwd/user-password-again password kameleon
+# or encrypted using a crypt(3) hash.
+#d-i passwd/user-password-crypted password [crypt(3) hash]
+# Create the first user with the specified UID instead of the default.
+#d-i passwd/user-uid string 1010
+
+# The user account will be added to some standard initial groups. To
+# override that, use this.
+#d-i passwd/user-default-groups string audio cdrom video
+d-i passwd/user-default-groups string audio cdrom video admin
+
+### Clock and time zone setup
+# Controls whether or not the hardware clock is set to UTC.
+d-i clock-setup/utc boolean true
+
+# You may set this to any valid setting for $TZ; see the contents of
+# /usr/share/zoneinfo/ for valid values.
+d-i time/zone string UTC
+
+# Controls whether to use NTP to set the clock during the install
+d-i clock-setup/ntp boolean true
+# NTP server to use. The default is almost always fine here.
+#d-i clock-setup/ntp-server string ntp.example.com
+
+### Partitioning
+## Partitioning example
+# If the system has free space you can choose to only partition that space.
+# This is only honoured if partman-auto/method (below) is not set.
+#d-i partman-auto/init_automatically_partition select biggest_free
+
+# Alternatively, you may specify a disk to partition. If the system has only
+# one disk the installer will default to using that, but otherwise the device
+# name must be given in traditional, non-devfs format (so e.g. /dev/sda
+# and not e.g. /dev/discs/disc0/disc).
+# For example, to use the first SCSI/SATA hard disk:
+#d-i partman-auto/disk string /dev/sda
+# In addition, you'll need to specify the method to use.
+# The presently available methods are:
+# - regular: use the usual partition types for your architecture
+# - lvm:     use LVM to partition the disk
+# - crypto:  use LVM within an encrypted partition
+d-i partman-auto/method string regular
+
+# If one of the disks that are going to be automatically partitioned
+# contains an old LVM configuration, the user will normally receive a
+# warning. This can be preseeded away...
+d-i partman-lvm/device_remove_lvm boolean true
+# The same applies to pre-existing software RAID array:
+d-i partman-md/device_remove_md boolean true
+# And the same goes for the confirmation to write the lvm partitions.
+d-i partman-lvm/confirm boolean true
+d-i partman-lvm/confirm_nooverwrite boolean true
+
+# You can choose one of the three predefined partitioning recipes:
+# - atomic: all files in one partition
+# - home:   separate /home partition
+# - multi:  separate /home, /var, and /tmp partitions
+d-i partman-auto/choose_recipe select atomic
+
+# Or provide a recipe of your own...
+# If you have a way to get a recipe file into the d-i environment, you can
+# just point at it.
+#d-i partman-auto/expert_recipe_file string /hd-media/recipe
+
+# If not, you can put an entire recipe into the preconfiguration file in one
+# (logical) line. This example creates a small /boot partition, suitable
+# swap, and uses the rest of the space for the root partition:
+#d-i partman-auto/expert_recipe string                         \
+#      boot-root ::                                            \
+#              40 50 100 ext3                                  \
+#                      $primary{ } $bootable{ }                \
+#                      method{ format } format{ }              \
+#                      use_filesystem{ } filesystem{ ext3 }    \
+#                      mountpoint{ /boot }                     \
+#              .                                               \
+#              500 10000 1000000000 ext3                       \
+#                      method{ format } format{ }              \
+#                      use_filesystem{ } filesystem{ ext3 }    \
+#                      mountpoint{ / }                         \
+#              .                                               \
+#              64 512 300% linux-swap                          \
+#                      method{ swap } format{ }                \
+#              .
+
+# The full recipe format is documented in the file partman-auto-recipe.txt
+# included in the 'debian-installer' package or available from D-I source
+# repository. This also documents how to specify settings such as file
+# system labels, volume group names and which physical devices to include
+# in a volume group.
+
+# This makes partman automatically partition without confirmation, provided
+# that you told it what to do using one of the methods above.
+d-i partman-partitioning/confirm_write_new_label boolean true
+d-i partman/choose_partition select finish
+d-i partman/confirm boolean true
+d-i partman/confirm_nooverwrite boolean true
+
+# When disk encryption is enabled, skip wiping the partitions beforehand.
+#d-i partman-auto-crypto/erase_disks boolean false
+
+## Partitioning using RAID
+# The method should be set to "raid".
+#d-i partman-auto/method string raid
+# Specify the disks to be partitioned. They will all get the same layout,
+# so this will only work if the disks are the same size.
+#d-i partman-auto/disk string /dev/sda /dev/sdb
+
+# Next you need to specify the physical partitions that will be used. 
+#d-i partman-auto/expert_recipe string \
+#      multiraid ::                                         \
+#              1000 5000 4000 raid                          \
+#                      $primary{ } method{ raid }           \
+#              .                                            \
+#              64 512 300% raid                             \
+#                      method{ raid }                       \
+#              .                                            \
+#              500 10000 1000000000 raid                    \
+#                      method{ raid }                       \
+#              .
+
+# Last you need to specify how the previously defined partitions will be
+# used in the RAID setup. Remember to use the correct partition numbers
+# for logical partitions. RAID levels 0, 1, 5, 6 and 10 are supported;
+# devices are separated using "#".
+# Parameters are:
+# <raidtype> <devcount> <sparecount> <fstype> <mountpoint> \
+#          <devices> <sparedevices>
+
+#d-i partman-auto-raid/recipe string \
+#    1 2 0 ext3 /                    \
+#          /dev/sda1#/dev/sdb1       \
+#    .                               \
+#    1 2 0 swap -                    \
+#          /dev/sda5#/dev/sdb5       \
+#    .                               \
+#    0 2 0 ext3 /home                \
+#          /dev/sda6#/dev/sdb6       \
+#    .
+
+# For additional information see the file partman-auto-raid-recipe.txt
+# included in the 'debian-installer' package or available from D-I source
+# repository.
+
+# This makes partman automatically partition without confirmation.
+d-i partman-md/confirm boolean true
+d-i partman-partitioning/confirm_write_new_label boolean true
+d-i partman/choose_partition select finish
+d-i partman/confirm boolean true
+d-i partman/confirm_nooverwrite boolean true
+
+## Controlling how partitions are mounted
+# The default is to mount by UUID, but you can also choose "traditional" to
+# use traditional device names, or "label" to try filesystem labels before
+# falling back to UUIDs.
+#d-i partman/mount_style select uuid
+
+### Base system installation
+# Configure APT to not install recommended packages by default. Use of this
+# option can result in an incomplete system and should only be used by very
+# experienced users.
+#d-i base-installer/install-recommends boolean false
+
+# The kernel image (meta) package to be installed; "none" can be used if no
+# kernel is to be installed.
+#d-i base-installer/kernel/image string linux-image-686
+
+### Apt setup
+# You can choose to install non-free and contrib software.
+#d-i apt-setup/non-free boolean true
+#d-i apt-setup/contrib boolean true
+# Uncomment this if you don't want to use a network mirror.
+#d-i apt-setup/use_mirror boolean false
+# Select which update services to use; define the mirrors to be used.
+# Values shown below are the normal defaults.
+#d-i apt-setup/services-select multiselect security, updates
+#d-i apt-setup/security_host string security.debian.org
+
+# Additional repositories, local[0-9] available
+#d-i apt-setup/local0/repository string \
+#       http://local.server/debian stable main
+#d-i apt-setup/local0/comment string local server
+# Enable deb-src lines
+#d-i apt-setup/local0/source boolean true
+# URL to the public key of the local repository; you must provide a key or
+# apt will complain about the unauthenticated repository and so the
+# sources.list line will be left commented out
+#d-i apt-setup/local0/key string http://local.server/key
+# Scan another CD or DVD?
+d-i apt-setup/cdrom/set-first boolean false
+
+# By default the installer requires that repositories be authenticated
+# using a known gpg key. This setting can be used to disable that
+# authentication. Warning: Insecure, not recommended.
+#d-i debian-installer/allow_unauthenticated boolean true
+
+# Uncomment this to add multiarch configuration for i386
+#d-i apt-setup/multiarch string i386
+
+
+### Package selection
+#tasksel tasksel/first multiselect standard, web-server, kde-desktop
+tasksel tasksel/first multiselect none
+
+# Individual additional packages to install
+#d-i pkgsel/include string openssh-server build-essential
+d-i pkgsel/include string openssh-server sudo rsync haveged
+# Whether to upgrade packages after debootstrap.
+# Allowed values: none, safe-upgrade, full-upgrade
+d-i pkgsel/upgrade select none
+
+# Some versions of the installer can report back on what software you have
+# installed, and what software you use. The default is not to report back,
+# but sending reports helps the project determine what software is most
+# popular and include it on CDs.
+popularity-contest popularity-contest/participate boolean false
+
+### Boot loader installation
+# Grub is the default boot loader (for x86). If you want lilo installed
+# instead, uncomment this:
+#d-i grub-installer/skip boolean true
+# To also skip installing lilo, and install no bootloader, uncomment this
+# too:
+#d-i lilo-installer/skip boolean true
+
+
+# This is fairly safe to set, it makes grub install automatically to the MBR
+# if no other operating system is detected on the machine.
+d-i grub-installer/only_debian boolean true
+
+# This one makes grub-installer install to the MBR if it also finds some other
+# OS, which is less safe as it might not be able to boot that other OS.
+d-i grub-installer/with_other_os boolean true
+
+# Due notably to potential USB sticks, the location of the MBR can not be
+# determined safely in general, so this needs to be specified:
+#d-i grub-installer/bootdev  string /dev/sda
+# To install to the first device (assuming it is not a USB stick):
+#d-i grub-installer/bootdev  string default
+
+# Alternatively, if you want to install to a location other than the mbr,
+# uncomment and edit these lines:
+#d-i grub-installer/only_debian boolean false
+#d-i grub-installer/with_other_os boolean false
+#d-i grub-installer/bootdev  string (hd0,1)
+# To install grub to multiple disks:
+#d-i grub-installer/bootdev  string (hd0,1) (hd1,1) (hd2,1)
+
+# Optional password for grub, either in clear text
+#d-i grub-installer/password password r00tme
+#d-i grub-installer/password-again password r00tme
+# or encrypted using an MD5 hash, see grub-md5-crypt(8).
+#d-i grub-installer/password-crypted password [MD5 hash]
+
+# Use the following option to add additional boot parameters for the
+# installed system (if supported by the bootloader installer).
+# Note: options passed to the installer will be added automatically.
+#d-i debian-installer/add-kernel-opts string nousb
+
+# GRUB install devices:
+# Choices: /dev/sda (21474 MB; VMware_Virtual_S), /dev/sda1 (21472 MB; VMware_Virtual_S)
+grub-pc     grub-pc/install_devices multiselect /dev/vda
+# Choices: Enter device manually, /dev/sda
+grub-installer  grub-installer/choose_bootdev   select  /dev/vda
+
+### Finishing up the installation
+# During installations from serial console, the regular virtual consoles
+# (VT1-VT6) are normally disabled in /etc/inittab. Uncomment the next
+# line to prevent this.
+#d-i finish-install/keep-consoles boolean true
+
+# Avoid that last message about the install being complete.
+d-i finish-install/reboot_in_progress note
+
+# This will prevent the installer from ejecting the CD during the reboot,
+# which is useful in some situations.
+d-i cdrom-detect/eject boolean false
+
+# This is how to make the installer shutdown when finished, but not
+# reboot into the installed system.
+#d-i debian-installer/exit/halt boolean true
+# This will power off the machine instead of just halting it.
+d-i debian-installer/exit/poweroff boolean true
+
+### Preseeding other packages
+# Depending on what software you choose to install, or if things go wrong
+# during the installation process, it's possible that other questions may
+# be asked. You can preseed those too, of course. To get a list of every
+# possible question that could be asked during an install, do an
+# installation, and then run these commands:
+#   debconf-get-selections --installer > file
+#   debconf-get-selections >> file
+
+
+#### Advanced options
+### Running custom commands during the installation
+# d-i preseeding is inherently not secure. Nothing in the installer checks
+# for attempts at buffer overflows or other exploits of the values of a
+# preconfiguration file like this one. Only use preconfiguration files from
+# trusted locations! To drive that home, and because it's generally useful,
+# here's a way to run any shell command you'd like inside the installer,
+# automatically.
+
+# This first command is run as early as possible, just after
+# preseeding is read.
+#d-i preseed/early_command string anna-install some-udeb
+# This command is run immediately before the partitioner starts. It may be
+# useful to apply dynamic partitioner preseeding that depends on the state
+# of the disks (which may not be visible when preseed/early_command runs).
+#d-i partman/early_command \
+#       string debconf-set partman-auto/disk "$(list-devices disk | head -n1)"
+# This command is run just before the install finishes, but when there is
+# still a usable /target directory. You can chroot to /target and use it
+# directly, or use the apt-install and in-target commands to easily install
+# packages and run commands in the target system.
+#d-i preseed/late_command string apt-install zsh; in-target chsh -s /bin/zsh
+
diff --git a/grid5000/steps/data/qemu-sendkeys.rb b/grid5000/steps/data/qemu-sendkeys.rb
new file mode 100644
index 0000000..d1bcb0f
--- /dev/null
+++ b/grid5000/steps/data/qemu-sendkeys.rb
@@ -0,0 +1,121 @@
+#!/usr/bin/env ruby
+# Translate a string to "sendkey" commands for QEMU.
+# Martin Vidner, MIT License
+
+# https://en.wikibooks.org/wiki/QEMU/Monitor#sendkey_keys
+# sendkey keys
+#
+# You can emulate keyboard events through sendkey command. The syntax is: sendkey keys. To get a list of keys, type sendkey [tab]. Examples:
+#
+#     sendkey a
+#     sendkey shift-a
+#     sendkey ctrl-u
+#     sendkey ctrl-alt-f1
+#
+# As of QEMU 0.12.5 there are:
+# shift     shift_r     alt     alt_r   altgr   altgr_r
+# ctrl  ctrl_r  menu    esc     1   2
+# 3     4   5   6   7   8
+# 9     0   minus   equal   backspace   tab
+# q     w   e   r   t   y
+# u     i   o   p   ret     a
+# s     d   f   g   h   j
+# k     l   z   x   c   v
+# b     n   m   comma   dot     slash
+# asterisk  spc     caps_lock   f1  f2  f3
+# f4    f5  f6  f7  f8  f9
+# f10   num_lock    scroll_lock     kp_divide   kp_multiply     kp_subtract
+# kp_add    kp_enter    kp_decimal  sysrq   kp_0    kp_1
+# kp_2  kp_3    kp_4    kp_5    kp_6    kp_7
+# kp_8  kp_9    <   f11     f12     print
+# home  pgup    pgdn    end     left    up
+# down  right   insert  delete
+
+require "optparse"
+
+# incomplete! only what I need now.
+KEYS = {
+  "%" => "shift-5",
+  "/" => "slash",
+  ":" => "shift-semicolon",
+  "=" => "equal",
+  "." => "dot",
+  " " => "spc",
+  "-" => "minus",
+  "_" => "shift-minus",
+  "*" => "asterisk",
+  "," => "comma",
+  "+" => "shift-equal",
+  "|" => "shift-backslash",
+  "\\" => "backslash",
+}
+
+class Main
+  attr_accessor :command
+  attr_accessor :delay_s
+  attr_accessor :keystring
+
+  def initialize
+    self.command = nil
+    self.delay_s = 0.1
+
+    OptionParser.new do |opts|
+      opts.banner = "Usage: sendkeys [-c command_to_pipe_to] STRING\n" +
+        "Where STRING can be 'ls<enter>ls<gt>/dev/null<enter>'"
+
+      opts.on("-c", "--command COMMAND",
+              "Pipe sendkeys to this commands, individually") do |v|
+        self.command = v
+      end
+      opts.on("-d", "--delay SECONDS", Float,
+              "Delay SECONDS after each key (default: 0.1)") do |v|
+        self.delay_s = v
+      end
+    end.parse!
+    self.keystring = ARGV[0]
+  end
+
+  def sendkey(qemu_key_name)
+    if qemu_key_name == "wait"
+      sleep 1
+    else
+      if qemu_key_name =~ /[A-Za-z]/ && qemu_key_name == qemu_key_name.upcase
+        key = "shift-#{qemu_key_name.downcase}"
+      else
+        key = qemu_key_name
+      end
+      qemu_cmd = "sendkey #{key}"
+      if command
+        system "echo '#{qemu_cmd}' | #{command}"
+      else
+        puts qemu_cmd
+        $stdout.flush             # important when we are piped
+      end
+      sleep delay_s
+    end
+  end
+
+  PATTERN = /
+              \G  # where last match ended
+              < [^>]+ >
+            |
+              \G
+              .
+            /x
+  def run
+    keystring.scan(PATTERN) do |match|
+      if match[0] == "<"
+        key_name = match.slice(1..-2)
+        sendkey case key_name
+                when "lt" then "shift-comma"
+                when "gt" then "shift-dot"
+                else key_name
+                end
+      else
+        sendkey KEYS.fetch(match, match)
+      end
+    end
+  end
+end
+
+Main.new.run
diff --git a/grid5000/steps/data/qemu-sendkeys/netinst-iso-debian b/grid5000/steps/data/qemu-sendkeys/netinst-iso-debian
new file mode 100644
index 0000000..7705a44
--- /dev/null
+++ b/grid5000/steps/data/qemu-sendkeys/netinst-iso-debian
@@ -0,0 +1 @@
+<esc><wait>auto preseed/url=http://%LOCAL_IP%:%HTTP_PORT%/preseed.cfg<kp_enter>
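Note: the keystring above is what qemu-sendkeys.rb turns into QEMU monitor
"sendkey" commands; %LOCAL_IP% and %HTTP_PORT% are presumably substituted by
the build tooling before use. A hedged example invocation, assuming the QEMU
monitor listens on a UNIX socket at an illustrative path:

    ruby qemu-sendkeys.rb -d 0.05 \
        -c "socat - UNIX-CONNECT:/tmp/qemu-monitor.sock" \
        "$(sed 's/%LOCAL_IP%:%HTTP_PORT%/10.0.2.2:8080/' qemu-sendkeys/netinst-iso-debian)"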
diff --git a/grid5000/steps/data/setup/hiera/hiera.yaml b/grid5000/steps/data/setup/hiera/hiera.yaml
new file mode 100644
index 0000000..a0e47a5
--- /dev/null
+++ b/grid5000/steps/data/setup/hiera/hiera.yaml
@@ -0,0 +1,11 @@
+---
+:backends:
+  - yaml
+  - json
+:yaml:
+  :datadir: /tmp/hiera/hieradata
+:json:
+  :datadir: /tmp/hiera/hieradata
+:hierarchy:
+  - common
+  - defaults
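Note: with this hierarchy, values from common.yaml take precedence over
defaults.yaml. The manifests added below are presumably applied against this
configuration with something along these lines (paths illustrative):

    puppet apply --hiera_config=/tmp/hiera/hiera.yaml \
        --modulepath=grid5000/steps/data/setup/puppet/modules \
        grid5000/steps/data/setup/puppet/manifests/std.pp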
diff --git a/grid5000/steps/data/setup/hiera/hieradata/defaults.yaml b/grid5000/steps/data/setup/hiera/hieradata/defaults.yaml
new file mode 100644
index 0000000..2fc8326
--- /dev/null
+++ b/grid5000/steps/data/setup/hiera/hieradata/defaults.yaml
@@ -0,0 +1,12 @@
+---
+env::std::misc::rootpwd: ""
+env::std::oar::ssh:
+  oarnodesetting_ssh_key: ""
+  oarnodesetting_ssh_key_pub: ""
+  id_rsa: ""
+  id_rsa_pub: ""
+  oar_authorized_keys: ""
+  oar_ssh_host_dsa_key: ""
+  oar_ssh_host_dsa_key_pub: ""
+  oar_ssh_host_rsa_key: ""
+  oar_ssh_host_rsa_key_pub: ""
diff --git a/grid5000/steps/data/setup/puppet/manifests/base.pp b/grid5000/steps/data/setup/puppet/manifests/base.pp
new file mode 100644
index 0000000..316c364
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/manifests/base.pp
@@ -0,0 +1,6 @@
+# Base environment creation recipe
+# All recipes are stored in the 'env' module, called here with the 'base' variant parameter.
+
+class { 'env':
+  given_variant    => 'base';
+}
diff --git a/grid5000/steps/data/setup/puppet/manifests/big.pp b/grid5000/steps/data/setup/puppet/manifests/big.pp
new file mode 100644
index 0000000..fbae417
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/manifests/big.pp
@@ -0,0 +1,6 @@
+# Big environment creation recipe (base plus multiple packages)
+# All recipes are stored in the 'env' module, called here with the 'big' variant parameter.
+
+class { 'env':
+  given_variant    => 'big';
+}
diff --git a/grid5000/steps/data/setup/puppet/manifests/min.pp b/grid5000/steps/data/setup/puppet/manifests/min.pp
new file mode 100644
index 0000000..185d240
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/manifests/min.pp
@@ -0,0 +1,6 @@
+# Minimal environment creation recipe
+# All recipes are stored in the 'env' module, called here with the 'min' variant parameter.
+
+class { 'env':
+  given_variant    => 'min';
+}
diff --git a/grid5000/steps/data/setup/puppet/manifests/nfs.pp b/grid5000/steps/data/setup/puppet/manifests/nfs.pp
new file mode 100644
index 0000000..7f68dc2
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/manifests/nfs.pp
@@ -0,0 +1,6 @@
+# Base environment creation recipe with NFS enabled
+# All recipes are stored in the 'env' module, called here with the 'nfs' variant parameter.
+
+class { 'env':
+  given_variant    => 'nfs';
+}
diff --git a/grid5000/steps/data/setup/puppet/manifests/std.pp b/grid5000/steps/data/setup/puppet/manifests/std.pp
new file mode 100644
index 0000000..8a25c5f
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/manifests/std.pp
@@ -0,0 +1,6 @@
+# Standard environment creation recipe
+# All recipes are stored in the 'env' module, called here with the 'std' variant parameter.
+
+class { 'env':
+  given_variant    => 'std';
+}
diff --git a/grid5000/steps/data/setup/puppet/manifests/xen.pp b/grid5000/steps/data/setup/puppet/manifests/xen.pp
new file mode 100644
index 0000000..eb4cdf2
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/manifests/xen.pp
@@ -0,0 +1,6 @@
+# Xen environment creation recipe
+# All recipes are stored in the 'env' module, called here with the 'xen' variant parameter.
+
+class { 'env':
+  given_variant    => 'xen';
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/base/cpufreq/cpufrequtils b/grid5000/steps/data/setup/puppet/modules/env/files/base/cpufreq/cpufrequtils
new file mode 100644
index 0000000..03070fe
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/base/cpufreq/cpufrequtils
@@ -0,0 +1 @@
+GOVERNOR="performance"
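Note: this file is presumably installed as /etc/default/cpufrequtils, the
format read by the cpufrequtils init script; the equivalent one-shot manual
setting would be roughly "cpufreq-set -g performance" (per CPU, e.g. with -c <n>).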
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/base/ganglia/gmond.conf b/grid5000/steps/data/setup/puppet/modules/env/files/base/ganglia/gmond.conf
new file mode 100644
index 0000000..a2f8292
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/base/ganglia/gmond.conf
@@ -0,0 +1,336 @@
+/* This configuration is as close to 2.5.x default behavior as possible
+   The values closely match ./gmond/metric.h definitions in 2.5.x */
+globals {
+  daemonize = yes
+  setuid = yes
+  user = ganglia
+  debug_level = 0
+  max_udp_msg_len = 1472
+  mute = no
+  deaf = yes
+  host_dmax = 0 /*secs */
+  cleanup_threshold = 300 /*secs */
+  gexec = no
+  send_metadata_interval = 0
+}
+
+/* If a cluster attribute is specified, then all gmond hosts are wrapped inside
+ * of a <CLUSTER> tag.  If you do not specify a cluster tag, then all <HOSTS> will
+ * NOT be wrapped inside of a <CLUSTER> tag. */
+cluster {
+  name =  "my_clustername"
+  owner = "INRIA"
+  latlong = "unspecified"
+  url = "unspecified"
+}
+
+/* The host section describes attributes of the host, like the location */
+host {
+  location = "unspecified"
+}
+
+/* Feel free to specify as many udp_send_channels as you like.  Gmond
+   used to only support having a single channel */
+udp_send_channel { 
+  mcast_join = 239.2.11.71
+  port = 8649
+  ttl = 1
+}
+
+/* You can specify as many udp_recv_channels as you like as well. */
+udp_recv_channel {
+  mcast_join = 239.2.11.71
+  port = 8649
+  bind = 239.2.11.71
+}
+
+/* You can specify as many tcp_accept_channels as you like to share
+   an xml description of the state of the cluster */
+tcp_accept_channel {
+  port = 8649
+}
+
+/* Each metrics module that is referenced by gmond must be specified and
+   loaded. If the module has been statically linked with gmond, it does not
+   require a load path. However all dynamically loadable modules must include
+   a load path. */
+modules {
+  module {
+    name = "core_metrics"
+  }
+  module {
+    name = "cpu_module"
+    path = "/usr/lib/ganglia/modcpu.so"
+  }
+  module {
+    name = "disk_module"
+    path = "/usr/lib/ganglia/moddisk.so"
+  }
+  module {
+    name = "load_module"
+    path = "/usr/lib/ganglia/modload.so"
+  }
+  module {
+    name = "mem_module"
+    path = "/usr/lib/ganglia/modmem.so"
+  }
+  module {
+    name = "net_module"
+    path = "/usr/lib/ganglia/modnet.so"
+  }
+  module {
+    name = "proc_module"
+    path = "/usr/lib/ganglia/modproc.so"
+  }
+  module {
+    name = "sys_module"
+    path = "/usr/lib/ganglia/modsys.so"
+  }
+}
+
+include ('/etc/ganglia/conf.d/*.conf')
+
+
+/* The old internal 2.5.x metric array has been replaced by the following
+   collection_group directives.  What follows is the default behavior for
+   collecting and sending metrics that is as close to 2.5.x behavior as
+   possible. */
+
+/* This collection group will cause a heartbeat (or beacon) to be sent every
+   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
+   the age of the running gmond. */
+collection_group {
+  collect_once = yes
+  time_threshold = 20
+  metric {
+    name = "heartbeat"
+  }
+}
+
+/* This collection group will send general info about this host every 1200 secs.
+   This information doesn't change between reboots and is only collected once. */
+collection_group {
+  collect_once = yes
+  time_threshold = 1200
+  metric {
+    name = "cpu_num"
+    title = "CPU Count"
+  }
+  metric {
+    name = "cpu_speed"
+    title = "CPU Speed"
+  }
+  metric {
+    name = "mem_total"
+    title = "Memory Total"
+  }
+  /* Should this be here? Swap can be added/removed between reboots. */
+  metric {
+    name = "swap_total"
+    title = "Swap Space Total"
+  }
+  metric {
+    name = "boottime"
+    title = "Last Boot Time"
+  }
+  metric {
+    name = "machine_type"
+    title = "Machine Type"
+  }
+  metric {
+    name = "os_name"
+    title = "Operating System"
+  }
+  metric {
+    name = "os_release"
+    title = "Operating System Release"
+  }
+  metric {
+    name = "location"
+    title = "Location"
+  }
+}
+
+/* This collection group will send the status of gexecd for this host every 300 secs */
+/* Unlike 2.5.x the default behavior is to report gexecd OFF.  */
+collection_group {
+  collect_once = yes
+  time_threshold = 300
+  metric {
+    name = "gexec"
+    title = "Gexec Status"
+  }
+}
+
+/* This collection group will collect the CPU status info every 20 secs.
+   The time threshold is set to 90 seconds.  In honesty, this time_threshold could be
+   set significantly higher to reduce unnecessary network chatter. */
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* CPU status */
+  metric {
+    name = "cpu_user"
+    value_threshold = "1.0"
+    title = "CPU User"
+  }
+  metric {
+    name = "cpu_system"
+    value_threshold = "1.0"
+    title = "CPU System"
+  }
+  metric {
+    name = "cpu_idle"
+    value_threshold = "5.0"
+    title = "CPU Idle"
+  }
+  metric {
+    name = "cpu_nice"
+    value_threshold = "1.0"
+    title = "CPU Nice"
+  }
+  metric {
+    name = "cpu_aidle"
+    value_threshold = "5.0"
+    title = "CPU aidle"
+  }
+  metric {
+    name = "cpu_wio"
+    value_threshold = "1.0"
+    title = "CPU wio"
+  }
+  /* The next two metrics are optional if you want more detail...
+     ... since they are accounted for in cpu_system.
+  metric {
+    name = "cpu_intr"
+    value_threshold = "1.0"
+    title = "CPU intr"
+  }
+  metric {
+    name = "cpu_sintr"
+    value_threshold = "1.0"
+    title = "CPU sintr"
+  }
+  */
+}
+
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* Load Averages */
+  metric {
+    name = "load_one"
+    value_threshold = "1.0"
+    title = "One Minute Load Average"
+  }
+  metric {
+    name = "load_five"
+    value_threshold = "1.0"
+    title = "Five Minute Load Average"
+  }
+  metric {
+    name = "load_fifteen"
+    value_threshold = "1.0"
+    title = "Fifteen Minute Load Average"
+  }
+}
+
+/* This group collects the number of running and total processes */
+collection_group {
+  collect_every = 80
+  time_threshold = 950
+  metric {
+    name = "proc_run"
+    value_threshold = "1.0"
+    title = "Total Running Processes"
+  }
+  metric {
+    name = "proc_total"
+    value_threshold = "1.0"
+    title = "Total Processes"
+  }
+}
+
+/* This collection group grabs the volatile memory metrics every 40 secs and
+   sends them at least every 180 secs.  This time_threshold can be increased
+   significantly to reduce unneeded network traffic. */
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "mem_free"
+    value_threshold = "1024.0"
+    title = "Free Memory"
+  }
+  metric {
+    name = "mem_shared"
+    value_threshold = "1024.0"
+    title = "Shared Memory"
+  }
+  metric {
+    name = "mem_buffers"
+    value_threshold = "1024.0"
+    title = "Memory Buffers"
+  }
+  metric {
+    name = "mem_cached"
+    value_threshold = "1024.0"
+    title = "Cached Memory"
+  }
+  metric {
+    name = "swap_free"
+    value_threshold = "1024.0"
+    title = "Free Swap Space"
+  }
+}
+
+collection_group {
+  collect_every = 40
+  time_threshold = 300
+  metric {
+    name = "bytes_out"
+    value_threshold = 4096
+    title = "Bytes Sent"
+  }
+  metric {
+    name = "bytes_in"
+    value_threshold = 4096
+    title = "Bytes Received"
+  }
+  metric {
+    name = "pkts_in"
+    value_threshold = 256
+    title = "Packets Received"
+  }
+  metric {
+    name = "pkts_out"
+    value_threshold = 256
+    title = "Packets Sent"
+  }
+}
+
+/* Different than 2.5.x default since the old config made no sense */
+collection_group {
+  collect_every = 1800
+  time_threshold = 3600
+  metric {
+    name = "disk_total"
+    value_threshold = 1.0
+    title = "Total Disk Space"
+  }
+}
+
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "disk_free"
+    value_threshold = 1.0
+    title = "Disk Space Available"
+  }
+  metric {
+    name = "part_max_used"
+    value_threshold = 1.0
+    title = "Maximum Disk Space Used"
+  }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/base/infiniband/90-ib.rules b/grid5000/steps/data/setup/puppet/modules/env/files/base/infiniband/90-ib.rules
new file mode 100644
index 0000000..994f4a0
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/base/infiniband/90-ib.rules
@@ -0,0 +1,6 @@
+KERNEL=="umad*", NAME="infiniband/%k"
+KERNEL=="issm*", NAME="infiniband/%k"
+KERNEL=="ucm*", NAME="infiniband/%k", MODE="0666"
+KERNEL=="uverbs*", NAME="infiniband/%k", MODE="0666"
+KERNEL=="ucma", NAME="infiniband/%k", MODE="0666"
+KERNEL=="rdma_cm", NAME="infiniband/%k", MODE="0666"
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/base/infiniband/openib.conf b/grid5000/steps/data/setup/puppet/modules/env/files/base/infiniband/openib.conf
new file mode 100644
index 0000000..87981c7
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/base/infiniband/openib.conf
@@ -0,0 +1,39 @@
+# Start HCA driver upon boot
+ONBOOT=yes
+
+# Load UCM module
+UCM_LOAD=no
+
+# Load RDMA_CM module
+RDMA_CM_LOAD=yes
+
+# Load RDMA_UCM module
+RDMA_UCM_LOAD=yes
+
+# Increase ib_mad thread priority
+RENICE_IB_MAD=no
+
+# Load MTHCA
+MTHCA_LOAD=yes
+
+# Load MLX4 modules
+MLX4_LOAD=yes
+
+# Load MLX5 modules
+MLX5_LOAD=yes
+
+# Load MLX4_EN module
+MLX4_EN_LOAD=yes
+
+# Load CXGB3 modules
+CXGB3_LOAD=no
+
+# Load NES modules
+NES_LOAD=no
+
+# Load IPoIB
+IPOIB_LOAD=yes
+
+# Enable IPoIB Connected Mode
+SET_IPOIB_CM=yes
+
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/base/infiniband/openibd b/grid5000/steps/data/setup/puppet/modules/env/files/base/infiniband/openibd
new file mode 100644
index 0000000..b943e72
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/base/infiniband/openibd
@@ -0,0 +1,1610 @@
+#!/bin/bash
+### BEGIN INIT INFO
+# Provides:       openibd
+# Required-Start: $local_fs
+# Required-Stop: opensmd
+# Default-Start:  2 3 5
+# Default-Stop: 0 1 2 6
+# Description:    Activates/Deactivates InfiniBand Driver to #                 start at boot time.
+### END INIT INFO
+#
+# Copyright (c) 2013 Mellanox Technologies. All rights reserved.
+# Copyright (c) 2010 QLogic Corporation. All rights reserved.
+#
+# This Software is licensed under one of the following licenses:
+#
+# 1) under the terms of the "Common Public License 1.0" a copy of which is
+#    available from the Open Source Initiative, see
+#    http://www.opensource.org/licenses/cpl.php.
+#
+# 2) under the terms of the "The BSD License" a copy of which is
+#    available from the Open Source Initiative, see
+#    http://www.opensource.org/licenses/bsd-license.php.
+#
+# 3) under the terms of the "GNU General Public License (GPL) Version 2" a
+#    copy of which is available from the Open Source Initiative, see
+#    http://www.opensource.org/licenses/gpl-license.php.
+#
+# Licensee has the right to choose one of the above licenses.
+#
+# Redistributions of source code must retain the above copyright
+# notice and one of the license notices.
+#
+# Redistributions in binary form must reproduce both the above copyright
+# notice, one of the license notices in the documentation
+# and/or other materials provided with the distribution.
+#
+#
+#  $Id: openibd 9139 2006-08-29 14:03:38Z vlad $
+#
+
+# config: /etc/infiniband/openib.conf
+OPENIBD_CONFIG=${OPENIBD_CONFIG:-"/etc/infiniband/openib.conf"}
+CONFIG=$OPENIBD_CONFIG
+export LANG=en_US.UTF-8
+
+if [ ! -f $CONFIG ]; then
+    echo No InfiniBand configuration found
+    exit 0
+fi
+
+. $CONFIG
+
+CWD=`pwd`
+cd /etc/infiniband
+WD=`pwd`
+
+PATH=$PATH:/sbin:/usr/bin:/lib/udev
+if [ -e /etc/profile.d/ofed.sh ]; then
+        . /etc/profile.d/ofed.sh
+fi
+
+# Only use ONBOOT option if called by a runlevel directory.
+# Therefore determine the base, follow a runlevel link name ...
+base=${0##*/}
+link=${base#*[SK][0-9][0-9]}
+# ... and compare them
+if [[ $link == $base && "$0" != "/etc/rc.d/init.d/openibd" ]] ; then
+    RUNMODE=manual
+    ONBOOT=yes
+else
+    RUNMODE=auto
+fi
+
+# Allow unsupported modules, if disallowed by current configuration
+modprobe=/sbin/modprobe
+if ${modprobe} -c | grep -q '^allow_unsupported_modules  *0'; then
+    modprobe="${modprobe} --allow-unsupported-modules"
+fi
+
+ACTION=$1
+shift
+max_ports_num_in_hca=0
+
+# Check if OpenIB configured to start automatically
+if [ "X${ONBOOT}" != "Xyes" ]; then
+    exit 0
+fi
+
+if ( grep -i 'SuSE Linux' /etc/issue >/dev/null 2>&1 ); then
+    if [ -n "$INIT_VERSION" ] ; then
+        # MODE=onboot
+            if LANG=C egrep -L "^ONBOOT=['\"]?[Nn][Oo]['\"]?" ${CONFIG} > /dev/null ; then
+                    exit 0
+            fi
+    fi
+fi
+
+#########################################################################
+# Get a sane screen width
+[ -z "${COLUMNS:-}" ] && COLUMNS=80
+
+[ -z "${CONSOLETYPE:-}" ] && [ -x /sbin/consoletype ] && 
CONSOLETYPE="`/sbin/consoletype`"
+
+# Read in our configuration
+if [ -z "${BOOTUP:-}" ]; then
+  if [ -f /etc/sysconfig/init ]; then
+      . /etc/sysconfig/init
+  else
+    # This all seem confusing? Look in /etc/sysconfig/init,
+    # or in /usr/doc/initscripts-*/sysconfig.txt
+    BOOTUP=color
+    RES_COL=60
+    MOVE_TO_COL="echo -en \\033[${RES_COL}G"
+    SETCOLOR_SUCCESS="echo -en \\033[1;32m"
+    SETCOLOR_FAILURE="echo -en \\033[1;31m"
+    SETCOLOR_WARNING="echo -en \\033[1;33m"
+    SETCOLOR_NORMAL="echo -en \\033[0;39m"
+    LOGLEVEL=1
+  fi
+  if [ "$CONSOLETYPE" = "serial" ]; then
+      BOOTUP=serial
+      MOVE_TO_COL=
+      SETCOLOR_SUCCESS=
+      SETCOLOR_FAILURE=
+      SETCOLOR_WARNING=
+      SETCOLOR_NORMAL=
+  fi
+fi
+
+if [ "${BOOTUP:-}" != "verbose" ]; then
+   INITLOG_ARGS="-q"
+else
+   INITLOG_ARGS=
+fi
+
+echo_success() {
+  echo -n $@
+  [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+  echo -n "[  "
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_SUCCESS
+  echo -n $"OK"
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+  echo -n "  ]"
+  echo -e "\r"
+  return 0
+}
+
+echo_done() {
+  echo -n $@
+  [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+  echo -n "[  "
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+  echo -n $"done"
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+  echo -n "  ]"
+  echo -e "\r"
+  return 0
+}
+
+echo_failure() {
+  echo -n $@
+  [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+  echo -n "["
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_FAILURE
+  echo -n $"FAILED"
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+  echo -n "]"
+  echo -e "\r"
+  return 1
+}
+
+echo_warning() {
+  echo -n $@
+  [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+  echo -n "["
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_WARNING
+  echo -n $"WARNING"
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+  echo -n "]"
+  echo -e "\r"
+  return 1
+}
+
+count_ib_ports()
+{
+    local cnt=0
+    local ports_in_hca=0
+    sysdir=/sys/class/infiniband
+    hcas=$(/bin/ls -1 ${sysdir} 2> /dev/null)
+    for hca in $hcas
+    do
+        ports_in_hca=$(/bin/ls -1 ${sysdir}/${hca}/ports 2> /dev/null | wc -l)
+        if [ $ports_in_hca -gt $max_ports_num_in_hca ]; then
+                max_ports_num_in_hca=$ports_in_hca
+        fi
+        cnt=$[ $cnt + $ports_in_hca ]
+    done
+
+    return $cnt
+}
+
+# Setting Environment variables
+if [ -f /etc/redhat-release ]; then
+    DISTRIB="RedHat"
+    NETWORK_CONF_DIR="/etc/sysconfig/network-scripts"
+elif [ -f /etc/rocks-release ]; then
+    DISTRIB="Rocks"
+    NETWORK_CONF_DIR="/etc/sysconfig/network-scripts"
+elif [ -f /etc/SuSE-release ]; then
+    DISTRIB="SuSE"
+    NETWORK_CONF_DIR="/etc/sysconfig/network"
+elif [ -f /etc/debian_version ]; then
+    DISTRIB="Debian"
+    NETWORK_CONF_DIR="/etc/infiniband"
+else
+    DISTRIB=`ls /etc/*-release | head -n 1 | xargs -iXXX basename XXX -release 2> /dev/null`
+    if [ -d /etc/sysconfig/network-scripts ]; then
+        NETWORK_CONF_DIR="/etc/sysconfig/network-scripts"
+    elif [ -d /etc/sysconfig/network ]; then
+        NETWORK_CONF_DIR="/etc/sysconfig/network"
+    else
+        echo_failure "You system is not supported for IPoIB configuration"
+        echo "Try to load driver manually using configuration files from $WD 
directory"
+        exit 1
+    fi
+fi
+
+# Define kernel version prefix
+KPREFIX=`uname -r | cut -c -3 | tr -d '.' | tr -d '[:space:]'`
+
+# Setting OpenIB start parameters
+POST_LOAD_MODULES=""
+
+RUN_SYSCTL=${RUN_SYSCTL:-"no"}
+
+IPOIB=0
+IPOIB_MTU=${IPOIB_MTU:-65520}
+if [ "X${IPOIB_LOAD}" == "Xyes" ]; then
+    IPOIB=1
+fi
+
+if [ "X${SRP_LOAD}" == "Xyes" ]; then
+    POST_LOAD_MODULES="$POST_LOAD_MODULES ib_srp"
+fi
+
+if [ "X${SRPT_LOAD}" == "Xyes" ]; then
+    POST_LOAD_MODULES="$POST_LOAD_MODULES ib_srpt"
+fi
+
+if [ "X${QLGC_VNIC_LOAD}" == "Xyes" ]; then
+    POST_LOAD_MODULES="$POST_LOAD_MODULES qlgc_vnic"
+fi
+
+if [ "X${SRP_TARGET_LOAD}" == "Xyes" ]; then
+    POST_LOAD_MODULES="$POST_LOAD_MODULES ib_srp_target"
+fi
+
+if [ "X${RDMA_CM_LOAD}" == "Xyes" ]; then
+    POST_LOAD_MODULES="$POST_LOAD_MODULES rdma_cm"
+fi
+
+if [ "X${UCM_LOAD}" == "Xyes" ]; then
+    POST_LOAD_MODULES="$POST_LOAD_MODULES ib_ucm"
+fi
+
+if [ "X${RDS_LOAD}" == "Xyes" ]; then
+    POST_LOAD_MODULES="$POST_LOAD_MODULES rds rds_rdma rds_tcp"
+fi
+
+if [ "X${RDMA_UCM_LOAD}" == "Xyes" ]; then
+    POST_LOAD_MODULES="$POST_LOAD_MODULES rdma_ucm"
+fi
+
+GEN1_UNLOAD_MODULES="ib_srp_target scsi_target ib_srp kdapltest_module ib_kdapl ib_useraccess ib_useraccess_cm ib_cm ib_dapl_srv ib_ip2pr ib_ipoib ib_tavor mod_thh mod_rhh ib_dm_client ib_sa_client ib_client_query ib_poll ib_mad ib_core ib_services"
+
+UNLOAD_MODULES="ib_mthca mlx5_ib mlx5_core mlx4_ib ib_ipath ipath_core ib_ehca iw_nes i40iw iw_cxgb3 cxgb3 iw_cxgb4 cxgb4 ocrdma bnxt_re bnxt_en"
+UNLOAD_MODULES="$UNLOAD_MODULES ib_qib"
+UNLOAD_MODULES="$UNLOAD_MODULES ib_ipoib ib_madeye ib_rds"
+UNLOAD_MODULES="$UNLOAD_MODULES rpcrdma rds_rdma rds_tcp rds ib_ucm kdapl ib_srp_target scsi_target ib_srpt ib_srp ib_iser"
+UNLOAD_MODULES="$UNLOAD_MODULES rdma_ucm rdma_cm iw_cm ib_cm ib_local_sa findex"
+UNLOAD_MODULES="$UNLOAD_MODULES ib_sa ib_uverbs ib_umad ib_mad ib_core ib_addr"
+
+STATUS_MODULES="rdma_ucm ib_rds rds rds_rdma rds_tcp ib_srpt ib_srp qlgc_vnic rdma_cm ib_addr ib_local_sa findex ib_ipoib ib_ehca ib_ipath ipath_core mlx4_core mlx4_ib mlx4_en mlx5_core mlx5_ib ib_mthca ib_uverbs ib_umad ib_ucm ib_sa ib_cm ib_mad ib_core iw_cxgb3 iw_cxgb4 iw_nes i40iw vmw_pvrdma"
+STATUS_MODULES="$STATUS_MODULES ib_qib ocrdma bnxt_re bnxt_en"
+
+if (modinfo scsi_transport_srp 2>/dev/null | grep depends: | grep -q compat 2>/dev/null) ||
+    (lsmod 2>/dev/null | grep scsi_transport_srp | grep -q compat); then
+    UNLOAD_MODULES="$UNLOAD_MODULES scsi_transport_srp"
+    STATUS_MODULES="$STATUS_MODULES scsi_transport_srp"
+fi
+
+ipoib_ha_pidfile=/var/run/ipoib_ha.pid
+srp_daemon_pidfile=/var/run/srp_daemon.pid
+_truescale=/etc/infiniband/truescale.cmds
+
+get_interfaces()
+{
+    interfaces=$(cd /sys/class/net;/bin/ls -d ib* 2> /dev/null)
+}
+
+get_mlx4_en_interfaces()
+{
+    mlx4_en_interfaces=""
+    for ethpath in /sys/class/net/*
+    do
+        if (grep 0x15b3 ${ethpath}/device/vendor > /dev/null 2>&1); then
+            mlx4_en_interfaces="$mlx4_en_interfaces ${ethpath##*/}"
+        fi
+    done
+}
+
+# If module $1 is loaded return - 0 else - 1
+is_module()
+{
+local RC
+
+    /sbin/lsmod | grep -w "$1" > /dev/null 2>&1
+    RC=$?
+
+return $RC
+}
+
+log_msg()
+{
+    logger -i "openibd: $@"
+}
+
+load_module()
+{
+    local module=$1
+    filename=`modinfo $module | grep filename | awk '{print $NF}'`
+
+    if [ ! -n "$filename" ]; then
+        echo_failure "Module $module does not exist"
+        log_msg "Error: Module $module does not exist"
+        return 1
+    fi
+
+    if [ -L $filename ]; then
+        filename=`readlink -f $filename`
+    fi
+
+    rpm_name=`/bin/rpm -qf $filename --queryformat "[%{NAME}]" 2> /dev/null`
+    if [ $? -ne 0 ]; then
+            echo_warning "Module $module does not belong to OFED"
+            log_msg "Module $module does not belong to OFED"
+    else
+        case "$rpm_name" in
+            *compat-rdma* | kernel-ib)
+            ;;
+            *)
+            echo_warning "Module $module belongs to $rpm_name which is not a part of OFED"
+            log_msg "Module $module belongs to $rpm_name which is not a part of OFED"
+            ;;
+        esac
+    fi
+    ${modprobe} $module > /dev/null 2>&1
+}
+
+# Return module's refcnt
+is_ref()
+{
+    local refcnt
+    refcnt=`cat /sys/module/"$1"/refcnt 2> /dev/null`
+    return $refcnt
+}
+
+get_sw_fw_info()
+{
+    INFO=/etc/infiniband/info
+    OFEDHOME="/usr/local"
+    if [ -x ${INFO} ]; then
+        OFEDHOME=$(${INFO} | grep -w prefix | cut -d '=' -f 2)
+    fi
+    MREAD=$(which mstmread 2> /dev/null)
+
+    # Get OFED Build id
+    if [ -r ${OFEDHOME}/BUILD_ID ]; then
+        echo  "Software"
+        echo  "-------------------------------------"
+        printf "Build ID:\n"
+        cat ${OFEDHOME}/BUILD_ID
+        echo  "-------------------------------------"
+    fi
+
+    # Get FW version
+    if [ ! -x ${MREAD} ]; then
+        return 1
+    fi
+
+    vendor="15b3"
+    slots=$(lspci -n -d "${vendor}:" 2> /dev/null | grep -v "5a46" | cut -d ' ' -f 1)
+    for mst_device in $slots
+    do
+        major=$($MREAD ${mst_device} 0x82478 2> /dev/null | cut -d ':' -f 2)
+        subminor__minor=$($MREAD ${mst_device} 0x8247c 2> /dev/null | cut -d ':' -f 2)
+        ftime=$($MREAD ${mst_device} 0x82480 2> /dev/null | cut -d ':' -f 2)
+        fdate=$($MREAD ${mst_device} 0x82484 2> /dev/null | cut -d ':' -f 2)
+
+        major=$(echo -n $major | cut -d x -f 2 | cut -b 4)
+        subminor__minor1=$(echo -n $subminor__minor | cut -d x -f 2 | cut -b 3,4)
+        subminor__minor2=$(echo -n $subminor__minor | cut -d x -f 2 | cut -b 5,6,7,8)
+        echo
+        echo "Device ${mst_device} Info:"
+        echo "Firmware:"
+
+        printf "\tVersion:"
+        printf "\t$major.$subminor__minor1.$subminor__minor2\n"
+
+        day=$(echo -n $fdate | cut -d x -f 2 | cut -b 7,8)
+        month=$(echo -n $fdate | cut -d x -f 2 | cut -b 5,6)
+        year=$(echo -n $fdate | cut -d x -f 2 | cut -b 1,2,3,4)
+        hour=$(echo -n $ftime | cut -d x -f 2 | cut -b 5,6)
+        min=$(echo -n $ftime | cut -d x -f 2 | cut -b 3,4)
+        sec=$(echo -n $ftime | cut -d x -f 2 | cut -b 1,2)
+
+        printf "\tDate:"
+        printf "\t$day/$month/$year $hour:$min:$sec\n"
+    done
+}
+
+# Create debug info
+get_debug_info()
+{
+    trap '' 2 9 15
+    DEBUG_INFO=/tmp/ib_debug_info.log
+    /bin/rm -f $DEBUG_INFO
+    touch $DEBUG_INFO
+    echo "Hostname: `hostname -s`" >> $DEBUG_INFO
+    test -e /etc/issue && echo "OS: `cat /etc/issue`" >> $DEBUG_INFO
+    echo "Current kernel: `uname -r`" >> $DEBUG_INFO
+    echo "Architecture: `uname -m`" >> $DEBUG_INFO
+    which gcc &>/dev/null && echo "GCC version: `gcc --version`"  >> $DEBUG_INFO
+    echo "CPU: `cat /proc/cpuinfo | /bin/grep -E \"model name|arch\" | head -1`" >> $DEBUG_INFO
+    echo "`cat /proc/meminfo | /bin/grep \"MemTotal\"`" >> $DEBUG_INFO
+    echo "Chipset: `/sbin/lspci 2> /dev/null | head -1 | cut -d ':' -f 2-`" >> $DEBUG_INFO
+
+    echo >> $DEBUG_INFO
+    get_sw_fw_info >> $DEBUG_INFO
+    echo >> $DEBUG_INFO
+
+    echo >> $DEBUG_INFO
+    echo "############# LSPCI ##############" >> $DEBUG_INFO
+    /sbin/lspci 2> /dev/null >> $DEBUG_INFO
+
+    echo >> $DEBUG_INFO
+    echo "############# LSPCI -N ##############" >> $DEBUG_INFO
+    /sbin/lspci -n 2> /dev/null >> $DEBUG_INFO
+
+    echo >> $DEBUG_INFO
+    echo "############# LSMOD ##############" >> $DEBUG_INFO
+    /sbin/lsmod >> $DEBUG_INFO
+
+    echo >> $DEBUG_INFO
+    echo "############# DMESG ##############" >> $DEBUG_INFO
+    /bin/dmesg >> $DEBUG_INFO
+
+    if [ -r /var/log/messages ]; then
+        echo >> $DEBUG_INFO
+        echo "############# Messages ##############" >> $DEBUG_INFO
+        tail -50 /var/log/messages >> $DEBUG_INFO
+    fi
+
+    echo >> $DEBUG_INFO
+    echo "############# Running Processes ##############" >> $DEBUG_INFO
+    /bin/ps -ef >> $DEBUG_INFO
+    echo "##############################################" >> $DEBUG_INFO
+
+    echo
+    echo "Please open an issue at http://bugs.openfabrics.org and attach $DEBUG_INFO"
+    echo
+}
+
+ib_set_node_desc()
+{
+      # Wait while node's hostname is set
+      NODE_DESC_TIME_BEFORE_UPDATE=${NODE_DESC_TIME_BEFORE_UPDATE:-10}
+      local -i UPDATE_TIMEOUT=${NODE_DESC_UPDATE_TIMEOUT:-120}
+      sleep $NODE_DESC_TIME_BEFORE_UPDATE
+      # Reread NODE_DESC value
+      . $CONFIG
+      NODE_DESC=${NODE_DESC:-$(hostname -s)}
+      while [ "${NODE_DESC}" == "localhost" ] && [ $UPDATE_TIMEOUT -gt 0 ]; do
+          sleep 1
+          . $CONFIG
+          NODE_DESC=${NODE_DESC:-$(hostname -s)}
+          let UPDATE_TIMEOUT--
+      done
+      # Add node description to sysfs
+      ibsysdir="/sys/class/infiniband"
+      if [ -d ${ibsysdir} ]; then
+          declare -i hca_id=1
+          for hca in ${ibsysdir}/*
+          do
+              if [ -e ${hca}/node_desc ]; then
+                  log_msg "Set node_desc for $(basename $hca): ${NODE_DESC} HCA-${hca_id}"
+                  echo -n "${NODE_DESC} HCA-${hca_id}" >> ${hca}/node_desc
+              fi
+              let hca_id++
+          done
+      fi
+}
+
+
+need_location_code_fix()
+{
+       local sub ARCH KVERSION
+       ARCH=$(uname -m)
+       KVERSION=$(uname -r)
+
+       if [ "$ARCH" != "ppc64" ]; then
+               return 1;
+       fi
+
+       case $KVERSION in
+       2.6.9-*.EL*)
+               sub=$(echo $KVERSION | cut -d"-" -f2 | cut -d"." -f1)
+               if [ $sub -lt 62 ]; then
+                       return 2;
+               fi
+       ;;
+       2.6.16.*-*-*)
+               sub=$(echo $KVERSION | cut -d"." -f4 | cut -d"-" -f1)
+               if [ $sub -lt 53 ]; then
+                       return 0;
+               fi
+       ;;
+       2.6.18-*.el5*)
+               sub=$(echo $KVERSION | cut -d"-" -f2 | cut -d"." -f1)
+               if [ $sub -lt 54 ]; then
+                       return 0;
+               fi
+       ;;
+       2.6.*)
+               sub=$(echo $KVERSION | cut -d"." -f3 | cut -d"-" -f1 | tr -d [:alpha:][:punct:])
+               if [ $sub -lt 24 ]; then
+                       return 0;
+               fi
+       ;;
+       esac
+
+       return 1;
+}
+
+fix_location_codes()
+{
+       # ppc64 only:
+       # Fix duplicate location codes on kernels where ibmebus can't handle them
+
+       need_location_code_fix
+       ret=$?
+       if  [ $ret = 1 ]; then return 0; fi
+       if ! [ -d /proc/device-tree -a -f /proc/ppc64/ofdt ]; then return 0; fi
+
+       local i=1 phandle lcode len
+       # output all duplicate location codes and their devices
+       for attr in $(find /proc/device-tree -name "ibm,loc-code" | grep "lh.a"); do
+               echo -e $(dirname $attr)"\t"$(cat $attr)
+       done | sort -k2 | uniq -f1 --all-repeated=separate | cut -f1 | while read dev; do
+               if [ -n "$dev" ]; then
+                       # append an instance counter to the location code
+                       phandle=$(hexdump -e '8 "%u"' $dev/ibm,phandle)
+                       lcode=$(cat $dev/ibm,loc-code)-I$i
+                       len=$(echo -n "$lcode" | wc -c)
+                       node=${dev#/proc/device-tree}
+
+                       # kernel-2.6.9 doesn't provide "update_property"
+                       if [ ! -z "$(echo -n "$node" | grep "lhca")" ]; then
+                               if [ $ret = 2 ]; then
+                                       echo -n "add_node $node" > /tmp/addnode
+                                       cd $dev
+                                       for a in *; do
+                                               SIZE=$(stat -c%s $a)
+                                               if [ "$a" = "ibm,loc-code" ] ; then
+                                                       echo -n " $a $len $lcode" >> /tmp/addnode
+                                               elif [ "$a" = "interrupts" ] ; then
+                                                       echo -n " $a 0 " >> /tmp/addnode
+                                               else
+                                                       echo -n " $a $SIZE " >> /tmp/addnode
+                                                       cat $a >> /tmp/addnode
+                                               fi
+                                       done
+                                       echo -n "remove_node $node" > /proc/ppc64/ofdt
+                                       cat /tmp/addnode > /proc/ppc64/ofdt
+                                       rm -rf /tmp/addnode
+                               else
+                                       echo -n "update_property $phandle ibm,loc-code $len $lcode" > /proc/ppc64/ofdt
+                               fi
+                       i=$(($i + 1))
+                       fi
+               else
+                       # empty line means new group -- reset i
+                       i=1
+               fi
+       done
+}
+
+rotate_log()
+{
+        local log=$1
+        if [ -s ${log} ]; then
+                cat ${log} >> ${log}.$(date +%Y-%m-%d)
+                /bin/rm -f ${log}
+        fi
+        touch ${log}
+}
+
+is_ivyb()
+{
+    cpu_family=`/usr/bin/lscpu 2>&1 | grep "CPU family" | cut -d':' -f 2 | sed -e 's/ //g'`
+    cpu_model=`/usr/bin/lscpu 2>&1 | grep "Model:" | cut -d':' -f 2 | sed -e 's/ //g'`
+
+    case "${cpu_family}_${cpu_model}" in
+        6_62)
+        return 0
+        ;;
+        *)
+        return 1
+        ;;
+    esac
+}
+
+# Check whether IPoIB interface configured to be started upon boot.
+is_onboot()
+{
+        local i=$1
+        shift
+
+        case $DISTRIB in
+            RedHat|Rocks)
+                if LANG=C egrep -L "^ONBOOT=['\"]?[Nn][Oo]['\"]?" ${NETWORK_CONF_DIR}/ifcfg-$i > /dev/null ; then
+                    return 1
+                fi
+            ;;
+            SuSE)
+                if ! LANG=C egrep -L "^STARTMODE=['\"]?onboot['\"]?" ${NETWORK_CONF_DIR}/ifcfg-$i > /dev/null ; then
+                    return 1
+                fi
+            ;;
+            Debian)
+                if ! ( LANG=C grep auto /etc/network/interfaces | grep -w $i > /dev/null 2>&1) ; then
+                    return 1
+                fi
+            ;;
+            *)
+                if LANG=C egrep -L "^ONBOOT=['\"]?[Nn][Oo]['\"]?" ${NETWORK_CONF_DIR}/ifcfg-$i > /dev/null ; then
+                    return 1
+                fi
+            ;;
+        esac
+
+        return 0
+}
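+# Illustrative /etc/network/interfaces stanza that the Debian branch of
+# is_onboot() above treats as "start on boot" (interface name and addresses
+# are examples only):
+#   auto ib0
+#   iface ib0 inet static
+#       address 172.16.0.10
+#       netmask 255.255.0.0
+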
+
+set_ipoib_cm()
+{
+        local i=$1
+        shift
+
+        if [ ! -e /sys/class/net/${i}/mode ]; then
+                echo "Failed to configure IPoIB connected mode for ${i}"
+                return 1
+        fi
+
+        sleep 1
+        echo connected > /sys/class/net/${i}/mode
+       /sbin/ip link set ${i} mtu ${IPOIB_MTU}
+}
+
+bring_up()
+{
+        local i=$1
+        shift
+
+        case $DISTRIB in
+                RedHat|Rocks)
+                    . ${NETWORK_CONF_DIR}/ifcfg-${i}
+                    if [ ! -z ${IPADDR} ] && [ ! -z ${NETMASK} ] && [ ! -z ${BROADCAST} ]; then
+                        /sbin/ifconfig ${i} ${IPADDR} netmask ${NETMASK} broadcast ${BROADCAST} > /dev/null 2>&1
+                    else
+                        /sbin/ifup ${i} 2> /dev/null
+                    fi
+                ;;
+                SuSE)
+                    if [ "$KPREFIX" == "26" ]; then
+                        ifconfig ${i} up > /dev/null 2>&1
+                    fi
+                    # Workaround for ifup issue: two devices with the same IP address
+                    . ${NETWORK_CONF_DIR}/ifcfg-${i}
+                    if [ ! -z ${IPADDR} ] && [ ! -z ${NETMASK} ] && [ ! -z ${BROADCAST} ]; then
+                        /sbin/ifconfig ${i} ${IPADDR} netmask ${NETMASK} broadcast ${BROADCAST} > /dev/null 2>&1
+                    else
+                        /sbin/ifup ${i}
+                    fi
+                    # /sbin/ifup ${i} > /dev/null 2>&1
+                ;;
+                Debian)
+                    . ${NETWORK_CONF_DIR}/ifcfg-${i}
+                    /sbin/ip address add ${IPADDR}/${NETMASK} dev ${i} > /dev/null 2>&1
+                    /sbin/ip link set ${i} up > /dev/null 2>&1
+                ;;
+            *)
+                    /sbin/ifup ${i} 2> /dev/null
+                ;;
+        esac
+
+        if [ "X${SET_IPOIB_CM}" == "Xyes" ]; then
+                set_ipoib_cm ${i}
+        fi
+
+        return $?
+}
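+# Illustrative ${NETWORK_CONF_DIR}/ifcfg-ib0 consumed by the Debian branch of
+# bring_up() above; NETMASK is passed verbatim to "ip address add
+# ${IPADDR}/${NETMASK}", so a prefix length is the safest form (addresses are
+# examples only):
+#   IPADDR=172.16.0.10
+#   NETMASK=16
+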
+
+is_active_vf()
+{
+    # test if have ConnectX with VFs
+    # if not, no need to proceed further. Return 0 (no VFs active)
+    lspci | grep Mellanox | grep ConnectX | grep Virtual > /dev/null
+    if [ $? -ne 0 ] ; then
+        # No VFs activated
+        return 1
+    fi
+
+    # test for virsh
+    virsh -v > /dev/null 2> /dev/null
+    if [ $? -ne 0 ] ; then
+        # No virsh
+        return 1
+    fi
+
+    # test if running virsh by mistake on a guest
+    virsh sysinfo > /dev/null 2> /dev/null
+    if [ $? -ne 0 ] ; then
+        # virsh running on a guest
+        return 1
+    fi
+
+    # find all pci devices using the mlx4_core driver
+    MLX4_CORE_DEVICES=`for j in \`virsh nodedev-list | grep pci \` ; do
+        virsh nodedev-dumpxml $j 2> /dev/null| grep mlx4_core > /dev/null
+        if [ $? -eq 0 ] ; then echo $j; fi
+    done`
+
+    # for all devices using mlx4_core, see if any have active VFs
+    ACTIVE_MLX4_VFS=`for k in \`echo $MLX4_CORE_DEVICES\` ; do
+        IFS=$'\n'
+        for f in \`virsh -d 4 nodedev-dumpxml $k | grep "address domain"\` ; do
+            for g in `virsh list | grep -E  "running|paused" | awk '{ print $2 }' `; do
+                virsh dumpxml $g 2> /dev/null | grep $f | grep "address domain"
+            done
+        done
+    done`
+
+    if [ "x$ACTIVE_MLX4_VFS" = "x" ] ; then
+        # NO GUESTS
+        return 1
+    else
+        # There are active virtual functions
+        return 0
+    fi
+}
+
+start()
+{
+    local RC=0
+
+    if is_active_vf; then
+        echo "There are active virtual functions. Cannot continue..."
+        exit 1
+    fi
+
+    # W/A: inbox drivers are loaded at boot instead of new ones
+    local loaded_modules=$(/sbin/lsmod 2>/dev/null | grep -E '^be2net|^cxgb|^mlx|^iw_nes|^i40iw|^iw_cxgb|^ib_qib|^ib_mthca|^ocrdma|^bnxt_re|^ib_ipoib|^ib_srp|^ib_iser|^ib_uverbs|^ib_addr|^ib_mad|^ib_sa|^iw_cm|^ib_core|^ib_ucm|^ib_cm|^rdma_ucm|^ib_umad|^rdma_cm|^compat' | awk '{print $1}')
+    for loaded_module in $loaded_modules
+    do
+        local loaded_srcver=$(/bin/cat /sys/module/$loaded_module/srcversion 2>/dev/null)
+        local curr_srcver=$(/sbin/modinfo $loaded_module 2>/dev/null | grep srcversion | awk '{print $NF}')
+        if [ "X$loaded_srcver" != "X$curr_srcver" ]; then
+            log_msg "start(): Detected loaded old version of module '$loaded_module', calling stop..."
+            stop
+            break
+        fi
+    done
+
+    # W/A: modules loaded from initrd without taking new params from /etc/modprobe.d/
+    local conf_files=$(grep -rE "options.*mlx" /etc/modprobe.d/*.conf 2>/dev/null | grep -v ":#" | cut -d":" -f"1" | uniq)
+    local goFlag=1
+    if [ "X$conf_files" != "X" ]; then
+        for file in $conf_files
+        do
+            while read line && [ $goFlag -eq 1 ]
+            do
+                local curr_mod=$(echo $line | sed -r -e 's/.*options //g' | awk '{print $NR}')
+                if ! is_module $curr_mod; then
+                    continue
+                fi
+                for item in $(echo $line | sed -r -e "s/.*options\s*${curr_mod}//g")
+                do
+                    local param=${item%=*}
+                    local conf_value=${item##*=}
+                    local real_value=$(cat /sys/module/${curr_mod}/parameters/${param} 2>/dev/null)
+                    if [ "X$conf_value" != "X$real_value" ]; then
+                        log_msg "start(): Detected '$curr_mod' loaded with '$param=$real_value' instead of '$param=$conf_value' as configured in '$file', calling stop..."
+                        goFlag=0
+                        stop
+                        break
+                    fi
+                done
+            done < $file
+            if [ $goFlag -ne 1 ]; then
+                break
+            fi
+        done
+    fi
+
+    if is_ivyb; then
+        # Clear SB registers on IvyB machines
+        ivyb_slots=`/sbin/lspci -n | grep -w '8086:0e28' | cut -d ' ' -f 1`
+        for ivyb_slot in $ivyb_slots
+        do
+            if [ "0x`/sbin/setpci -s $ivyb_slot 0x858.W`" == "0x0000" ]; then
+                setpci -s $ivyb_slot 0x858.W=0xffff
+            fi
+            if [ "0x`/sbin/setpci -s $ivyb_slot 0x85C.W`" == "0x0000" ]; then
+                setpci -s $ivyb_slot 0x85C.W=0xffff
+            fi
+        done
+    fi
+
+    if [ $DISTRIB = "SuSE" ]; then
+        if [ -x /sbin/rpc.statd ]; then
+            /sbin/rpc.statd
+        fi
+    fi
+
+    # Load Mellanox HCA driver
+    if [ "X${MTHCA_LOAD}" == "Xyes" ]; then
+        load_module ib_mthca
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading Mellanox HCA driver: "
+        fi
+        RC=$[ $RC + $my_rc ]
+    fi
+
+    if [ "X${MLX4_LOAD}" == "Xyes" ]; then
+        load_module mlx4_core
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading Mellanox MLX4 HCA driver: "
+        else
+                # Set port configuration
+                if [ -f /etc/infiniband/connectx.conf ]; then
+                         . /etc/infiniband/connectx.conf > /dev/null 2>&1
+                fi
+        fi
+        load_module mlx4_ib
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading Mellanox MLX4_IB HCA driver: "
+        fi
+        RC=$[ $RC + $my_rc ]
+    fi
+
+    if [ "X${MLX4_EN_LOAD}" == "Xyes" ]; then
+        if ! is_module mlx4_core; then
+                load_module mlx4_core
+        fi
+
+        load_module mlx4_en
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading Mellanox MLX4_EN HCA driver: "
+        fi
+        RC=$[ $RC + $my_rc ]
+    fi
+
+    if [ "X${MLX5_LOAD}" == "Xyes" ]; then
+        load_module mlx5_core
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading Mellanox MLX5 HCA driver: "
+        fi
+        load_module mlx5_ib
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading Mellanox MLX5_IB HCA driver: "
+        fi
+        RC=$[ $RC + $my_rc ]
+    fi
+
+    # Load QLogic QIB driver
+    if [ "X${QIB_LOAD}" == "Xyes" ]; then
+        load_module ib_qib
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+            echo_failure $"Loading QLogic QIB driver: "
+        elif [ -x ${_truescale} ]; then
+            ${_truescale} start
+        fi
+        RC=$[ $RC + $my_rc ]
+    fi
+
+    # Load QLogic InfiniPath driver
+    if [ "X${IPATH_LOAD}" == "Xyes" ]; then
+        load_module ib_ipath
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading QLogic InfiniPath driver: "
+        fi
+        # Don't exit on error
+        # Workaround for Bug 252.
+        # RC=$[ $RC + $my_rc ]
+    fi
+
+    # Load eHCA driver
+    if [ "X${EHCA_LOAD}" == "Xyes" ]; then
+        fix_location_codes
+        load_module ib_ehca
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading eHCA driver: "
+        fi
+        RC=$[ $RC + $my_rc ]
+    fi
+
+    # Load iw_cxgb3 driver
+    if [ "X${CXGB3_LOAD}" == "Xyes" ]; then
+        fix_location_codes
+        load_module iw_cxgb3
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading cxgb3 driver: "
+        fi
+        RC=$[ $RC + $my_rc ]
+    fi
+
+    # Load iw_cxgb4 driver
+    if [ "X${CXGB4_LOAD}" == "Xyes" ]; then
+        fix_location_codes
+        load_module iw_cxgb4
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading cxgb4 driver: "
+        fi
+        RC=$[ $RC + $my_rc ]
+    fi
+
+    # Load iw_nes driver
+    if [ "X${NES_LOAD}" == "Xyes" ]; then
+        fix_location_codes
+        load_module iw_nes
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading nes driver: "
+        fi
+        RC=$[ $RC + $my_rc ]
+    fi
+
+    # Load i40iw driver
+    if [ "X${I40IW_LOAD}" == "Xyes" ]; then
+        fix_location_codes
+        load_module i40iw
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading i40iw driver: "
+        fi
+        RC=$[ $RC + $my_rc ]
+    fi
+
+    # Load Broadcom bnxt_re driver
+    if [ "X${BNXT_RE_LOAD}" == "Xyes" ]; then
+        load_module bnxt_re
+        load_module bnxt_en
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+                echo_failure $"Loading Broadcom Netxtreme driver: "
+        fi
+        RC=$[ $RC + $my_rc ]
+    fi
+
+    # Load Emulex One Connect driver
+    if [ "X${OCRDMA_LOAD}" == "Xyes" ]; then
+        load_module ocrdma
+        my_rc=$?
+        if [ $my_rc -ne 0 ]; then
+            echo_failure $"Loading Emulex One Connect driver: "
+        elif [ -x ${_truescale} ]; then
+            ${_truescale} start
+        fi
+        RC=$[ $RC + $my_rc ]
+    fi
+
+    # Load VMware Paravirtual RDMA driver
+    if [ "X${VMW_PVRDMA_LOAD}" == "Xyes" ]; then
+       load_module vmw_pvrdma
+       my_rc=$?
+       if [ $my_rc -ne 0 ]; then
+           echo_failure $"Loading VMware Paravirtual RDMA driver: "
+       fi
+       RC=$[ $RC + $my_rc ]
+    fi
+
+    ib_set_node_desc > /dev/null 2>&1 &
+
+    load_module ib_umad
+    RC=$[ $RC + $? ]
+    load_module ib_uverbs
+    RC=$[ $RC + $? ]
+
+    if [ $IPOIB -eq 1 ]; then
+        load_module ib_ipoib
+        RC=$[ $RC + $? ]
+    fi
+
+    if [ $RC -eq 0 ]; then
+        echo_success $"Loading HCA driver and Access Layer: "
+    else
+        echo_failure $"Loading HCA driver and Access Layer: "
+        get_debug_info
+        exit 1
+    fi
+
+    # Enable IPoIB Interface if configured
+    if [ $IPOIB -eq 1 ]; then
+        get_interfaces
+        echo Setting up InfiniBand network interfaces:
+        for i in $interfaces
+        do
+                if [[ ! -e ${WD}/ifcfg-${i} && ! -e ${NETWORK_CONF_DIR}/ifcfg-${i} ]]; then
+                        echo "No configuration found for ${i}"
+                        if [ "X${SET_IPOIB_CM}" == "Xyes" ]; then
+                                set_ipoib_cm ${i}
+                        fi
+                else
+                        REMOVE_NETWORK_CONF=0
+                        if [ ! -e ${NETWORK_CONF_DIR}/ifcfg-${i} ]; then
+                                ln -snf ${WD}/ifcfg-${i} ${NETWORK_CONF_DIR}/ifcfg-${i}
+                                REMOVE_NETWORK_CONF=1
+                        fi
+
+                        if [ "$RUNMODE" != "manual" ]; then
+                                if ! is_onboot $i; then
+                                        continue
+                                fi
+                        fi
+
+                        bring_up $i
+                        RC=$?
+
+                        unset IPADDR NETMASK BROADCAST
+
+                        if [ $REMOVE_NETWORK_CONF -eq 1 ]; then
+                            rm -f ${NETWORK_CONF_DIR}/ifcfg-${i}
+                        fi
+
+                        if [ $RC -eq 0 ]; then
+                                echo_success $"Bringing up interface $i:"
+                        else
+                                echo_failure $"Bringing up interface $i:"
+                        fi
+                fi
+
+                # Bring up child interfaces if configured
+                for child_conf in $(/bin/ls -1 ${NETWORK_CONF_DIR}/ifcfg-${i}.???? 2> /dev/null)
+                do
+                        ch_i=${child_conf##*-}
+                        # Skip saved interfaces rpmsave and rpmnew
+                        if (echo $ch_i | grep rpm > /dev/null 2>&1); then
+                                continue
+                        fi
+                        if [ "$RUNMODE" != "manual" ]; then
+                                if ! is_onboot $ch_i; then
+                                        continue
+                                fi
+                        fi
+
+                        if [ ! -f /sys/class/net/${i}/create_child ]; then
+                                continue
+                        fi
+
+                        pkey=0x${ch_i##*.}
+                       if [ ! -e /sys/class/net/${i}.${ch_i##*.} ] ; then
+                               echo $pkey > /sys/class/net/${i}/create_child
+                       fi
+                        bring_up $ch_i
+                        RC=$?
+
+                        unset IPADDR NETMASK BROADCAST
+                        if [ $RC -eq 0 ]; then
+                                echo_success $"Bringing up interface $ch_i:"
+                        else
+                                echo_failure $"Bringing up interface $ch_i:"
+                        fi
+                done
+        done
+        echo_done "Setting up service network   .   .   ."
+
+    fi
+
+    # Load configured modules
+    if [ "$POST_LOAD_MODULES" != "" ]; then
+        for mod in  $POST_LOAD_MODULES
+        do
+                case $mod in
+                        ib_srp)
+                                load_module $mod
+                                if [ "X${SRPHA_ENABLE}" == "Xyes" ]; then
+                                    if [ ! -x /sbin/multipath ]; then
+                                        echo "/sbin/multipath is required to enable SRP HA."
+                                    else
+                                        # Create 91-srp.rules file
+                                        mkdir -p /etc/udev/rules.d
+                                        if [ "$DISTRIB" == "SuSE"  ]; then
+                                            cat > /etc/udev/rules.d/91-srp.rules << EOF
+ACTION=="add", KERNEL=="sd*[!0-9]", RUN+="/sbin/multipath %M:%m"
+EOF
+                                        fi
+                                        ${modprobe} dm_multipath > /dev/null 2>&1
+                                        srp_daemon.sh &
+                                        srp_daemon_pid=$!
+                                        echo ${srp_daemon_pid} > ${srp_daemon_pidfile}
+                                    fi
+                                elif [ "X${SRP_DAEMON_ENABLE}" == "Xyes" ]; then
+                                        srp_daemon.sh &
+                                        srp_daemon_pid=$!
+                                        echo ${srp_daemon_pid} > ${srp_daemon_pidfile}
+                                fi
+                        ;;
+                        *)
+                                load_module $mod
+                        ;;
+                esac
+                RC=$?
+                [ $RC -ne 0 ] && echo_failure "Loading $mod"
+        done
+    fi
+
+    # Create devices using udev
+    if [ -x /sbin/udevstart ]; then
+        UDEVSTART=/sbin/udevstart
+    elif [ -x /sbin/start_udev ]; then
+        UDEVSTART=/sbin/start_udev
+    else
+        UDEVSTART=
+    fi
+
+    if [ ! -z "${UDEVSTART}" ]; then
+        devstart_cnt=0
+        devstart_maxcnt=10
+        while [ ! -d /dev/infiniband/ ] && [ $devstart_cnt -lt $devstart_maxcnt ]; do
+            sleep 1
+            let devstart_cnt++
+        done
+
+        if [ ! -d /dev/infiniband/ ] && [ $devstart_cnt -eq $devstart_maxcnt ]; then
+            ${UDEVSTART} > /dev/null 2>&1
+        fi
+
+        if [ ! -d /dev/infiniband/ ]; then
+            echo_warning $"udevstart: No devices created under /dev/infiniband"
+        fi
+    fi
+
+   # Create qlgc_vnic interfaces. This needs to be done after udevstart
+    if [ "X${QLGC_VNIC_LOAD}" == "Xyes" ]; then
+        if [ -x /etc/init.d/qlgc_vnic ]; then
+               /etc/init.d/qlgc_vnic start
+        fi
+    fi
+
+    if [ "X${RENICE_IB_MAD}" == "Xyes" ]; then
+        # Set max_ports_num_in_hca variable
+        count_ib_ports
+        ports_num=$?
+        list_of_ibmads=""
+        for (( i=1 ; $i <= ${max_ports_num_in_hca} ; i++ ))
+        do
+                list_of_ibmads="${list_of_ibmads} ib_mad${i}"
+        done
+
+        ib_mad_pids=($(pidof ${list_of_ibmads} 2> /dev/null))
+        num_of_root_ibmad_procs=$(/bin/ps h -o user -p ${ib_mad_pids[*]} | grep -w root | wc -l)
+        get_pid_retries=0
+        while [ ${num_of_root_ibmad_procs} -lt $ports_num ]
+        do
+            # Wait maximum for 5 sec to get ib_mad process pid
+            if [ $get_pid_retries -gt 10 ]; then
+                    echo Failed to get $ports_num ib_mad PIDs to renice. Got ${num_of_root_ibmad_procs}.
+                    break
+            fi
+            usleep 500000
+            ib_mad_pids=($(pidof ${list_of_ibmads} 2> /dev/null))
+            num_of_root_ibmad_procs=$(/bin/ps h -o user -p ${ib_mad_pids[*]} | grep -w root | wc -l)
+            let get_pid_retries++
+        done
+        for ib_mad_pid in ${ib_mad_pids[*]}
+        do
+            if [ "$(/bin/ps -p ${ib_mad_pid} h -o user 2> /dev/null)" == "root" ]; then
+                    renice -19 ${ib_mad_pid} > /dev/null 2>&1
+            fi
+        done
+    fi
+
+    if  [ -x /sbin/sysctl_perf_tuning ] && [ "X${RUN_SYSCTL}" == "Xyes" ]; then
+        /sbin/sysctl_perf_tuning load
+    fi
+
+    return $RC
+}
+
+UNLOAD_REC_TIMEOUT=100
+unload_rec()
+{
+        local mod=$1
+        shift
+
+        if is_module $mod ; then
+                ${modprobe} -r $mod >/dev/null 2>&1
+                if [ $? -ne 0 ];then
+                        for dep in `/sbin/rmmod $mod 2>&1 | grep "is in use by" | sed -r -e 's/.*use by //g' | sed -e 's/,/ /g'`
+                        do
+                                # if $dep was not loaded by openibd, don't unload it; fail with error.
+                                if ! `echo $UNLOAD_MODULES | grep -q $dep` ; then
+                                        rm_mod $mod
+                                else
+                                        unload_rec $dep
+                                fi
+                        done
+                fi
+                if is_module $mod ; then
+                        if [ "X$RUNMODE" == "Xauto" ] && [ "X$mod" == "Xmlx4_core" ] && [ $UNLOAD_REC_TIMEOUT -gt 0 ]; then
+                                let UNLOAD_REC_TIMEOUT--
+                                sleep 1
+                                unload_rec $mod
+                        else
+                                rm_mod $mod
+                        fi
+                fi
+        fi
+}
+
+rm_mod()
+{
+        local mod=$1
+        shift
+
+        unload_log=`/sbin/rmmod $mod 2>&1`
+        if [ $? -ne 0 ]; then
+            echo_failure $"Unloading $mod"
+            if [ ! -z "${unload_log}" ]; then
+                echo $unload_log
+            fi
+            # get_debug_info
+            [ ! -z $2 ] && echo $2
+            exit 1
+        fi
+}
+
+unload()
+{
+        # Unload module $1
+        local mod=$1
+        local unload_log
+
+        if is_module $mod; then
+            case $mod in
+                ib_ipath)
+                    # infinipath depends on modprobe.conf remove rule
+                    unload_rec $mod
+                    sleep 2
+                    ;;
+                ib_qib)
+                    if [ -x ${_truescale} ]; then
+                        ${_truescale} stop
+                    fi
+
+                    if [ -d /ipathfs ]; then
+                        umount /ipathfs
+                        rmdir /ipathfs
+                    fi
+
+                    unload_rec $mod
+                    sleep 2
+                    ;;
+                ib_mthca | mlx4_ib | mlx5_ib | ib_ehca | iw_cxgb3 | iw_cxgb4 | iw_nes | i40iw)
+                    unload_rec $mod
+                    sleep 2
+                    ;;
+                *)
+                    unload_rec $mod
+                    if [ $? -ne 0 ] || is_module $mod; then
+                        # Try rmmod if modprobe failed: a previous installation may have included more IB modules.
+                        unload_rec $mod
+                    fi
+                    ;;
+            esac
+        fi
+}
+
+stop()
+{
+
+        # Check if Lustre is loaded
+        if ( grep -q "ko2iblnd" /proc/modules ); then
+            echo
+            echo "Please stop Lustre services before unloading the"
+            echo "Infiniband stack."
+            echo
+            exit 1
+        fi
+
+        if is_active_vf; then
+            echo "There are active virtual functions. Cannot continue..."
+            exit 1
+        fi
+
+        # Check if applications which use infiniband are running
+        local apps="opensm osmtest ibbs ibns ibacm iwpmd"
+        local pid
+
+        for app in $apps
+        do
+        if ( /usr/bin/pgrep $app > /dev/null 2>&1 ); then
+                echo
+                echo "Please stop \"$app\" and all applications running over InfiniBand"
+                echo "Then run \"$0 $ACTION\""
+                echo
+                exit 1
+            fi
+        done
+
+        # Lookup for remaining applications using infiniband devices
+        local entries
+
+        if [ -d /dev/infiniband ]; then
+            entries=$(lsof +c 0 +d /dev/infiniband 2>/dev/null | grep -v "^COMMAND" | \
+            awk '{print $1 " " $2 " " $3 " " $NF}' | sort -u)
+        fi
+
+        if [ -n "$entries" ]; then
+
+            echo "Please stop the following applications still using InfiniBand devices:"
+
+            while IFS= read -r entry; do
+                app=$(echo "$entry" | cut -f1 -d' ')
+                pid=$(echo "$entry" | cut -f2 -d' ')
+                owner=$(echo "$entry" | cut -f3 -d' ')
+                device=$(echo "$entry" | cut -f4 -d' ' | awk -F/ '{print $NF}')
+
+                echo "$app($pid) user $owner is using device $device"
+            done <<< "$entries"
+
+            echo
+            echo "Then run \"$0 $ACTION\""
+
+            exit 1
+        fi
+
+        # W/A for http://bugs.openfabrics.org/bugzilla/show_bug.cgi?id=2259
+        for bond in $(cat /sys/class/net/bonding_masters 2> /dev/null) ; do
+                if_type=$(cat /sys/class/net/$bond/type 2> /dev/null)
+                if [ $if_type -eq 32 ] ; then
+                        for slave in $(cat /sys/class/net/$bond/bonding/slaves 2> /dev/null) ; do
+                                echo -$slave > /sys/class/net/$bond/bonding/slaves
+                        done
+                        echo -$bond > /sys/class/net/bonding_masters
+                fi
+        done
+
+        # Check if open-iscsi is running and if there are open iSER sessions
+        if [ $(pidof iscsid | wc -w) -gt 0 ]; then
+                iser_session_cnt=$(iscsiadm -m session 2>&1 | grep -c "^iser")
+
+                if [ $iser_session_cnt -gt 0 ]; then
+                        echo
+                        # If it's RH4, open-iscsi must be stopped before openibd
+                        if [[ -f /etc/redhat-release && $(grep -c "Red Hat Enterprise Linux AS release 4" /etc/redhat-release) -eq 1 ]]; then
+                            echo "Please stop open-iscsi: /etc/init.d/iscsi stop"
+                        else
+                            echo "Please logout from all open-iscsi over iSER sessions"
+                        fi
+                        echo "Then run \"$0 $ACTION\""
+                        echo
+                        exit 1
+                fi
+        fi
+
+        # Check for any multipath devices running over SRP devices
+        if is_module ib_srp; then
+            for f in `/bin/ls /sys/class/scsi_host`; do
+                if [ -f /sys/class/scsi_host/$f/local_ib_port ]; then
+                    for i in `/bin/ls /sys/class/scsi_host/$f/device/target*/*/block* | awk -F: '{print $NF}'`
+                    do
+                        holders=`ls /sys/block/$i/holders 2> /dev/null`
+                        if [ -n "$holders" ]; then
+                            echo "Please flush multipath devices running over SRP devices"
+                            echo
+                            exit 1
+                        fi
+                    done
+                fi
+            done
+        fi
+        # Stop IPoIB HA daemon if running
+        if [ -f $ipoib_ha_pidfile ]; then
+                local line p
+                read line < $ipoib_ha_pidfile
+                for p in $line ; do
+                        [ -z "${p//[0-9]/}" -a -d "/proc/$p" ] && ipoib_ha_pids="$ipoib_ha_pids $p"
+                done
+                /bin/rm -f $ipoib_ha_pidfile
+        fi
+
+        if [ -n "${ipoib_ha_pids:-}" ]; then
+            kill -9 ${ipoib_ha_pids} > /dev/null 2>&1
+            mcastpid=$(pidof -x mcasthandle)
+            if [ -n "${mcastpid:-}" ]; then
+                kill -9 ${mcastpid} > /dev/null 2>&1
+            fi
+        fi
+
+        # Stop SRP HA daemon if running
+        if [ -f $srp_daemon_pidfile ]; then
+                local line p
+                read line < $srp_daemon_pidfile
+                for p in $line ; do
+                        [ -z "${p//[0-9]/}" -a -d "/proc/$p" ] && srp_daemon_pids="$srp_daemon_pids $p"
+                done
+                /bin/rm -f $srp_daemon_pidfile
+        fi
+
+        if [ -n "${srp_daemon_pids:-}" ]; then
+            kill -15 ${srp_daemon_pids} > /dev/null 2>&1
+        fi
+
+        if [ "X${SRPHA_ENABLE}" == "Xyes" ]; then
+                /bin/rm -f /etc/udev/rules.d/91-srp.rules > /dev/null 2>&1
+                mpath_pids=$(pidof -x multipath)
+                if [ -n "${mpath_pids:-}" ]; then
+                    kill -9 ${mpath_pids} > /dev/null 2>&1
+                fi
+
+                if is_module ib_srp; then
+                    for f in `/bin/ls /sys/class/scsi_host`
+                    do
+                            if [ -f /sys/class/scsi_host/$f/local_ib_port ]; then
+                                    for i in `/bin/ls -d /sys/class/scsi_host/$f/device/target*/*/block* | awk -F: '{print $NF}'`
+                                    do
+                                        mdev=`/sbin/scsi_id -g -s /block/$i 2> /dev/null`
+                                        if [ -n "${mdev}" ]; then
+                                            /sbin/multipath -f $mdev > /dev/null 2>&1
+                                        fi
+                                    done
+                            fi
+                    done
+                fi
+        fi
+
+               if [ -d /sys/class/infiniband_qlgc_vnic/ ]; then
+                    if [ -x /etc/init.d/qlgc_vnic ]; then
+                       /etc/init.d/qlgc_vnic stop 2>&1 1>/dev/null
+                    fi
+               fi
+
+        # Unload modules
+        if [ "$UNLOAD_MODULES" != "" ]; then
+                for mod in  $UNLOAD_MODULES
+                do
+                        unload $mod
+                done
+        fi
+
+        # Unload mlx4_core
+        if is_module mlx4_core; then
+            is_ref mlx4_core
+            if [ $? -eq 0 ]; then
+                unload mlx4_core
+            elif is_module mlx4_en; then
+                # Unload mlx4_en if one or more of the following cases takes place:
+                # - No MLX4 eth devices present
+                # - mlx4_en module was not loaded by the openibd script
+                if (grep 0x15b3 /sys/class/net/eth*/device/vendor > /dev/null 2>&1) && [ "X$MLX4_EN_LOAD" != "Xyes" ]; then
+                    echo "MLX4_EN module is loaded and in use."
+                    echo "To unload MLX4_EN run: 'modprobe -r mlx4_en mlx4_core'"
+                else
+                    unload mlx4_en
+                    unload mlx4_core
+                fi
+            fi
+        fi
+
+        if  [ -x /sbin/sysctl_perf_tuning ] && [ "X${RUN_SYSCTL}" == "Xyes" ]; then
+            /sbin/sysctl_perf_tuning unload
+        fi
+
+        /bin/rm -rf /dev/infiniband
+        echo_success $"Unloading HCA driver: "
+        sleep 1
+}
+
+status()
+{
+    local RC=0
+
+       if is_module ib_mthca || is_module mlx4_core || is_module mlx5_core || is_module ib_qib || is_module ib_ipath || is_module ib_ehca || is_module iw_cxgb3 || is_module iw_cxgb4 || is_module iw_nes || is_module i40iw; then
+               echo
+               echo "  HCA driver loaded"
+               echo
+       else
+               echo
+               echo $"HCA driver is not loaded"
+               echo
+       fi
+
+    if is_module ib_ipoib; then
+       get_interfaces
+       if [ -n "$interfaces" ]; then
+           echo $"Configured IPoIB devices:"
+           echo $interfaces
+           echo
+           echo $"Currently active IPoIB devices:"
+
+           for i in $interfaces
+           do
+                if [[ ! -e ${NETWORK_CONF_DIR}/ifcfg-${i} ]]; then
+                    continue
+                fi
+                echo `/sbin/ip -o link show $i | awk -F ": " '/UP>/ { print $2 }'`
+                RC=$?
+           done
+       fi
+    fi
+
+    if is_module mlx4_en; then
+       get_mlx4_en_interfaces
+       if [ -n "$mlx4_en_interfaces" ]; then
+           echo $"Configured MLX4_EN devices:"
+           echo $mlx4_en_interfaces
+           echo
+           echo $"Currently active MLX4_EN devices:"
+
+           for i in $mlx4_en_interfaces
+           do
+                echo `/sbin/ip -o link show $i | awk -F ": " '/UP>/ { print $2 }'`
+           done
+       fi
+    fi
+
+    echo
+
+    local cnt=0
+
+    for mod in  $STATUS_MODULES
+    do
+        if is_module $mod; then
+                [ $cnt -eq 0 ] && echo "The following OFED modules are loaded:" && echo
+                let cnt++
+                echo "  $mod"
+        fi
+    done
+
+    echo
+
+    return $RC
+}
+
+
+RC=0
+start_time=$(date +%s | tr -d '[:space:]')
+
+trap_handler()
+{
+    let run_time=$(date +%s | tr -d '[:space:]')-${start_time}
+
+    # Ask to wait for 5 seconds if trying to stop openibd
+    if [ $run_time -gt 5 ] && [ "$ACTION" == "stop" ]; then
+        printf "\nProbably some applications are still using InfiniBand modules...\n"
+    else
+        printf "\nPlease wait ...\n"
+    fi
+    return 0
+}
+
+trap 'trap_handler' 2 9 15
+
+case $ACTION in
+        start)
+                start
+                ;;
+        stop)
+                stop
+                ;;
+        restart)
+                stop
+                start
+                ;;
+        status)
+                status
+                ;;
+        *)
+                echo
+                echo "Usage: `basename $0` {start|stop|restart|status}"
+                echo
+                exit 1
+                ;;
+esac
+
+RC=$?
+exit $RC
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/base/infiniband/openibd.service b/grid5000/steps/data/setup/puppet/modules/env/files/base/infiniband/openibd.service
new file mode 100644
index 0000000..d71e899
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/base/infiniband/openibd.service
@@ -0,0 +1,22 @@
+[Unit]
+SourcePath=/etc/init.d/openibd
+Description=LSB: Activates/Deactivates InfiniBand Driver to start at boot time.
+Before=runlevel2.target runlevel3.target runlevel5.target shutdown.target
+After=local-fs.target network.target network-online.target
+Conflicts=shutdown.target
+
+[Service]
+Type=forking
+Restart=no
+TimeoutSec=5min
+IgnoreSIGPIPE=no
+KillMode=process
+GuessMainPID=no
+RemainAfterExit=yes
+SysVStartPriority=1
+ExecStart=/etc/init.d/openibd start
+ExecStop=/etc/init.d/openibd stop
+
+[Install]
+WantedBy=multi-user.target
+WantedBy=network-online.target
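+
+# Illustrative activation once the unit file is installed (not part of the
+# unit itself):
+#   systemctl daemon-reload && systemctl enable --now openibd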
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/base/kexec/kexec b/grid5000/steps/data/setup/puppet/modules/env/files/base/kexec/kexec
new file mode 100644
index 0000000..5a7e9db
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/base/kexec/kexec
@@ -0,0 +1,13 @@
+# Defaults for kexec initscript
+# sourced by /etc/init.d/kexec and /etc/init.d/kexec-load
+
+# Load a kexec kernel (true/false)
+LOAD_KEXEC=false
+
+# Kernel and initrd image
+KERNEL_IMAGE="/vmlinuz"
+INITRD="/initrd.img"
+
+# If empty, use current /proc/cmdline
+APPEND=""
+
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/base/mx/ip_over_mx b/grid5000/steps/data/setup/puppet/modules/env/files/base/mx/ip_over_mx
new file mode 100644
index 0000000..d9f6ceb
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/base/mx/ip_over_mx
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+if [ "$IFACE" != "myri0" ]; then
+    exit 0
+fi
+
+SHORTNAME=$(hostname -s)
+
+/etc/init.d/mx start
+
+/sbin/ifconfig "$IFACE" $(gethostip -d "$SHORTNAME-$IFACE") netmask 255.255.240.0 up
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/base/ndctl/ndctl.preset b/grid5000/steps/data/setup/puppet/modules/env/files/base/ndctl/ndctl.preset
new file mode 100644
index 0000000..d487ae7
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/base/ndctl/ndctl.preset
@@ -0,0 +1 @@
+disable ndctl-monitor.service
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/base/sshfs/40-fuse.rules b/grid5000/steps/data/setup/puppet/modules/env/files/base/sshfs/40-fuse.rules
new file mode 100644
index 0000000..9585111
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/base/sshfs/40-fuse.rules
@@ -0,0 +1 @@
+KERNEL=="fuse", MODE="0666"
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/base/tuning/limits-grid5000.conf b/grid5000/steps/data/setup/puppet/modules/env/files/base/tuning/limits-grid5000.conf
new file mode 100644
index 0000000..9483bec
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/base/tuning/limits-grid5000.conf
@@ -0,0 +1,5 @@
+# Grid 5000
+# Needed for openmpi
+* hard memlock unlimited
+* soft memlock unlimited
+
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/base/tuning/sysctl-00-grid5000.conf b/grid5000/steps/data/setup/puppet/modules/env/files/base/tuning/sysctl-00-grid5000.conf
new file mode 100644
index 0000000..ff44ed6
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/base/tuning/sysctl-00-grid5000.conf
@@ -0,0 +1,6 @@
+#
+# Grid'5000 Tuning
+net.ipv4.tcp_rmem=4096 87380 67108864
+net.ipv4.tcp_wmem=4096 16384 67108864
+net.core.rmem_max = 4194304
+net.core.wmem_max = 4194304
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/base/userns/sysctl-00-userns.conf b/grid5000/steps/data/setup/puppet/modules/env/files/base/userns/sysctl-00-userns.conf
new file mode 100644
index 0000000..575f6aa
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/base/userns/sysctl-00-userns.conf
@@ -0,0 +1,2 @@
+# Needed for Nix
+kernel.unprivileged_userns_clone=1
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/big/amd_gpu/70-amdgpu.rules b/grid5000/steps/data/setup/puppet/modules/env/files/big/amd_gpu/70-amdgpu.rules
new file mode 100644
index 0000000..325f040
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/big/amd_gpu/70-amdgpu.rules
@@ -0,0 +1,4 @@
+# INSTALLED BY PUPPET
+KERNEL=="kfd", GROUP="8000", MODE="0660"
+KERNEL=="card*", DRIVERS=="amdgpu", GROUP="8000", MODE="0660"
+KERNEL=="renderD*", DRIVERS=="amdgpu", GROUP="8000", MODE="0660"
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/big/kvm/60-qemu-system.rules b/grid5000/steps/data/setup/puppet/modules/env/files/big/kvm/60-qemu-system.rules
new file mode 100644
index 0000000..c99e555
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/big/kvm/60-qemu-system.rules
@@ -0,0 +1 @@
+KERNEL=="kvm", GROUP="8000", MODE="0666"
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/big/kvm/create_tap b/grid5000/steps/data/setup/puppet/modules/env/files/big/kvm/create_tap
new file mode 100644
index 0000000..273fbe4
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/big/kvm/create_tap
@@ -0,0 +1,13 @@
+#!/bin/sh
+set -e
+BRIDGE="br0"
+if [ -z "$SUDO_USER" ]; then
+  echo "error: SUDO_USER is not set"
+  exit 1
+fi
+TAPDEV=`tunctl -b -u $SUDO_USER`
+/sbin/brctl addif $BRIDGE $TAPDEV
+ip link set $TAPDEV up
+echo $TAPDEV >> /var/lib/oar/tap_devices_for_user_$SUDO_USER
+chown oar:oar /var/lib/oar/tap_devices_for_user_$SUDO_USER
+echo $TAPDEV
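+# Illustrative invocation from a user job, matching the NOPASSWD sudoers entry
+# added later in this commit:
+#   TAP=$(sudo /usr/local/bin/create_tap)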
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/big/kvm/random_mac b/grid5000/steps/data/setup/puppet/modules/env/files/big/kvm/random_mac
new file mode 100644
index 0000000..9c2bc8a
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/big/kvm/random_mac
@@ -0,0 +1,38 @@
+#!/bin/sh
+
+SITE_NAME=$(hostname | cut -d. -f2)
+
+# Code the 2nd byte of the IP in the mac address, in order to avoid conflicts
+# with g5k-subnets (see [[Virtual network interlink]])
+
+if   [ "x$SITE_NAME" = "xbordeaux"     ] ; then
+  SITE_HEX=83
+elif [ "x$SITE_NAME" = "xlille"        ] ; then
+  SITE_HEX=8b
+elif [ "x$SITE_NAME" = "xlyon"         ] ; then
+  SITE_HEX=8f
+elif [ "x$SITE_NAME" = "xnancy"        ] ; then
+  SITE_HEX=93
+elif [ "x$SITE_NAME" = "xrennes"       ] ; then
+  SITE_HEX=9f
+elif [ "x$SITE_NAME" = "xtoulouse"     ] ; then
+  SITE_HEX=a3
+elif [ "x$SITE_NAME" = "xsophia"       ] ; then
+  SITE_HEX=a7
+elif [ "x$SITE_NAME" = "xreims"        ] ; then
+  SITE_HEX=ab
+elif [ "x$SITE_NAME" = "xluxembourg"   ] ; then
+  SITE_HEX=af
+elif [ "x$SITE_NAME" = "xnantes"       ] ; then
+  SITE_HEX=b3
+elif [ "x$SITE_NAME" = "xgrenoble"     ] ; then
+  SITE_HEX=b7
+elif [ "x$SITE_NAME" = "xqualif"       ] ; then
+  SITE_HEX=ff
+else
+  # Orsay (or unknown site)
+  SITE_HEX=97
+fi
+
+MACADDR="00:16:3e:$SITE_HEX:$(dd if=/dev/urandom count=1 2>/dev/null | md5sum | sed 's/^\(..\)\(..\).*$/\1:\2/')"
+echo $MACADDR
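+# Illustrative output on a nancy node: 00:16:3e:93:4f:a2 (93 encodes the site,
+# the last two bytes come from the random hash above).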
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/big/kvm/sudoers b/grid5000/steps/data/setup/puppet/modules/env/files/big/kvm/sudoers
new file mode 100644
index 0000000..2b5c5a8
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/big/kvm/sudoers
@@ -0,0 +1,2 @@
+# Allow members of group g5k-users to create a tap interface and add it to the bridge
+%g5k-users ALL=NOPASSWD: /usr/local/bin/create_tap, /usr/local/bin/mic-setup-my-user
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/big/mic/85-mic.rules b/grid5000/steps/data/setup/puppet/modules/env/files/big/mic/85-mic.rules
new file mode 100644
index 0000000..4ec0c5f
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/big/mic/85-mic.rules
@@ -0,0 +1,2 @@
+# Installed by puppet
+KERNEL=="scif", SUBSYSTEM=="mic", MODE="666"
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/big/mic/fstab b/grid5000/steps/data/setup/puppet/modules/env/files/big/mic/fstab
new file mode 100644
index 0000000..fd00fd1
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/big/mic/fstab
@@ -0,0 +1,2 @@
+nfs:/export/home       /home   nfs             rsize=8192,wsize=8192,nolock,intr       0       0
+nfs:/export/grid5000   /grid5000       nfs             rsize=8192,wsize=8192,nolock,intr       0       0
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/big/mic/mic0.filelist b/grid5000/steps/data/setup/puppet/modules/env/files/big/mic/mic0.filelist
new file mode 100644
index 0000000..6f5d3cb
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/big/mic/mic0.filelist
@@ -0,0 +1,41 @@
+dir /etc/network 755 0 0
+file /etc/network/interfaces etc/network/interfaces 644 0 0
+file /etc/passwd etc/passwd 644 0 0
+file /etc/shadow etc/shadow 000 0 0
+dir /home 755 0 0
+dir /root 755 0 0
+dir /grid5000 755 0 0
+file /root/.profile root/.profile 644 0 0
+dir /home/micuser 755 400 400
+file /home/micuser/.profile home/micuser/.profile 644 400 400
+file /etc/group etc/group 644 0 0
+dir /root/.ssh 700 0 0
+file /root/.ssh/id_rsa.pub root/.ssh/id_rsa.pub 600 0 0
+file /root/.ssh/authorized_keys root/.ssh/authorized_keys 600 0 0
+file /root/.ssh/id_rsa root/.ssh/id_rsa 600 0 0
+file /etc/hostname etc/hostname 644 0 0
+file /etc/resolv.conf etc/resolv.conf 644 0 0
+file /etc/nsswitch.conf etc/nsswitch.conf 644 0 0
+dir /etc/ssh 755 0 0
+file /etc/ssh/ssh_host_key etc/ssh/ssh_host_key 600 0 0
+file /etc/ssh/ssh_host_key.pub etc/ssh/ssh_host_key.pub 644 0 0
+file /etc/ssh/ssh_host_rsa_key etc/ssh/ssh_host_rsa_key 600 0 0
+file /etc/ssh/ssh_host_rsa_key.pub etc/ssh/ssh_host_rsa_key.pub 644 0 0
+file /etc/ssh/ssh_host_dsa_key etc/ssh/ssh_host_dsa_key 600 0 0
+file /etc/ssh/ssh_host_dsa_key.pub etc/ssh/ssh_host_dsa_key.pub 644 0 0
+file /etc/ssh/ssh_host_ecdsa_key etc/ssh/ssh_host_ecdsa_key 600 0 0
+file /etc/ssh/ssh_host_ecdsa_key.pub etc/ssh/ssh_host_ecdsa_key.pub 644 0 0
+file /etc/localtime etc/localtime 644 0 0
+file /etc/fstab etc/fstab 644 0 0
+dir /etc/init.d 0755 0 0
+dir /etc/rc5.d 0755 0 0
+file /etc/init.d/timesync etc/init.d/timesync 0755 0 0
+slink /etc/rc5.d/S01timesync ../init.d/timesync 0755 0 0
+dir /etc/init.d 0755 0 0
+dir /etc/rc5.d 0755 0 0
+file /etc/init.d/sysonline etc/init.d/sysonline 0755 0 0
+slink /etc/rc5.d/S99sysonline ../init.d/sysonline 0755 0 0
+file /etc/init.d/pm etc/init.d/pm 0755 0 0
+slink /etc/rc5.d/S90pm ../init.d/pm 0755 0 0
+file /etc/hosts etc/hosts 644 0 0
+slink /opt/intel /grid5000/software/intel 0777 0 0
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/big/mic/mpss b/grid5000/steps/data/setup/puppet/modules/env/files/big/mic/mpss
new file mode 100644
index 0000000..56a1a94
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/big/mic/mpss
@@ -0,0 +1,191 @@
+#!/bin/bash
+# Copyright 2010-2013 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License, version 2,
+# as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Disclaimer: The codes contained in these modules may be specific to
+# the Intel Software Development Platform codenamed Knights Ferry,
+# and the Intel product codenamed Knights Corner, and are not backward
+# compatible with other Intel products. Additionally, Intel will NOT
+# support the codes or instruction set in future products.
+#
+# Intel offers no warranty of any kind regarding the code. This code is
+# licensed on an "AS IS" basis and Intel is not obligated to provide
+# any support, assistance, installation, training, or other services
+# of any kind. Intel is also not obligated to provide any updates,
+# enhancements or extensions. Intel specifically disclaims any warranty
+# of merchantability, non-infringement, fitness for any particular
+# purpose, and any other warranty.
+#
+# Further, Intel disclaims all liability of any kind, including but
+# not limited to liability for infringement of any proprietary rights,
+# relating to the use of the code, even if Intel is notified of the
+# possibility of such liability. Except as expressly stated in an Intel
+# license agreement provided with this code and agreed upon with Intel,
+# no license, express or implied, by estoppel or otherwise, to any
+# intellectual property rights is granted herein.
+#
+# mpss Start mpssd.
+#
+# chkconfig: 2345 95 05
+# description: start MPSS stack processing.
+#
+### BEGIN INIT INFO
+# Provides: mpss
+# Required-Start:
+# Required-Stop: iptables
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: MPSS stack control
+# Description: MPSS stack control
+### END INIT INFO
+
+exec=/usr/sbin/mpssd
+sysfs="/sys/class/mic"
+
+. /lib/lsb/init-functions
+
+start()
+{
+       [ -x $exec ] || exit 5
+
+       # add directory to standard library search path
+       if [ ! -f /etc/ld.so.conf.d/mic.conf ]; then 
+               echo "/usr/lib64/" > /etc/ld.so.conf.d/mic.conf
+               ldconfig
+       fi
+
+       # create ssh key for root
+       if [ ! -f /root/.ssh/id_rsa ]; then
+               ssh-keygen -t rsa -P "" -f /root/.ssh/id_rsa
+       fi      
+
+       # enable access to nfs for mic (not needed with bridge)
+#      iptables -t nat -A POSTROUTING -s 192.168.0.0/16 -j MASQUERADE
+#      echo 1 >/proc/sys/net/ipv4/ip_forward 
+
+       # Ensure the driver is loaded
+       [ -d "$sysfs" ] || modprobe mic
+
+       echo -n $"Starting MPSS Stack: "
+
+        [ -d "/var/lock/subsys" ] || mkdir /var/lock/subsys
+       micctrl --initdefaults
+       start-stop-daemon --start --exec $exec
+
+       if [ "`ps -e | awk '{print $4}' | grep mpssd`" = "mpssd" ]; then
+               echo
+               micctrl -s
+               return 0
+       fi
+
+       micctrl -w 1> /dev/null
+       micctrl -s
+}
+
+stop()
+{
+       echo -n $"Shutting down MPSS Stack: "
+
+       WAITRET=0
+       MPSSD=`ps ax | grep /usr/sbin/mpssd | grep -v grep`
+
+       if [ "$MPSSD" = "" ]; then
+               echo
+               return 0;
+       fi
+
+       MPSSDPID=`echo $MPSSD | awk '{print $1}'`
+       kill -s QUIT $MPSSDPID > /dev/null 2>/dev/null
+       RETVAL=$?
+
+       if [ $RETVAL = 0 ]; then
+               while [ "`ps -e | awk '{print $4}' | grep mpssd`" = "mpssd" ]; do sleep 1; done
+               micctrl -w 1> /dev/null
+               WAITRET=$?
+               if [ $WAITRET = 9 ]; then
+                       echo -n $"Shutting down MPSS Stack by force: "
+                       micctrl -r 1> /dev/null
+                       RETVAL=$?
+                       if [ $RETVAL = 0 ]; then
+                               micctrl -w 1> /dev/null
+                               WAITRET=$?
+                       fi
+               fi
+       fi
+
+}
+
+restart()
+{
+       stop
+       start
+}
+
+status()
+{
+       if [ "`ps -e | awk '{print $4}' | grep mpssd`" = "mpssd" ]; then
+                echo "mpss is running"
+               STOPPED=0
+        else
+                echo "mpss is stopped"
+               STOPPED=3
+        fi
+        return $STOPPED
+}
+
+unload()
+{
+       if [ ! -d "$sysfs" ]; then
+               return
+       fi
+
+       stop
+       RETVAL=$?
+
+       echo $"Removing MIC Module: "
+
+       if [ $RETVAL = 0 ]; then
+               sleep 1
+               modprobe -r mic
+               RETVAL=$?
+       fi
+
+       if [ $RETVAL -ne 0 ]; then
+               rc_failed 3
+       fi
+       echo
+       return $RETVAL
+}
+
+case $1 in
+       start)
+               start
+               ;;
+       stop)
+               stop
+               ;;
+       restart)
+               restart
+               ;;
+       status)
+               status
+               ;;
+       unload)
+               unload
+               ;;
+       *)
+               echo $"Usage: $0 {start|stop|restart|status|unload}"
+esac
+
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/cuda.conf 
b/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/cuda.conf
new file mode 100644
index 0000000..8bd574f
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/cuda.conf
@@ -0,0 +1 @@
+/usr/local/cuda/lib64
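This one-line ld.so.conf.d snippet only takes effect once the dynamic linker cache is refreshed; a typical sequence on the node would be (standard ldconfig usage, not part of this commit):

    ldconfig                   # rebuild /etc/ld.so.cache, now including /usr/local/cuda/lib64
    ldconfig -p | grep cuda    # check that the CUDA runtime libraries are resolvable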
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/dcgm-exporter.service
 
b/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/dcgm-exporter.service
new file mode 100644
index 0000000..ed87bd8
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/dcgm-exporter.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=NVIDIA DCGM prometheus exporter service
+After=network.target
+# Ensure that /dev/nvidia0 is created by first calling nvidia-smi.
+# If no GPU is found, nvidia-smi will not create /dev/nvidia0 and we will not run.
+Wants=nvidia-smi.service
+After=nvidia-smi.service
+ConditionPathExists=/dev/nvidia0
+
+[Service]
+Type=simple
+ExecStart=/usr/sbin/dcgm-exporter
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/ganglia-monitor.service
 
b/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/ganglia-monitor.service
new file mode 100644
index 0000000..d7b8211
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/ganglia-monitor.service
@@ -0,0 +1,25 @@
+# INSTALLED BY PUPPET
+
+[Unit]
+SourcePath=/etc/init.d/ganglia-monitor
+Description=(null)
+Before=runlevel2.target runlevel3.target runlevel4.target runlevel5.target shutdown.target
+After=network-online.target nss-lookup.target remote-fs.target systemd-journald-dev-log.socket
+Wants=network-online.target
+Conflicts=shutdown.target
+
+[Service]
+Type=forking
+Restart=no
+TimeoutSec=5min
+IgnoreSIGPIPE=no
+KillMode=process
+GuessMainPID=no
+RemainAfterExit=yes
+SysVStartPriority=2
+ExecStartPre=/bin/bash -c "[[ $(lsmod | grep -ic nvidia) -eq 0 ]] && rm -f /etc/ganglia/conf.d/{nvidia.pyconf,modpython-nvidia.conf} || true"
+ExecStart=/etc/init.d/ganglia-monitor start
+ExecStop=/etc/init.d/ganglia-monitor stop
+
+[Install]
+WantedBy=multi-user.target
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/modpython-nvidia.conf
 
b/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/modpython-nvidia.conf
new file mode 100644
index 0000000..3e95248
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/modpython-nvidia.conf
@@ -0,0 +1,13 @@
+/*
+  INSTALLED BY PUPPET
+*/
+
+modules {
+  module {
+    name = "python_module"
+    path = "/usr/lib/ganglia/modpython.so"
+    params = "/usr/lib/ganglia/python_modules"
+  }
+}
+
+include ('/etc/ganglia/conf.d/nvidia.pyconf')
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/nvidia-persistenced.service
 
b/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/nvidia-persistenced.service
new file mode 100644
index 0000000..1c8569c
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/nvidia-persistenced.service
@@ -0,0 +1,18 @@
+[Unit]
+Description=NVIDIA Persistence Daemon
+Wants=syslog.target
+# Ensure that /dev/nvidia0 is created by first calling nvidia-smi.
+# If no GPU is found, nvidia-smi will not create /dev/nvidia0 and we will not 
run.
+Wants=nvidia-smi.service
+After=nvidia-smi.service
+ConditionPathExists=/dev/nvidia0
+
+[Service]
+Type=forking
+PIDFile=/var/run/nvidia-persistenced/nvidia-persistenced.pid
+Restart=always
+ExecStart=/usr/bin/nvidia-persistenced --verbose
+ExecStopPost=/bin/rm -rf /var/run/nvidia-persistenced
+
+[Install]
+WantedBy=multi-user.target
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/nvidia-smi.service
 
b/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/nvidia-smi.service
new file mode 100644
index 0000000..e3eb471
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/big/nvidia/nvidia-smi.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Call nvidia-smi once to create /dev/nvidiaX
+
+[Service]
+Type=oneshot
+# Ignore the exit code: the command fails when no GPU is found
+ExecStart=-/usr/bin/nvidia-smi
+# Ignore stdout: error messages are expected when there is no GPU
+StandardOutput=null
+
+[Install]
+WantedBy=multi-user.target
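Note on the three NVIDIA units above: the leading '-' in 'ExecStart=-/usr/bin/nvidia-smi' makes systemd ignore the command's exit status, so the oneshot unit also succeeds on GPU-less nodes, while the ConditionPathExists=/dev/nvidia0 lines keep the persistence daemon and the DCGM exporter from starting there. A minimal check sequence (standard systemctl commands, not part of this commit):

    systemctl enable nvidia-smi.service nvidia-persistenced.service dcgm-exporter.service
    systemctl start nvidia-smi.service
    ls /dev/nvidia0 2>/dev/null || echo "no GPU detected on this node"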
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/min/apt/grid5000-archive-key.asc
 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/apt/grid5000-archive-key.asc
new file mode 100644
index 0000000..3d5350a
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/apt/grid5000-archive-key.asc
@@ -0,0 +1,52 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFuSnqEBEADJ+sFR94jY2uGHWOwlfmHEvsiqzX4BQdpSlLz7S/Gs7go6RtAU
+cZLT7ehp1dG/QJdgSCqBZ2xTbDHnMYm7hv9/LnKd/YJuRzHjr0fXbZ9rOAc7D6TB
+Cr1VgjJN5fIgCG5LWs6xPpxFL9XSZdiOE/xPMcygiHkSnEjlShccO3PQmoSUrYEz
+K3YxIcDjBmJcFpk2ay1gpxBi54KtY4aaYy1tZneEIMBh1aybqilbQhM9qIyz7fp8
+mKkq4/XQkXc1VvfSyLn2vM0cOtkD0X9FCU5v7lEri6tgpajHqEcQyRziDfWDC+33
+1OMvQgeBoqR6WfW13cquxA15JO873Cwwl52U5IriB52m3nd73UboLQjThyu7V/id
+ZGUFMNO/VvCBclA/uoZhRkOaTrfWXfhNnBMdJxppHrWAk5rEja+DXItNZtS4LqPa
+rBQvPszTC3PKGZS7DlSCU9C+orb08I4GZe2Y7ccEyOPIRowyleorROR2yPuxeN3w
+Ht330RX5UTk8InUYlh5PSLfRcfY5sjYAbayRXxJlDudl+DrJDkrfRnnU8zjqceDx
+rjHedofcfn16JwlR86MrPyEvfOsJ4pE9haVyOgfJsDe5PBimxddyevEFx8pYEJfi
+K15tx2/mYWWN10N4sx/lR0HvL75kllQW85JdL9rQLPQsqx23A1DkCnC1iwARAQAB
+tEJwYWNrYWdlcy5ncmlkNTAwMC5mciBBcmNoaXZlIEtleSA8c3VwcG9ydC1zdGFm
+ZkBsaXN0cy5ncmlkNTAwMC5mcj6JAk4EEwEIADgWIQQ8OL3qoF1Ke+14FeWx809W
+eXvy0QUCW5KeoQIbAwULCQgHAgYVCAkKCwIEFgIDAQIeAQIXgAAKCRCx809WeXvy
+0aF+EACvChuPyo47akv7HKnsxUq4KlraDmY2w0cFXL0Pavy2Kr8uui5AaSMPcgEb
+drAsHd0vb8Xr9dz4my6W3Iw6dIOb6R+1JjRK3DXeBFsV/RYSKK4MZ+moJ37yTQof
+0eJaYwJaeZoYBMHCfffLGOr063MB7YE+B3PCM6wHBaRUSRCotHZrzNZJSbWftEq+
+zVkZcuZU9o9bM0vCwgEnjgNiEieeQNBw64NAySm/xjC1eHjscEchK2jvyIWqqW3y
+LfbWmUaoYS0JL6m2SirocC/In+vTtsZUlpNaw1XEMG3dTUYI57FlZu5HXQEwLUlR
+CNtxlZDyqLP88KB5uPnRFJP34A+BCiitJhXHLn105kDaKEY8h6gx4rVweSFj5TOd
+nqgcGvStoOq87UYXtCHieGnP7W4ZkDNMwnBcMSXG+Ha3nc1BOJ9X7UavqJzPJM65
+W5bs152Ga81w7ILeegH+rGUixAz7hHOREMG1bfkk+urVadaPvgnC/qEO2JW6S/8k
+eS45UKpKUtqg8mKt1ZC5iqnDdFCg3BtdWtLd4BzffunpoOz0YmnPx1x8/nLXhzlb
+vdblITIvqiqlyRJmH1tL12e3/+4PulvO3OAPKitUfTjDrS9hS5y4U9ZPMZjp1/wi
+IULywOcOPQebaffH/o2Nmcn/KTUVLDYb7s2sOR4wjhaljTsokLkCDQRbkp6hARAA
+4pTzdSXs6wEmUiIUuC8/vUSQyqcXpB+7DbbPaAXEE9A/3V3c5fuRdwUZBcaSQBnq
+JkCyYJIVMFW6Y1Y2ZUXs5FYIagVmPfgL8v+YPtBH0T9FZ+f+JdDSRiGw0GSEE7vH
+qQ8ClnKITrImCADVYNh7cmaPGYFlkdCHiguWMmqigxiXbgnYSdl59XkuTs8ugREy
+0/BRrwngmUrCAzd8viMGt3PJyssgXVbtx4lDQDGYMPrT+5a30xZvWxbl0pG3hJyY
+M6hBqF+d/u410+KJ8CX2pEXiqzeddgsjhqtvsqb8Vu6fAurCfDD95Axf59vjyvu5
+DyODicu7DqxldEFwcPb7vvuDdVkmC3LNaAEbxMJDWs72wHMy/35QBEbn9I7qOZK5
+TknjFA/VhGCYRRYyPzZe9Y57iiWGjFPxlhZ7ePLis+JmxPeFcsc9jT7ozAB0vN1e
+YDGOwHBv/m1+8YVuqzyV3xChxCtzXU6verBqYezSHZAa8IGPa0dxwklnRWy++MtO
+mCNQS+54Gf2+BlkmSaIUKJu51bIZRvnyJgeDjxQVYEgViS5u2pGe7h0iu6bAeFKz
+kj6XhgfgRoAJH0wFNIRBJGF9KG0uBCnSu2C4D0A/DhW1C/rCgvj6aNH1QpjlTvOm
+pGT/+kxFw5HOnng6ziQ2z15I0hl/qm9lRLig8QqsdlUAEQEAAYkCNgQYAQgAIBYh
+BDw4veqgXUp77XgV5bHzT1Z5e/LRBQJbkp6hAhsMAAoJELHzT1Z5e/LRZosP/0nB
+/5RE09QJr20SuPG7mhKBGQbRRliHTIp8Q4UfD1tsMPvo60c1lzaU6ht7k2rYFSNV
+2b3sCJBCWx3Xc88uyCKKkGWAOdCmz17wBbqJEY41mT0AxdjoKozeD09VOcraODQ7
+D6jfcR3NqPjAuMAxlS1sM1G1ECJ6gfK0QEBKmlmOwU4pzAIrniRZ9Pjf4tgfGLJK
+/11U5C3OG2aImhX7PiaWHQNn4p8LkWVvPxGP99Jb42z+34xtuGajwOqLEDh7Wftc
+xUzwu2BgjZQzwlQHbULVNOcx00gpXYJt5WheZ4bStZNWdTyQaWYn0GabDLaU3bdp
+/00CWn0XBlWTmpMuaErN5PcRLJQJ22PNA6Jz9OsHfIhpgou1MHlA7zWIXYapuytq
+ixaHgdGcjB1bAjWLeZU5ttECpUEpeyBSKLFawWvCzOScUV6e2VrADxagfsnSCQHQ
+UG6qv9JNmlmKo3zw42CzeSA03wmGY188kbhJMCUU6bJ8G20q9p1Xy9KEIOVgk5QZ
+NU4AKejAuwulbeQkPjz1aatp/PyveuSQJTPrd9S45cYShg6Plvi+egSU9E8ciehY
+nVpWhVxwHSvNi9lqHNBYU2otM8ShJzk9xmHOLTeg/s7zXyTvsZpXQn6yNSvc3BOh
+aBSpMPFlS9PyaAHZjk9nsRKLwjP/SI7YJbwWPPwi
+=ETJ1
+-----END PGP PUBLIC KEY BLOCK-----
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/min/cpu_microcode/amd64-microcode
 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/cpu_microcode/amd64-microcode
new file mode 100644
index 0000000..1a4d194
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/cpu_microcode/amd64-microcode
@@ -0,0 +1,12 @@
+# Configuration script for amd64-microcode version 3
+
+#
+# initramfs helper
+#
+
+#
+# Set this to "no" to disable automatic microcode updates on boot;
+# Set this to "early" to always install microcode updates to the early initramfs;
+# Set this to "auto" to autodetect mode for current system (default);
+#
+AMD64UCODE_INITRAMFS=early
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/min/cpu_microcode/intel-microcode
 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/cpu_microcode/intel-microcode
new file mode 100644
index 0000000..a3626dd
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/cpu_microcode/intel-microcode
@@ -0,0 +1,27 @@
+# Configuration script for intel-microcode version 3
+
+#
+# initramfs helper
+#
+
+# Set this to "no" to disable automatic microcode updates on boot;
+# Set this to "auto" to use early initramfs mode automatically (default);
+# Set this to "early" to always attempt to create an early initramfs;
+IUCODE_TOOL_INITRAMFS=yes
+
+# Set this to "yes" (default) to use "iucode_tool --scan-system" to reduce
+# the initramfs size bloat, by detecting which Intel processors are active
+# in this system, and installing only their microcodes.
+#
+# Set this to "no" to either include all microcodes, or only the microcodes
+# selected through the use of IUCODE_TOOL_EXTRA_OPTIONS below.
+#
+# WARNING: including all microcodes will increase initramfs size greatly.
+# This can cause boot issues if the initramfs is already large.
+IUCODE_TOOL_SCANCPUS=no
+
+# Extra options to pass to iucode_tool, useful to forbid or to
+# force the inclusion of microcode for specific processor signatures.
+# See iucode_tool(8) for details.
+#IUCODE_TOOL_EXTRA_OPTIONS=""
+
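Both microcode configuration files are only read when the initramfs is (re)generated, so a change here has to be followed by an initramfs rebuild (standard Debian commands, not part of this commit):

    update-initramfs -u -k all     # rebuild the initramfs for all installed kernels
    iucode_tool --scan-system      # optional: list the Intel processor signatures found on this host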
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/min/image_versioning/git_tag
 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/image_versioning/git_tag
new file mode 100644
index 0000000..3fc2634
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/image_versioning/git_tag
@@ -0,0 +1,2 @@
+# This file will contain the git tag of the repository used to build the image.
+# This git tag will be extracted by kameleon
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/min/image_versioning/postinst
 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/image_versioning/postinst
new file mode 100644
index 0000000..5f5114e
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/image_versioning/postinst
@@ -0,0 +1,3 @@
+# This file is intended to be completed by kameleon and contains the path of the postinstall used by kadeploy to deploy this image.
+# If this file is changed (postinstall increment), the version of the image must be incremented as well.
+# To avoid this being forgotten, the path is stored in the tgz so that it changes the tgz checksum.
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/min/locales/locale 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/locales/locale
new file mode 100644
index 0000000..a709cd8
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/min/locales/locale
@@ -0,0 +1,3 @@
+LANGUAGE=en_US:en
+LANG=en_US.UTF-8
+LC_ALL=en_US.UTF-8
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/min/locales/locale.gen 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/locales/locale.gen
new file mode 100644
index 0000000..a66d814
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/min/locales/locale.gen
@@ -0,0 +1 @@
+en_US.UTF-8 UTF-8
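The two locale files above take effect once the locale is generated; on Debian this is typically done with (standard commands, not part of this commit):

    locale-gen                        # generate en_US.UTF-8 from /etc/locale.gen
    update-locale LANG=en_US.UTF-8    # or ship the /etc/default/locale file shown above
    locale                            # verify the active settings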
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/min/network/g5k-update-host-name
 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/network/g5k-update-host-name
new file mode 100644
index 0000000..0422d0e
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/network/g5k-update-host-name
@@ -0,0 +1,16 @@
+
+if [ -n "$new_host_name" ]; then
+  if ! echo "$new_host_name" | egrep -q '^.*-eth.*$'; then
+    if [ -n "$new_domain_name" ]; then
+      hostname="${new_host_name}.${new_domain_name}"
+    else
+      hostname="${new_host_name}"
+    fi
+
+    echo "$hostname" > /etc/hostname 2> /dev/null
+    hostname "$hostname"
+    echo "$hostname" > /etc/mailname 2> /dev/null
+  fi
+  systemctl restart syslog
+fi
+
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/min/network/hosts 
b/grid5000/steps/data/setup/puppet/modules/env/files/min/network/hosts
new file mode 100644
index 0000000..0d49331
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/min/network/hosts
@@ -0,0 +1,10 @@
+127.0.0.1 localhost
+
+# The following lines are desirable for IPv6 capable hosts
+::1     ip6-localhost ip6-loopback
+fe00::0 ip6-localnet
+ff00::0 ip6-mcastprefix
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
+ff02::3 ip6-allhosts
+
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/ca2019.grid5000.fr.cert
 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/ca2019.grid5000.fr.cert
new file mode 100644
index 0000000..ee8b084
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/ca2019.grid5000.fr.cert
@@ -0,0 +1,26 @@
+-----BEGIN CERTIFICATE-----
+MIIEbjCCA1agAwIBAgIJAJceqF9a8UnpMA0GCSqGSIb3DQEBCwUAMH0xCzAJBgNV
+BAYTAkZSMREwDwYDVQQKDAhHcmlkNTAwMDEOMAwGA1UECwwFSU5SSUExGzAZBgNV
+BAMMEmNhMjAxOS5ncmlkNTAwMC5mcjEuMCwGCSqGSIb3DQEJARYfc3VwcG9ydC1z
+dGFmZkBsaXN0cy5ncmlkNTAwMC5mcjAeFw0xOTA1MTMxMjQyNTdaFw0zNDA1MDkx
+MjQyNTdaMH0xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhHcmlkNTAwMDEOMAwGA1UE
+CwwFSU5SSUExGzAZBgNVBAMMEmNhMjAxOS5ncmlkNTAwMC5mcjEuMCwGCSqGSIb3
+DQEJARYfc3VwcG9ydC1zdGFmZkBsaXN0cy5ncmlkNTAwMC5mcjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAKcIXPnhkWgYLwUgSm1quyt62Lh4OApekYwc
+TA4S3by1nnOhYTbKnKe+chm5xKgt1oKA442DzAA85tffZ9QRCcuDMMRXN8Xywc74
+nqQ66zX2Kxiav8ncHzJSTPn+PjlYaJHl59eVa9Rb0bQk36tIt+lnno6N6bJhVNID
+FoOxK0SCjPg72Sa+pwzYJksFFRdbB2cwnzRPTnH3Q2k+ofzB82xKPGjEQOCfs2SK
+7uvsJvN/wG+UID+yJSY0S+6FSeTBScJAALxskP2Wuyp1VQ1a4pOfPE1IrFEu8O8W
+3oy91WQYnpjBUMFdrW1TK4EcXUU8jeyXk5Bwn6l5+Fe+yAP08pECAwEAAaOB8DCB
+7TAdBgNVHQ4EFgQU/rp9soPqEmpIs083TeggATSu6T8wgbAGA1UdIwSBqDCBpYAU
+/rp9soPqEmpIs083TeggATSu6T+hgYGkfzB9MQswCQYDVQQGEwJGUjERMA8GA1UE
+CgwIR3JpZDUwMDAxDjAMBgNVBAsMBUlOUklBMRswGQYDVQQDDBJjYTIwMTkuZ3Jp
+ZDUwMDAuZnIxLjAsBgkqhkiG9w0BCQEWH3N1cHBvcnQtc3RhZmZAbGlzdHMuZ3Jp
+ZDUwMDAuZnKCCQCXHqhfWvFJ6TAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjAN
+BgkqhkiG9w0BAQsFAAOCAQEAVCfhuFJ3VouWWYqMrM10cMiF4E3GYUdGfnB9ecaQ
+5UKjN1kdiOdVf+luXZksfljIOCXN8eY5+NMOm+7uzawOKuVqsxvfLt7duKbP2yw4
+VmQMfWn6/zhkkJR0/QFXchvzii5dWXNb6JJj/Z7cuy7i8/sapUtS5gnqxkYuE8og
+3duLwaW96cZI5aAqdcz4t+BADn+Sk0EY4fhyRxq3vMDw7yzY+07iIOSMVLuLDIIa
+hIXFonphQGPD9Asz2EOBbJN6JRC+RWtniLT6BqghFvz+cLXFCqTqJvf+YRs11xwn
+uqCzvhyO0cW+/oBUvyAb6uP/kM2ABkidw1g5hNsvPdBCvQ==
+-----END CERTIFICATE-----
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/common-auth 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/common-auth
new file mode 100644
index 0000000..3e92893
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/common-auth
@@ -0,0 +1,13 @@
+#
+# /etc/pam.d/common-auth - authentication settings common to all services
+#
+# This file is included from other service-specific PAM config files,
+# and should contain a list of the authentication modules that define
+# the central authentication scheme for use on the system
+# (e.g., /etc/shadow, LDAP, Kerberos, etc.).  The default is to use the
+# traditional Unix authentication mechanisms.
+#
+#auth  required        pam_unix.so nullok_secure
+
+auth    sufficient      pam_ldap.so
+auth    requisite       pam_unix.so use_first_pass nullok_secure
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/common-password 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/common-password
new file mode 100644
index 0000000..74f1425
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/common-password
@@ -0,0 +1,34 @@
+#
+# /etc/pam.d/common-password - password-related modules common to all services
+#
+# This file is included from other service-specific PAM config files,
+# and should contain a list of modules that define the services to be
+# used to change user passwords.  The default is pam_unix.
+
+# Explanation of pam_unix options:
+#
+# The "nullok" option allows users to change an empty password, else
+# empty passwords are treated as locked accounts.
+#
+# The "md5" option enables MD5 passwords.  Without this option, the
+# default is Unix crypt.
+#
+# The "obscure" option replaces the old `OBSCURE_CHECKS_ENAB' option in
+# login.defs.
+#
+# You can also use the "min" option to enforce the length of the new
+# password.
+#
+# See the pam_unix manpage for other options.
+
+password sufficient      pam_ldap.so
+password   required   pam_unix.so nullok obscure md5
+
+# Alternate strength checking for password. Note that this
+# requires the libpam-cracklib package to be installed.
+# You will need to comment out the password line above and
+# uncomment the next two in order to use this.
+# (Replaces the `OBSCURE_CHECKS_ENAB', `CRACKLIB_DICTPATH')
+#
+# password required      pam_cracklib.so retry=3 minlen=6 difok=3
+# password required      pam_unix.so use_authtok nullok md5
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/ldap.conf 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/ldap.conf
new file mode 100644
index 0000000..16557a1
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/ldap.conf
@@ -0,0 +1,20 @@
+#
+# LDAP Defaults
+#
+
+# See ldap.conf(5) for details
+# This file should be world readable but not world writable.
+
+#BASE  dc=example,dc=com
+#URI   ldap://ldap.example.com ldap://ldap-master.example.com:666
+
+#SIZELIMIT     12
+#TIMELIMIT     15
+#DEREF         never
+
+base            dc=grid5000,dc=fr
+uri             ldaps://ldap/
+ldap_version    3
+
+tls_cacert /etc/ldap/certificates/ca2019.grid5000.fr.cert
+tls_reqcert demand
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/libnss-ldap.conf 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/libnss-ldap.conf
new file mode 100644
index 0000000..626cccd
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/libnss-ldap.conf
@@ -0,0 +1,324 @@
+# the configuration of this file will be done by debconf as long as the
+# first line of the file says '###DEBCONF###'
+#
+# you should use dpkg-reconfigure libnss-ldap to configure this file.
+#
+# @(#)$Id: ldap.conf,v 2.48 2008/07/03 02:30:29 lukeh Exp $
+#
+# This is the configuration file for the LDAP nameservice
+# switch library and the LDAP PAM module.
+#
+# PADL Software
+# http://www.padl.com
+#
+
+# Your LDAP server. Must be resolvable without using LDAP.
+# Multiple hosts may be specified, each separated by a
+# space. How long nss_ldap takes to failover depends on
+# whether your LDAP client library supports configurable
+# network or connect timeouts (see bind_timelimit).
+#host 127.0.0.1
+
+# The distinguished name of the search base.
+base dc=grid5000,dc=fr
+
+# Another way to specify your LDAP server is to provide an
+uri ldaps://ldap/
+# Unix Domain Sockets to connect to a local LDAP Server.
+#uri ldap://127.0.0.1/
+#uri ldaps://127.0.0.1/
+#uri ldapi://%2fvar%2frun%2fldapi_sock/
+# Note: %2f encodes the '/' used as directory separator
+
+# The LDAP version to use (defaults to 3
+# if supported by client library)
+ldap_version 3
+
+# The distinguished name to bind to the server with.
+# Optional: default is to bind anonymously.
+# Please do not put double quotes around it as they
+# would be included literally.
+#binddn cn=proxyuser,dc=padl,dc=com
+
+# The credentials to bind with.
+# Optional: default is no credential.
+#bindpw secret
+
+# The distinguished name to bind to the server with
+# if the effective user ID is root. Password is
+# stored in /etc/libnss-ldap.secret (mode 600)
+# Use 'echo -n "mypassword" > /etc/libnss-ldap.secret' instead
+# of an editor to create the file.
+#rootbinddn cn=manager,dc=example,dc=net
+
+# The port.
+# Optional: default is 389.
+#port 389
+
+# The search scope.
+#scope sub
+#scope one
+#scope base
+
+# Search timelimit
+#timelimit 30
+
+# Bind/connect timelimit
+#bind_timelimit 30
+
+# Reconnect policy:
+#  hard_open: reconnect to DSA with exponential backoff if
+#             opening connection failed
+#  hard_init: reconnect to DSA with exponential backoff if
+#             initializing connection failed
+#  hard:      alias for hard_open
+#  soft:      return immediately on server failure
+bind_policy hard
+
+# Connection policy:
+#  persist:   DSA connections are kept open (default)
+#  oneshot:   DSA connections destroyed after request
+#nss_connect_policy persist
+
+# Idle timelimit; client will close connections
+# (nss_ldap only) if the server has not been contacted
+# for the number of seconds specified below.
+#idle_timelimit 3600
+
+# Use paged results
+#nss_paged_results yes
+
+# Pagesize: when paged results are enabled, used to set the
+# pagesize to a custom value
+#pagesize 1000
+
+# Filter to AND with uid=%s
+#pam_filter objectclass=account
+pam_filter &(objectclass=posixAccount)(!(uid=oar))
+
+# The user ID attribute (defaults to uid)
+#pam_login_attribute uid
+
+# Search the root DSE for the password policy (works
+# with Netscape Directory Server)
+#pam_lookup_policy yes
+
+# Check the 'host' attribute for access control
+# Default is no; if set to yes, and user has no
+# value for the host attribute, and pam_ldap is
+# configured for account management (authorization)
+# then the user will not be allowed to login.
+#pam_check_host_attr yes
+
+# Check the 'authorizedService' attribute for access
+# control
+# Default is no; if set to yes, and the user has no
+# value for the authorizedService attribute, and
+# pam_ldap is configured for account management
+# (authorization) then the user will not be allowed
+# to login.
+#pam_check_service_attr yes
+
+# Group to enforce membership of
+#pam_groupdn cn=PAM,ou=Groups,dc=padl,dc=com
+
+# Group member attribute
+#pam_member_attribute uniquemember
+
+# Specify a minimum or maximum UID number allowed
+pam_min_uid 1000
+pam_max_uid 0
+
+
+# Template login attribute, default template user
+# (can be overridden by value of former attribute
+# in user's entry)
+#pam_login_attribute userPrincipalName
+#pam_template_login_attribute uid
+#pam_template_login nobody
+
+# HEADS UP: the pam_crypt, pam_nds_passwd,
+# and pam_ad_passwd options are no
+# longer supported.
+#
+# Do not hash the password at all; presume
+# the directory server will do it, if
+# necessary. This is the default.
+#pam_password clear
+
+# Hash password locally; required for University of
+# Michigan LDAP server, and works with Netscape
+# Directory Server if you're using the UNIX-Crypt
+# hash mechanism and not using the NT Synchronization
+# service.
+#pam_password crypt
+
+# Remove old password first, then update in
+# cleartext. Necessary for use with Novell
+# Directory Services (NDS)
+#pam_password nds
+
+# RACF is an alias for the above. For use with
+# IBM RACF
+#pam_password racf
+
+# Update Active Directory password, by
+# creating Unicode password and updating
+# unicodePwd attribute.
+#pam_password ad
+
+# Use the OpenLDAP password change
+# extended operation to update the password.
+#pam_password exop
+
+# Redirect users to a URL or somesuch on password
+# changes.
+#pam_password_prohibit_message Please visit http://internal to change your password.
+
+# Use backlinks for answering initgroups()
+#nss_initgroups backlink
+
+# Enable support for RFC2307bis (distinguished names in group
+# members)
+#nss_schema rfc2307bis
+
+# RFC2307bis naming contexts
+# Syntax:
+# nss_base_XXX         base?scope?filter
+# where scope is {base,one,sub}
+# and filter is a filter to be &'d with the
+# default filter.
+# You can omit the suffix eg:
+# nss_base_passwd      ou=People,
+# to append the default base DN but this
+# may incur a small performance impact.
+#nss_base_passwd       ou=People,dc=padl,dc=com?one
+#nss_base_shadow       ou=People,dc=padl,dc=com?one
+#nss_base_group                ou=Group,dc=padl,dc=com?one
+#nss_base_hosts                ou=Hosts,dc=padl,dc=com?one
+#nss_base_services     ou=Services,dc=padl,dc=com?one
+#nss_base_networks     ou=Networks,dc=padl,dc=com?one
+#nss_base_protocols    ou=Protocols,dc=padl,dc=com?one
+#nss_base_rpc          ou=Rpc,dc=padl,dc=com?one
+#nss_base_ethers       ou=Ethers,dc=padl,dc=com?one
+#nss_base_netmasks     ou=Networks,dc=padl,dc=com?ne
+#nss_base_bootparams   ou=Ethers,dc=padl,dc=com?one
+#nss_base_aliases      ou=Aliases,dc=padl,dc=com?one
+#nss_base_netgroup     ou=Netgroup,dc=padl,dc=com?one
+
+# attribute/objectclass mapping
+# Syntax:
+#nss_map_attribute     rfc2307attribute        mapped_attribute
+#nss_map_objectclass   rfc2307objectclass      mapped_objectclass
+
+# configure --enable-nds is no longer supported.
+# NDS mappings
+#nss_map_attribute uniqueMember member
+
+# Services for UNIX 3.5 mappings
+#nss_map_objectclass posixAccount User
+#nss_map_objectclass shadowAccount User
+#nss_map_attribute uid msSFU30Name
+#nss_map_attribute uniqueMember msSFU30PosixMember
+#nss_map_attribute userPassword msSFU30Password
+#nss_map_attribute homeDirectory msSFU30HomeDirectory
+#nss_map_attribute homeDirectory msSFUHomeDirectory
+#nss_map_objectclass posixGroup Group
+#pam_login_attribute msSFU30Name
+#pam_filter objectclass=User
+#pam_password ad
+
+# configure --enable-mssfu-schema is no longer supported.
+# Services for UNIX 2.0 mappings
+#nss_map_objectclass posixAccount User
+#nss_map_objectclass shadowAccount user
+#nss_map_attribute uid msSFUName
+#nss_map_attribute uniqueMember posixMember
+#nss_map_attribute userPassword msSFUPassword
+#nss_map_attribute homeDirectory msSFUHomeDirectory
+#nss_map_attribute shadowLastChange pwdLastSet
+#nss_map_objectclass posixGroup Group
+#nss_map_attribute cn msSFUName
+#pam_login_attribute msSFUName
+#pam_filter objectclass=User
+#pam_password ad
+
+# RFC 2307 (AD) mappings
+#nss_map_objectclass posixAccount user
+#nss_map_objectclass shadowAccount user
+#nss_map_attribute uid sAMAccountName
+#nss_map_attribute homeDirectory unixHomeDirectory
+#nss_map_attribute shadowLastChange pwdLastSet
+#nss_map_objectclass posixGroup group
+#nss_map_attribute uniqueMember member
+#pam_login_attribute sAMAccountName
+#pam_filter objectclass=User
+#pam_password ad
+
+# configure --enable-authpassword is no longer supported
+# AuthPassword mappings
+#nss_map_attribute userPassword authPassword
+
+# AIX SecureWay mappings
+#nss_map_objectclass posixAccount aixAccount
+#nss_base_passwd ou=aixaccount,?one
+#nss_map_attribute uid userName
+#nss_map_attribute gidNumber gid
+#nss_map_attribute uidNumber uid
+#nss_map_attribute userPassword passwordChar
+#nss_map_objectclass posixGroup aixAccessGroup
+#nss_base_group ou=aixgroup,?one
+#nss_map_attribute cn groupName
+#nss_map_attribute uniqueMember member
+#pam_login_attribute userName
+#pam_filter objectclass=aixAccount
+#pam_password clear
+
+# For pre-RFC2307bis automount schema
+#nss_map_objectclass automountMap nisMap
+#nss_map_attribute automountMapName nisMapName
+#nss_map_objectclass automount nisObject
+#nss_map_attribute automountKey cn
+#nss_map_attribute automountInformation nisMapEntry
+
+# Netscape SDK LDAPS
+#ssl on
+
+# Netscape SDK SSL options
+#sslpath /etc/ssl/certs
+
+# OpenLDAP SSL mechanism
+# start_tls mechanism uses the normal LDAP port, LDAPS typically 636
+#ssl start_tls
+ssl on
+
+# OpenLDAP SSL options
+# Require and verify server certificate (yes/no)
+# Default is to use libldap's default behavior, which can be configured in
+# /etc/openldap/ldap.conf using the TLS_REQCERT setting.  The default for
+# OpenLDAP 2.0 and earlier is "no", for 2.1 and later is "yes".
+tls_checkpeer no
+tls_reqcert demand
+
+# CA certificates for server certificate verification
+# At least one of these is required if tls_checkpeer is "yes"
+tls_cacertfile /etc/ldap/certificates/ca2019.grid5000.fr.cert
+tls_cacertdir /etc/ssl/certificates
+
+# Seed the PRNG if /dev/urandom is not provided
+#tls_randfile /var/run/egd-pool
+
+# SSL cipher suite
+# See man ciphers for syntax
+#tls_ciphers TLSv1
+
+# Client certificate and key
+# Use these, if your server requires client authentication.
+#tls_cert
+#tls_key
+
+# Disable SASL security layers. This is needed for AD.
+#sasl_secprops maxssf=0
+
+# Override the default Kerberos ticket cache location.
+#krb5_ccname FILE:/etc/.ldapcache
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/nscd.conf 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/nscd.conf
new file mode 100644
index 0000000..82b5755
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/nscd.conf
@@ -0,0 +1,81 @@
+#
+# /etc/nscd.conf
+#
+# An example Name Service Cache config file.  This file is needed by nscd.
+#
+# Legal entries are:
+#
+#      logfile                 <file>
+#      debug-level             <level>
+#      threads                 <initial #threads to use>
+#      max-threads             <maximum #threads to use>
+#      server-user             <user to run server as instead of root>
+#              server-user is ignored if nscd is started with -S parameters
+#       stat-user               <user who is allowed to request statistics>
+#      reload-count            unlimited|<number>
+#      paranoia                <yes|no>
+#      restart-interval        <time in seconds>
+#
+#       enable-cache           <service> <yes|no>
+#      positive-time-to-live   <service> <time in seconds>
+#      negative-time-to-live   <service> <time in seconds>
+#       suggested-size         <service> <prime number>
+#      check-files             <service> <yes|no>
+#      persistent              <service> <yes|no>
+#      shared                  <service> <yes|no>
+#      max-db-size             <service> <number bytes>
+#      auto-propagate          <service> <yes|no>
+#
+# Currently supported cache names (services): passwd, group, hosts, services
+#
+
+
+#      logfile                 /var/log/nscd.log
+#      threads                 6
+#      max-threads             128
+#      server-user             nobody
+#      stat-user               somebody
+       debug-level             0
+#      reload-count            5
+       paranoia                no
+#      restart-interval        3600
+
+       enable-cache            passwd          yes
+       positive-time-to-live   passwd          300
+       negative-time-to-live   passwd          20
+       suggested-size          passwd          211
+       check-files             passwd          no
+       persistent              passwd          no
+       shared                  passwd          yes
+       max-db-size             passwd          33554432
+       auto-propagate          passwd          yes
+
+       enable-cache            group           yes
+       positive-time-to-live   group           300
+       negative-time-to-live   group           60
+       suggested-size          group           211
+       check-files             group           no
+       persistent              group           no
+       shared                  group           yes
+       max-db-size             group           33554432
+       auto-propagate          group           yes
+
+# hosts caching is broken with gethostby* calls, hence is now disabled
+# per default.  See /usr/share/doc/nscd/NEWS.Debian.
+       enable-cache            hosts           no
+       positive-time-to-live   hosts           3600
+       negative-time-to-live   hosts           20
+       suggested-size          hosts           211
+       check-files             hosts           no
+       persistent              hosts           no
+       shared                  hosts           yes
+       max-db-size             hosts           33554432
+
+       enable-cache            services        yes
+       positive-time-to-live   services        28800
+       negative-time-to-live   services        20
+       suggested-size          services        211
+       check-files             services        no
+       persistent              services        no
+       shared                  services        yes
+       max-db-size             services        33554432
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/nslcd.conf 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/nslcd.conf
new file mode 100644
index 0000000..ff9443e
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/nslcd.conf
@@ -0,0 +1,36 @@
+# /etc/nslcd.conf
+# nslcd configuration file. See nslcd.conf(5)
+# for details.
+
+# The user and group nslcd should run as.
+uid nslcd
+gid nslcd
+
+# The location at which the LDAP server(s) should be reachable.
+uri ldaps://ldap/
+
+# The search base that will be used for all queries.
+base dc=grid5000,dc=fr
+
+# The LDAP protocol version to use.
+ldap_version 3
+
+# The DN to bind with for normal lookups.
+#binddn cn=annonymous,dc=example,dc=net
+#bindpw secret
+
+# The DN used for password modifications by root.
+#rootpwmoddn cn=admin,dc=example,dc=com
+
+# SSL options
+ssl on
+
+tls_cacertfile /etc/ldap/certificates/ca2019.grid5000.fr.cert
+tls_reqcert demand
+
+# The search scope.
+#scope sub
+
+# Specifies the period of inactivity (in seconds) after which the connection to the LDAP server will be closed.
+# The default is not to time out connections. 
+idle_timelimit 30
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/nsswitch.conf 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/nsswitch.conf
new file mode 100644
index 0000000..71f836d
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/ldap/nsswitch.conf
@@ -0,0 +1,19 @@
+# /etc/nsswitch.conf
+#
+# Example configuration of GNU Name Service Switch functionality.
+# If you have the `glibc-doc-reference' and `info' packages installed, try:
+# `info libc "Name Service Switch"' for information about this file.
+
+passwd:         files ldap
+group:          files ldap
+shadow:         files ldap
+
+hosts:          files dns
+networks:       files
+
+protocols:      db files
+services:       db files
+ethers:         db files
+rpc:            db files
+
+netgroup:       nis
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/nfs/openiscsi/55-openiscsi.rules
 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/openiscsi/55-openiscsi.rules
new file mode 100644
index 0000000..a3fff48
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/openiscsi/55-openiscsi.rules
@@ -0,0 +1 @@
+KERNEL=="sd*", SUBSYSTEMS=="scsi", PROGRAM="/etc/udev/scripts/iscsidev.sh %b",SYMLINK+="iscsi/%c/part%n"
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/nfs/openiscsi/iscsidev.sh 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/openiscsi/iscsidev.sh
new file mode 100644
index 0000000..caa828a
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/nfs/openiscsi/iscsidev.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+BUS=${1}
+HOST=${BUS%%:*}
+
+[ -e /sys/class/iscsi_host ] || exit 1
+
+file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/session*/targetname"
+
+target_name=$(cat ${file})
+
+# This is not an open-iscsi drive
+if [ -z "${target_name}" ]; then
+   exit 1
+fi
+
+echo "${target_name##*:}"
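The udev rule above calls this helper with the kernel bus id (%b) and uses its output (%c) to name the /dev/iscsi/<target>/part<n> symlink. A quick manual check of the helper (the host number is illustrative):

    /etc/udev/scripts/iscsidev.sh 3:0:0:0    # prints the short target name for iSCSI host 3, or exits 1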
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/g5k-disk-manager-backend
 
b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/g5k-disk-manager-backend
new file mode 100644
index 0000000..97192ea
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/g5k-disk-manager-backend
@@ -0,0 +1,284 @@
+#!/usr/bin/env ruby
+
+# INSTALLED BY PUPPET
+# Location : puppet/modules/env/files/std/g5k-manager/g5k-disk-manager-backend
+
+require 'open-uri'
+require 'json'
+require 'optparse'
+require_relative '../lib/g5k/g5k-manager.rb'
+
+DISABLE_DELAY = 2
+ENABLE_DELAY = 1
+ENABLE_LAST_DELAY = 2
+
+def main
+  options = parse_cmdline
+  start
+  if options[:on_boot]
+    startup_service(options[:force])
+  else
+    manage_disks(options)
+  end
+  close
+end
+
+def start
+  Dir.chdir(TMPDIR)
+end
+
+def close
+  rmtmp
+  exit 0
+end
+
+def parse_cmdline
+  options = {}
+  OptionParser.new do |opts|
+    opts.banner = 'Usage: g5k-disk-manager-backend [--enable 1,2,3] [--disable 4,5]'
+    opts.on('--on-boot', 'Enable all disks at boot time') do |v|
+      options[:on_boot] = v
+    end
+    opts.on('--force', 'Force usage of --on-boot even if the node has been deployed by user') do |f|
+      options[:force] = f
+    end
+    opts.on('--enable DISK_IDS', 'Enable disks') do |disks|
+      options[:enable] = disks
+    end
+    opts.on('--disable DISK_IDS', 'Disable disks') do |disks|
+      options[:disable] = disks
+    end
+    opts.on('-h', '--help', 'Prints this help') do
+      puts opts
+      exit
+    end
+  end.parse!
+  options
+end
+
+# The aim of this function is to activate all disks of the node in a predefined
+# order, so that sdb, sdc, ... devices names are always given to the same
+# physical disks.
+# It must be done just before g5k-checks is launched on the node, to avoid
+# g5k-checks errors.
+# See also /etc/systemd/system/g5k-disk-manager-backend.service.
+def startup_service(force)
+  _status, hostname = sh('hostname')
+
+  unless reservable_disks?
+    notice "This cluster doesn't have reservable disks: exit service"
+    close
+  end
+
+  if user_deploy?(hostname) && !force
+    notice 'The environment is deployed manually by a user: the disks have not been activated'
+    close
+  end
+
+  unless megacli_compliant?
+    notice 'No compliant RAID controller was found: the disks have not been activated'
+    close
+  end
+
+  # Get the disks identifiers
+  physical_disks, virtual_disks = get_disks
+
+  # If there is at least one virtual drive, exit: this excludes clusters configured with RAID 0 or RAID 1
+  num_virtual_drives = virtual_disks.count
+  debug "num_virtual_drives = #{num_virtual_drives}"
+  if num_virtual_drives >= 1
+    notice 'One virtual drive of RAID disks is present: the disks have not been activated'
+    close
+  end
+
+  # Remove the first disk from the list (first disk is the main disk sda)
+  physical_disks.shift
+
+  # Disable then enable the disks
+  disable(physical_disks)
+  num_enable_errors = enable(physical_disks)
+
+  if num_enable_errors.zero?
+    notice 'All disks have been activated with success'
+  else
+    error(1, "#{num_enable_errors} errors occurred while enabling the disks")
+  end
+end
+
+def manage_disks(options)
+  error(2, 'No compliant RAID controller was found') unless megacli_compliant?
+
+  physical_disks, _virtual_disks = get_disks
+  disks_to_enable = disks_locations(physical_disks, options[:enable])
+  disks_to_disable = disks_locations(physical_disks, options[:disable])
+
+  # Array intersection
+  if (disks_to_enable & disks_to_disable) != []
+    error(3, 'You provided the same disk to enable and disable')
+  end
+
+  # First, we disable the disks (we will maybe re-enable them after)
+  unless disks_to_disable.empty?
+    num_disable_errors = disable(disks_to_disable)
+    error(1, "#{num_disable_errors} errors occurred while disabling the disks") unless num_disable_errors.zero?
+  end
+  unless disks_to_enable.empty?
+    num_enable_errors = enable(disks_to_enable)
+    error(1, "#{num_enable_errors} errors occurred while enabling the disks") unless num_enable_errors.zero?
+  end
+end
+
+def disks_locations(physical_disks, ids)
+  return [] if ids.nil?
+
+  ids = ids.split(',').map { |e| e.strip.to_i }
+  disks = []
+  ids.each do |id|
+    # id == 0 corresponds to the main disk sda
+    error(4, "Wrong disk id: #{id}") if id <= 0 || id >= physical_disks.length
+    disks.push(physical_disks[id])
+  end
+  disks
+end
+
+# Clusters with reservable disks are clusters whose
+# reference-repository storage_devices property contains property
+# reservation: true
+def reservable_disks?
+  ref_api = File.read('/etc/grid5000/ref-api.json')
+  JSON.parse(ref_api)['storage_devices'].select { |sd| sd['reservation'] }.any?
+end
+
+def megacli_compliant?
+  # Get the number of RAID controllers supported by megacli
+  # The return code of the command is the number of controllers supported
+  num_controllers, _output = sh("#{MEGACLI} -AdpCount")
+  num_controllers != 0
+end
+
+# This function retrieves the physical and virtual disk identifiers from
+# the output of the megacli command.
+# For both types of drives, the adapter is printed once on a single line,
+# followed by the drives that belong to that adapter.
+#
+# A physical drive output looks like:
+#
+# Enclosure Device ID: 8
+# Slot Number: 0
+# Enclosure position: 1
+# Device Id: 14
+# WWN: 5002538c40be7492
+# Sequence Number: 2
+# Media Error Count: 0
+# ... other lines
+#
+# A virtual one:
+#
+# Virtual Drive: 0 (Target Id: 0)
+# Name                :SYSTEM
+# RAID Level          : Primary-1, Secondary-0, RAID Level Qualifier-0
+# Size                : 558.375 GB
+# Sector Size         : 512
+# ... other lines
+#
+# The physical drives have to be sorted by the Device ID to match the way
+# Linux creates the /dev device special files (pci-scsi path order)
+def get_disks
+  status, output_pdlist = sh("#{MEGACLI} -PDList -aALL")
+  unless status.zero?
+    notice 'The command megacli failed to list physical drives'
+    close
+  end
+
+  status, output_vdlist = sh("#{MEGACLI} -LDInfo -Lall -aall")
+  unless status.zero?
+    notice 'The command megacli failed to list virtual drives'
+    close
+  end
+
+  physical_disks = []
+  virtual_disks = []
+
+  adapter_regexp = /^Adapter\s#?(\d+).*$/
+  enclosure_regexp = /^Enclosure\sDevice\sID:\s+(\d+)$/
+  slot_regexp = /^Slot\sNumber:\s+(\d+)$/
+  device_id_regexp = /^Device\sId:\s+(\d+)$/
+  virtual_drive_regexp = /^Virtual\sDrive:\s+(\d+).+$/
+
+  adapter = ''
+  enclosure = ''
+  slot = ''
+
+  output_pdlist.each_line do |line|
+    if (m = adapter_regexp.match(line))
+      adapter = m[1].to_i
+    elsif (m = enclosure_regexp.match(line))
+      enclosure = m[1].to_i
+    elsif (m = slot_regexp.match(line))
+      slot = m[1].to_i
+    elsif (m = device_id_regexp.match(line))
+      physical_disks << { adapter: adapter, enclosure: enclosure, slot: slot, device_id: m[1].to_i }
+    end
+
+    physical_disks.sort_by! { |p_disk| p_disk[:device_id] }
+  end
+
+  adapter = ''
+
+  output_vdlist.each_line do |line|
+    if (m = adapter_regexp.match(line))
+      adapter = m[1].to_i
+    elsif (m = virtual_drive_regexp.match(line))
+      virtual_disks << { adapter: adapter, drive: m[1].to_i }
+    end
+  end
+
+  [physical_disks, virtual_disks]
+end
+
+# Enable the disks
+# The megacli command changes the state of the drive from Unconfigured Good
+# to JBOD (Just a Bunch of Disks).
+# A new drive in JBOD state is exposed to the host operating system as a
+# stand-alone drive. Drives in JBOD drive state are not part of the RAID
+# configuration.
+def enable(physical_disks)
+  num_enable_errors = 0
+  physical_disks.each do |disk|
+    # Sleep a bit before enabling to give the kernel time to detect disks that were
+    # previously removed, or disks that were just enabled.
+    # If we do that too fast, the kernel might pick up disks in a random order.
+    # See bug https://intranet.grid5000.fr/bugzilla/show_bug.cgi?id=9238 for details.
+    sleep(ENABLE_DELAY)
+    status, _output = sh("#{MEGACLI} -PDMakeJBOD -PhysDrv [#{disk[:enclosure]}:#{disk[:slot]}] -a#{disk[:adapter]}")
+    debug "Enabling disk #{disk} => Return code = #{status}"
+    num_enable_errors += 1 unless status.zero?
+  end
+  # Also sleep after enabling the last disk
+  sleep(ENABLE_LAST_DELAY)
+  num_enable_errors
+end
+
+# Disable the disks
+# The megacli command changes the state of the drive from JBOD to
+# Unconfigured Good. When in Unconfigured Good state, the disk is accessible
+# to the RAID controller but not configured as a part of a virtual disk
+# or as a hot spare.
+def disable(physical_disks)
+  num_disable_errors = 0
+  physical_disks.each do |disk|
+    status, _output = sh("#{MEGACLI} -PDMakeGood -PhysDrv [#{disk[:enclosure]}:#{disk[:slot]}] -force -a#{disk[:adapter]}")
+    debug "Disabling disk #{disk} => Return code = #{status}"
+    num_disable_errors += 1 unless status.zero?
+  end
+  sleep(DISABLE_DELAY)
+  num_disable_errors
+end
+
+# Main program
+
+MEGACLI = '/usr/sbin/megacli'.freeze
+
+_status, TMPDIR = sh('mktemp -d /tmp/tmp.g5k-disk-manager-backend.XXXXXX')
+main
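A usage sketch based on the option parser above (disk ids are illustrative; id 0 refers to the system disk and is rejected by disks_locations):

    g5k-disk-manager-backend --on-boot                 # enable all reservable disks at boot, in a stable order
    g5k-disk-manager-backend --enable 1,2 --disable 3
    g5k-disk-manager-backend --on-boot --force         # skip the user-deployment check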
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/g5k-disk-manager-backend.service
 
b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/g5k-disk-manager-backend.service
new file mode 100644
index 0000000..1172832
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/g5k-disk-manager-backend.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=activation of all disks before g5k-checks is launched
+After=network-online.target
+Before=oar-node.service
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/libexec/g5k-disk-manager-backend --on-boot
+
+[Install]
+WantedBy=multi-user.target
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/g5k-pmem-manager
 
b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/g5k-pmem-manager
new file mode 100644
index 0000000..b9c12ed
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/g5k-pmem-manager
@@ -0,0 +1,115 @@
+#!/usr/bin/env ruby
+# coding: utf-8
+
+# INSTALLED BY PUPPET
+# Location : puppet/modules/env/files/std/g5k-manager/g5k-pmem-manager
+
+require 'open-uri'
+require 'json'
+require 'optparse'
+require 'fileutils'
+require_relative '../lib/g5k/g5k-manager.rb'
+
+$reboot_lock_dir  = '/var/lib/g5k-pmem-manager'
+$reboot_lock_file = File.join($reboot_lock_dir, 'run.lock')
+
+def main
+  hostname = `hostname`.chomp
+
+  unless have_pmem?
+    notice('No PMEM in this system, nothing to do')
+    exit
+  end
+
+  if user_deploy?(hostname)
+    notice('The environment is deployed by a user: PMEM configuration left unchanged')
+    exit
+  end
+
+  if rebooted?
+    if !defined_regions? && !defined_namespaces? && !defined_goals?
+      notice('PMEM switch to Memory Mode is effective after reboot')
+      FileUtils.rm_f($reboot_lock_file)
+      exit
+    else
+      error(1, 'PMEM was not changed to Memory Mode after reboot, something went wrong')
+    end
+  end
+
+  if defined_goals?
+    error(1, 'Some PMEM configuration goals are defined. This is unexpected')
+  elsif defined_regions? || defined_namespaces?
+    notice('PMEM App Direct configuration found, removing and switching to full Memory Mode')
+    memory_mode
+  else
+    notice('The PMEM is in Memory Mode as expected')
+    exit
+  end
+end
+
+def have_pmem?
+  ref_api = JSON.parse(File.read('/etc/grid5000/ref-api.json'))
+  ref_api['main_memory'].key?('pmem_size')
+end
+
+def defined_regions?
+  status, stdout = sh('ipmctl show -region')
+  unless status.zero?
+    error(1, 'ipmctl failed to list regions')
+    return false
+  end
+
+  stdout.match(/There are no Regions defined/) ? false : true
+end
+
+def defined_namespaces?
+  status, stdout = sh('ndctl list')
+  unless status.zero?
+    error(1, 'ndctl failed to list namespaces')
+  end
+
+  stdout.empty? ? false : true
+end
+
+def defined_goals?
+  status, stdout = sh('ipmctl show -goal')
+  unless status.zero?
+    error(1, 'ipmctl failed to list goals')
+  end
+
+  stdout.match(/no goal configs defined in the system/) ? false : true
+end
+
+def rebooted?
+  File.exist?($reboot_lock_file)
+end
+
+def memory_mode
+  # ndctl destroy-namespace return code isn't 0 when there's no namespace,
+  # even with the --force-all argument
+  if defined_namespaces?
+    status, _stdout = sh('ndctl destroy-namespace --force all')
+    unless status.zero?
+      error(1, 'ndctl failed to destroy namespaces')
+    end
+  end
+
+  status, _stdout = sh('ipmctl create -force -goal MemoryMode=100')
+  unless status.zero?
+    error(1, 'ipmctl failed to provision Memory Mode')
+  end
+
+  begin
+    FileUtils.mkdir_p($reboot_lock_dir)
+    FileUtils.touch($reboot_lock_file)
+  rescue => e
+    error(1, "Unable to create #{$reboot_lock_file}, error: #{e}")
+  end
+
+  status, _stdout = sh('reboot')
+  unless status.zero?
+    error(1, 'System failed to reboot')
+  end
+end
+
+main
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/g5k-pmem-manager.service
 
b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/g5k-pmem-manager.service
new file mode 100644
index 0000000..2bc296d
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/g5k-pmem-manager.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Check pmem configuration and put in memory mode
+After=network-online.target
+Before=oar-node.service
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/libexec/g5k-pmem-manager
+StandardOutput=journal+console
+
+[Install]
+WantedBy=multi-user.target
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/lib/g5k-manager.rb
 
b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/lib/g5k-manager.rb
new file mode 100644
index 0000000..7569501
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k-manager/lib/g5k-manager.rb
@@ -0,0 +1,79 @@
+# coding: utf-8
+
+# INSTALLED BY PUPPET
+# Location : puppet/modules/env/files/std/g5k-manager/lib/g5k-manager.rb
+
+require 'open-uri'
+require 'json'
+require 'optparse'
+
+def sh(cmd)
+  output = `#{cmd}`.chomp
+  status = $?.exitstatus
+  return [status, output]
+end
+
+# systemd log levels:
+# see http://0pointer.net/blog/projects/journal-submit.html
+# and http://man7.org/linux/man-pages/man3/syslog.3.html
+def notice(msg)
+  log_notice = 5 # normal, but significant, condition
+  puts "<#{log_notice}> #{msg}"
+end
+
+def debug(msg)
+  log_debug = 7 # debug-level message
+  puts "<#{log_debug}> #{msg}" if DEBUG
+end
+
+def error(status, msg)
+  log_err = 3 # error conditions
+  puts "<#{log_err}> #{msg}"
+  rmtmp
+  exit status
+end
+
+def rmtmp
+  if defined?(TMPDIR)
+    Dir.chdir('/root')
+    sh("rm -rf #{TMPDIR}")
+  end
+end
+
+# If property 'soft'='free', the standard environment is being
+# deployed by an admin (outside a job) or phoenix.
+# Else, it is a user that is deploying the standard environment
+# For the different states, see:
+# https://github.com/grid5000/g5k-api/blob/master/lib/oar/resource.rb#L45
+def user_deploy?(hostname)
+  tries = 3
+  begin
+    url = G5K_API + '/sites/' + site(hostname) + '/status?disks=no&job_details=no&waiting=no&network_address=' + hostname
+    hash = JSON::parse(open(url, 'User-Agent' => 'g5k-manager (for disk and pmem)').read)
+  rescue
+    tries -= 1
+    if tries > 0
+      debug("Fetching #{url} failed. Sleeping 1s and retry.")
+      sleep(1)
+      retry
+    else
+      error(1, "Fetching #{url} failed too many times")
+    end
+  end
+
+  status = hash['nodes'][hostname]
+  debug("Node status: soft=#{status['soft']}, hard=#{status['hard']}")
+  user_deploy = (status['hard'] == 'alive' and status['soft'] != 'free')
+  return user_deploy
+end
+
+def cluster(hostname)
+  return hostname.split('-')[0]
+end
+
+def site(hostname)
+  return hostname.split('.')[1]
+end
+
+G5K_API = 'https://api.grid5000.fr/stable'
+DEBUG = true
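As a quick orientation for this shared library, here is a hedged usage sketch; the require path, the hostname and the uptime command are illustrative assumptions, while sh, notice, debug, error, cluster and site are the helpers defined above.

    require_relative 'g5k-manager'                # hypothetical load path

    hostname = 'gros-42.nancy.grid5000.fr'        # example value, not taken from this commit
    notice("cluster #{cluster(hostname)} at site #{site(hostname)}")
    # cluster(...) => "gros" (text before the first '-'); site(...) => "nancy" (second dot-separated part)

    status, output = sh('uptime -p')              # sh returns [exit status, chomped stdout]
    error(1, 'uptime failed') unless status.zero?
    debug("uptime: #{output}")                    # only printed while DEBUG is true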
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k_generator/g5k_generator b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k_generator/g5k_generator
new file mode 100644
index 0000000..675df66
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5k_generator/g5k_generator
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+#for tests
+DEST=/tmp
+
+# Standard systemd generator convention: generators are invoked with three arguments, or none.
+if [[ $# -gt 0 && $# -ne 3 ]]; then
+    echo "This program takes three or no arguments."
+    exit 0
+fi
+
+# See https://www.freedesktop.org/wiki/Software/systemd/Generators/
+# $2 is the early generator directory, used to override units generated by systemd-sysv-generator in $3
+DEST="$2"
+
+##### Disable the Dell dataeng service if not a Dell Machine #####
+
+SMBIOS_UTIL="/usr/sbin/smbios-sys-info-lite"
+
+if [[ ! -x "$SMBIOS_UTIL" ]];
+then
+    echo "G5k systemd generator, ${SMBIOS_UTIL} cannot be executed" > /dev/kmsg
+    exit 0
+fi
+
+#Simple check, copied from Dell tools (CheckSystemType).
+"$SMBIOS_UTIL" | /bin/egrep "(Vendor|OEM String 1)" | /usr/bin/awk -F":" 
'{print $2}' | /bin/grep -qi "Dell"
+
+if [[ "$?" != "0" ]]; then
+    /bin/ln -sf "/dev/null" "${DEST}/dataeng.service"
+else
+    #Remove possibly existing symlink
+    /bin/rm -f "${DEST}/dataeng.service"
+fi
+
+##### End dataeng service override #####
+
+exit 0
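The trick used above is the standard way for a systemd generator to mask a unit: symlink the unit name to /dev/null inside the early generator directory passed as $2. As a hedged illustration in Ruby (the language of the other scripts in this commit), with dell_hardware? and the fallback path being assumptions:

    require 'fileutils'

    # Simplified stand-in for the smbios vendor check performed by the script above.
    def dell_hardware?
      `/usr/sbin/smbios-sys-info-lite 2>/dev/null`.match?(/dell/i)
    end

    early_dir = ARGV[1] || '/run/systemd/generator.early'   # $2 when invoked by systemd
    unit      = File.join(early_dir, 'dataeng.service')
    if dell_hardware?
      FileUtils.rm_f(unit)                 # keep the real dataeng.service on Dell hardware
    else
      FileUtils.ln_sf('/dev/null', unit)   # mask the unit everywhere else
    end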
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/g5kchecks/g5k-checks.conf b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5kchecks/g5k-checks.conf
new file mode 100644
index 0000000..32c7a76
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/g5kchecks/g5k-checks.conf
@@ -0,0 +1,39 @@
+# SET BY PUPPET
+# This is a yaml file for G5kchecks configuration 
+---
+
+# destination directory for the g5kchecks output file in case of failure
+output_dir: /var/lib/g5kchecks/
+
+# List of strings/regexps to exclude from the test list.
+# Each entry is the access path of an API property.
+# Example 1: to skip the rate check of the eth0 network adapter:
+# network_adapters.eth0.rate
+# Example 2: to skip everything about eth0:
+# network_adapters.eth0
+# Example 3: to skip tests matching a regexp:
+# network_adapters.ib\d+.mac
+removetestlist:
+ - "storage_devices..+.timeread"
+ - "storage_devices..+.timewrite"
+
+# check if the directories below are mounted
+#mountpoint:
+#  - /export/home 
+#  - /dev/sda5 
+
+# type of input description (retrieve json from rest or file)
+retrieve_from: rest
+
+# in case of rest, should provide an url
+retrieve_url: https://api.grid5000.fr/3.0
+
+# specify a branch at the end of uri (syntax will be: ?branch=mybranch at the
+# end of uri)
+#branch: mybranch
+
+# Add another branch if the first url doesn't work
+fallback_branch: testing
+
+# in case of file, should provide a directory
+#retrieve_dir: /tmp/
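To make the removetestlist semantics concrete, here is a hedged Ruby sketch that treats each entry as a regular expression over the dotted property path; it illustrates the matching idea only and is not the g5kchecks implementation (the anchoring and the example path are assumptions).

    removetestlist = ['storage_devices..+.timeread', 'storage_devices..+.timewrite']
    property_path  = 'storage_devices.sda.timeread'   # example path, not taken from this commit
    skipped = removetestlist.any? { |pat| /\A#{pat}\z/.match?(property_path) }
    puts skipped   # => true: the read-timing benchmark on sda would be excluded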
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/lvm/lvm.conf b/grid5000/steps/data/setup/puppet/modules/env/files/std/lvm/lvm.conf
new file mode 100644
index 0000000..db20b9b
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/lvm/lvm.conf
@@ -0,0 +1,2096 @@
+# This is an example configuration file for the LVM2 system.
+# It contains the default settings that would be used if there was no
+# /etc/lvm/lvm.conf file.
+#
+# Refer to 'man lvm.conf' for further information including the file layout.
+#
+# Refer to 'man lvm.conf' for information about how settings configured in
+# this file are combined with built-in values and command line options to
+# arrive at the final values used by LVM.
+#
+# Refer to 'man lvmconfig' for information about displaying the built-in
+# and configured values used by LVM.
+#
+# If a default value is set in this file (not commented out), then a
+# new version of LVM using this file will continue using that value,
+# even if the new version of LVM changes the built-in default value.
+#
+# To put this file in a different directory and override /etc/lvm set
+# the environment variable LVM_SYSTEM_DIR before running the tools.
+#
+# N.B. Take care that each setting only appears once if uncommenting
+# example settings in this file.
+
+
+# Configuration section config.
+# How LVM configuration settings are handled.
+config {
+
+       # Configuration option config/checks.
+       # If enabled, any LVM configuration mismatch is reported.
+       # This implies checking that the configuration key is understood by
+       # LVM and that the value of the key is the proper type. If disabled,
+       # any configuration mismatch is ignored and the default value is used
+       # without any warning (a message about the configuration key not being
+       # found is issued in verbose mode only).
+       checks = 1
+
+       # Configuration option config/abort_on_errors.
+       # Abort the LVM process if a configuration mismatch is found.
+       abort_on_errors = 0
+
+       # Configuration option config/profile_dir.
+       # Directory where LVM looks for configuration profiles.
+       profile_dir = "/etc/lvm/profile"
+}
+
+# Configuration section devices.
+# How LVM uses block devices.
+devices {
+
+       # Configuration option devices/dir.
+       # Directory in which to create volume group device nodes.
+       # Commands also accept this as a prefix on volume group names.
+       # This configuration option is advanced.
+       dir = "/dev"
+
+       # Configuration option devices/scan.
+       # Directories containing device nodes to use with LVM.
+       # This configuration option is advanced.
+       scan = [ "/dev" ]
+
+       # Configuration option devices/obtain_device_list_from_udev.
+       # Obtain the list of available devices from udev.
+       # This avoids opening or using any inapplicable non-block devices or
+       # subdirectories found in the udev directory. Any device node or
+       # symlink not managed by udev in the udev directory is ignored. This
+       # setting applies only to the udev-managed device directory; other
+       # directories will be scanned fully. LVM needs to be compiled with
+       # udev support for this setting to apply.
+       obtain_device_list_from_udev = 1
+
+       # Configuration option devices/external_device_info_source.
+       # Select an external device information source.
+       # Some information may already be available in the system and LVM can
+       # use this information to determine the exact type or use of devices it
+       # processes. Using an existing external device information source can
+       # speed up device processing as LVM does not need to run its own native
+       # routines to acquire this information. For example, this information
+       # is used to drive LVM filtering like MD component detection, multipath
+       # component detection, partition detection and others.
+       # 
+       # Accepted values:
+       #   none
+       #     No external device information source is used.
+       #   udev
+       #     Reuse existing udev database records. Applicable only if LVM is
+       #     compiled with udev support.
+       # 
+       external_device_info_source = "none"
+
+       # Configuration option devices/preferred_names.
+       # Select which path name to display for a block device.
+       # If multiple path names exist for a block device, and LVM needs to
+       # display a name for the device, the path names are matched against
+       # each item in this list of regular expressions. The first match is
+       # used. Try to avoid using undescriptive /dev/dm-N names, if present.
+       # If no preferred name matches, or if preferred_names are not defined,
+       # the following built-in preferences are applied in order until one
+       # produces a preferred name:
+       # Prefer names with path prefixes in the order of:
+       # /dev/mapper, /dev/disk, /dev/dm-*, /dev/block.
+       # Prefer the name with the least number of slashes.
+       # Prefer a name that is a symlink.
+       # Prefer the path with least value in lexicographical order.
+       # 
+       # Example
+       # preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
+       # 
+       # This configuration option does not have a default value defined.
+
+       # Configuration option devices/filter.
+       # Limit the block devices that are used by LVM commands.
+       # This is a list of regular expressions used to accept or reject block
+       # device path names. Each regex is delimited by a vertical bar '|'
+       # (or any character) and is preceded by 'a' to accept the path, or
+       # by 'r' to reject the path. The first regex in the list to match the
+       # path is used, producing the 'a' or 'r' result for the device.
+       # When multiple path names exist for a block device, if any path name
+       # matches an 'a' pattern before an 'r' pattern, then the device is
+       # accepted. If all the path names match an 'r' pattern first, then the
+       # device is rejected. Unmatching path names do not affect the accept
+       # or reject decision. If no path names for a device match a pattern,
+       # then the device is accepted. Be careful mixing 'a' and 'r' patterns,
+       # as the combination might produce unexpected results (test changes.)
+       # Run vgscan after changing the filter to regenerate the cache.
+       # See the use_lvmetad comment for a special case regarding filters.
+       # 
+       # Example
+       # Accept every block device:
+       # filter = [ "a|.*/|" ]
+       # Reject the cdrom drive:
+       # filter = [ "r|/dev/cdrom|" ]
+       # Work with just loopback devices, e.g. for testing:
+       # filter = [ "a|loop|", "r|.*|" ]
+       # Accept all loop devices and ide drives except hdc:
+       # filter = [ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
+       # Use anchors to be very specific:
+       # filter = [ "a|^/dev/hda8$|", "r|.*/|" ]
+       # 
+       # This configuration option has an automatic default value.
+       # filter = [ "a|.*/|" ]
+global_filter = [ "r|.*/|" ]
+
+       # Configuration option devices/global_filter.
+       # Limit the block devices that are used by LVM system components.
+       # Because devices/filter may be overridden from the command line, it is
+       # not suitable for system-wide device filtering, e.g. udev and lvmetad.
+       # Use global_filter to hide devices from these LVM system components.
+       # The syntax is the same as devices/filter. Devices rejected by
+       # global_filter are not opened by LVM.
+       # This configuration option has an automatic default value.
+       # global_filter = [ "a|.*/|" ]
+
+       # Configuration option devices/cache_dir.
+       # Directory in which to store the device cache file.
+       # The results of filtering are cached on disk to avoid rescanning dud
+       # devices (which can take a very long time). By default this cache is
+       # stored in a file named .cache. It is safe to delete this file; the
+       # tools regenerate it. If obtain_device_list_from_udev is enabled, the
+       # list of devices is obtained from udev and any existing .cache file
+       # is removed.
+       cache_dir = "/run/lvm"
+
+       # Configuration option devices/cache_file_prefix.
+       # A prefix used before the .cache file name. See devices/cache_dir.
+       cache_file_prefix = ""
+
+       # Configuration option devices/write_cache_state.
+       # Enable/disable writing the cache file. See devices/cache_dir.
+       write_cache_state = 1
+
+       # Configuration option devices/types.
+       # List of additional acceptable block device types.
+       # These are of device type names from /proc/devices, followed by the
+       # maximum number of partitions.
+       # 
+       # Example
+       # types = [ "fd", 16 ]
+       # 
+       # This configuration option is advanced.
+       # This configuration option does not have a default value defined.
+
+       # Configuration option devices/sysfs_scan.
+       # Restrict device scanning to block devices appearing in sysfs.
+       # This is a quick way of filtering out block devices that are not
+       # present on the system. sysfs must be part of the kernel and mounted.
+       sysfs_scan = 1
+
+       # Configuration option devices/multipath_component_detection.
+       # Ignore devices that are components of DM multipath devices.
+       multipath_component_detection = 1
+
+       # Configuration option devices/md_component_detection.
+       # Ignore devices that are components of software RAID (md) devices.
+       md_component_detection = 1
+
+       # Configuration option devices/fw_raid_component_detection.
+       # Ignore devices that are components of firmware RAID devices.
+       # LVM must use an external_device_info_source other than none for this
+       # detection to execute.
+       fw_raid_component_detection = 0
+
+       # Configuration option devices/md_chunk_alignment.
+       # Align PV data blocks with md device's stripe-width.
+       # This applies if a PV is placed directly on an md device.
+       md_chunk_alignment = 1
+
+       # Configuration option devices/default_data_alignment.
+       # Default alignment of the start of a PV data area in MB.
+       # If set to 0, a value of 64KiB will be used.
+       # Set to 1 for 1MiB, 2 for 2MiB, etc.
+       # This configuration option has an automatic default value.
+       # default_data_alignment = 1
+
+       # Configuration option devices/data_alignment_detection.
+       # Detect PV data alignment based on sysfs device information.
+       # The start of a PV data area will be a multiple of minimum_io_size or
+       # optimal_io_size exposed in sysfs. minimum_io_size is the smallest
+       # request the device can perform without incurring a read-modify-write
+       # penalty, e.g. MD chunk size. optimal_io_size is the device's
+       # preferred unit of receiving I/O, e.g. MD stripe width.
+       # minimum_io_size is used if optimal_io_size is undefined (0).
+       # If md_chunk_alignment is enabled, that detects the optimal_io_size.
+       # This setting takes precedence over md_chunk_alignment.
+       data_alignment_detection = 1
+
+       # Configuration option devices/data_alignment.
+       # Alignment of the start of a PV data area in KiB.
+       # If a PV is placed directly on an md device and md_chunk_alignment or
+       # data_alignment_detection are enabled, then this setting is ignored.
+       # Otherwise, md_chunk_alignment and data_alignment_detection are
+       # disabled if this is set. Set to 0 to use the default alignment or the
+       # page size, if larger.
+       data_alignment = 0
+
+       # Configuration option devices/data_alignment_offset_detection.
+       # Detect PV data alignment offset based on sysfs device information.
+       # The start of a PV aligned data area will be shifted by the
+       # alignment_offset exposed in sysfs. This offset is often 0, but may
+       # be non-zero. Certain 4KiB sector drives that compensate for windows
+       # partitioning will have an alignment_offset of 3584 bytes (sector 7
+       # is the lowest aligned logical block, the 4KiB sectors start at
+       # LBA -1, and consequently sector 63 is aligned on a 4KiB boundary).
+       # pvcreate --dataalignmentoffset will skip this detection.
+       data_alignment_offset_detection = 1
+
+       # Configuration option devices/ignore_suspended_devices.
+       # Ignore DM devices that have I/O suspended while scanning devices.
+       # Otherwise, LVM waits for a suspended device to become accessible.
+       # This should only be needed in recovery situations.
+       ignore_suspended_devices = 0
+
+       # Configuration option devices/ignore_lvm_mirrors.
+       # Do not scan 'mirror' LVs to avoid possible deadlocks.
+       # This avoids possible deadlocks when using the 'mirror' segment type.
+       # This setting determines whether LVs using the 'mirror' segment type
+       # are scanned for LVM labels. This affects the ability of mirrors to
+       # be used as physical volumes. If this setting is enabled, it is
+       # impossible to create VGs on top of mirror LVs, i.e. to stack VGs on
+       # mirror LVs. If this setting is disabled, allowing mirror LVs to be
+       # scanned, it may cause LVM processes and I/O to the mirror to become
+       # blocked. This is due to the way that the mirror segment type handles
+       # failures. In order for the hang to occur, an LVM command must be run
+       # just after a failure and before the automatic LVM repair process
+       # takes place, or there must be failures in multiple mirrors in the
+       # same VG at the same time with write failures occurring moments before
+       # a scan of the mirror's labels. The 'mirror' scanning problems do not
+       # apply to LVM RAID types like 'raid1' which handle failures in a
+       # different way, making them a better choice for VG stacking.
+       ignore_lvm_mirrors = 1
+
+       # Configuration option devices/disable_after_error_count.
+       # Number of I/O errors after which a device is skipped.
+       # During each LVM operation, errors received from each device are
+       # counted. If the counter of a device exceeds the limit set here,
+       # no further I/O is sent to that device for the remainder of the
+       # operation. Setting this to 0 disables the counters altogether.
+       disable_after_error_count = 0
+
+       # Configuration option devices/require_restorefile_with_uuid.
+       # Allow use of pvcreate --uuid without requiring --restorefile.
+       require_restorefile_with_uuid = 1
+
+       # Configuration option devices/pv_min_size.
+       # Minimum size in KiB of block devices which can be used as PVs.
+       # In a clustered environment all nodes must use the same value.
+       # Any value smaller than 512KiB is ignored. The previous built-in
+       # value was 512.
+       pv_min_size = 2048
+
+       # Configuration option devices/issue_discards.
+       # Issue discards to PVs that are no longer used by an LV.
+       # Discards are sent to an LV's underlying physical volumes when the LV
+       # is no longer using the physical volumes' space, e.g. lvremove,
+       # lvreduce. Discards inform the storage that a region is no longer
+       # used. Storage that supports discards advertise the protocol-specific
+       # way discards should be issued by the kernel (TRIM, UNMAP, or
+       # WRITE SAME with UNMAP bit set). Not all storage will support or
+       # benefit from discards, but SSDs and thinly provisioned LUNs
+       # generally do. If enabled, discards will only be issued if both the
+       # storage and kernel provide support.
+       issue_discards = 0
+
+       # Configuration option devices/allow_changes_with_duplicate_pvs.
+       # Allow VG modification while a PV appears on multiple devices.
+       # When a PV appears on multiple devices, LVM attempts to choose the
+       # best device to use for the PV. If the devices represent the same
+       # underlying storage, the choice has minimal consequence. If the
+       # devices represent different underlying storage, the wrong choice
+       # can result in data loss if the VG is modified. Disabling this
+       # setting is the safest option because it prevents modifying a VG
+       # or activating LVs in it while a PV appears on multiple devices.
+       # Enabling this setting allows the VG to be used as usual even with
+       # uncertain devices.
+       allow_changes_with_duplicate_pvs = 0
+}
+
+# Configuration section allocation.
+# How LVM selects space and applies properties to LVs.
+allocation {
+
+       # Configuration option allocation/cling_tag_list.
+       # Advise LVM which PVs to use when searching for new space.
+       # When searching for free space to extend an LV, the 'cling' allocation
+       # policy will choose space on the same PVs as the last segment of the
+       # existing LV. If there is insufficient space and a list of tags is
+       # defined here, it will check whether any of them are attached to the
+       # PVs concerned and then seek to match those PV tags between existing
+       # extents and new extents.
+       # 
+       # Example
+       # Use the special tag "@*" as a wildcard to match any PV tag:
+       # cling_tag_list = [ "@*" ]
+       # LVs are mirrored between two sites within a single VG, and
+       # PVs are tagged with either @site1 or @site2 to indicate where
+       # they are situated:
+       # cling_tag_list = [ "@site1", "@site2" ]
+       # 
+       # This configuration option does not have a default value defined.
+
+       # Configuration option allocation/maximise_cling.
+       # Use a previous allocation algorithm.
+       # Changes made in version 2.02.85 extended the reach of the 'cling'
+       # policies to detect more situations where data can be grouped onto
+       # the same disks. This setting can be used to disable the changes
+       # and revert to the previous algorithm.
+       maximise_cling = 1
+
+       # Configuration option allocation/use_blkid_wiping.
+       # Use blkid to detect existing signatures on new PVs and LVs.
+       # The blkid library can detect more signatures than the native LVM
+       # detection code, but may take longer. LVM needs to be compiled with
+       # blkid wiping support for this setting to apply. LVM native detection
+       # code is currently able to recognize: MD device signatures,
+       # swap signature, and LUKS signatures. To see the list of signatures
+       # recognized by blkid, check the output of the 'blkid -k' command.
+       use_blkid_wiping = 1
+
+       # Configuration option allocation/wipe_signatures_when_zeroing_new_lvs.
+       # Look for and erase any signatures while zeroing a new LV.
+       # The --wipesignatures option overrides this setting.
+       # Zeroing is controlled by the -Z/--zero option, and if not specified,
+       # zeroing is used by default if possible. Zeroing simply overwrites the
+       # first 4KiB of a new LV with zeroes and does no signature detection or
+       # wiping. Signature wiping goes beyond zeroing and detects exact types
+       # and positions of signatures within the whole LV. It provides a
+       # cleaner LV after creation as all known signatures are wiped. The LV
+       # is not claimed incorrectly by other tools because of old signatures
+       # from previous use. The number of signatures that LVM can detect
+       # depends on the detection code that is selected (see
+       # use_blkid_wiping.) Wiping each detected signature must be confirmed.
+       # When this setting is disabled, signatures on new LVs are not detected
+       # or erased unless the --wipesignatures option is used directly.
+       wipe_signatures_when_zeroing_new_lvs = 1
+
+       # Configuration option allocation/mirror_logs_require_separate_pvs.
+       # Mirror logs and images will always use different PVs.
+       # The default setting changed in version 2.02.85.
+       mirror_logs_require_separate_pvs = 0
+
+       # Configuration option allocation/raid_stripe_all_devices.
+       # Stripe across all PVs when RAID stripes are not specified.
+       # If enabled, all PVs in the VG or on the command line are used for raid0/4/5/6/10
+       # when the command does not specify the number of stripes to use.
+       # This was the default behaviour until release 2.02.162.
+       # This configuration option has an automatic default value.
+       # raid_stripe_all_devices = 0
+
+       # Configuration option allocation/cache_pool_metadata_require_separate_pvs.
+       # Cache pool metadata and data will always use different PVs.
+       cache_pool_metadata_require_separate_pvs = 0
+
+       # Configuration option allocation/cache_mode.
+       # The default cache mode used for new cache.
+       # 
+       # Accepted values:
+       #   writethrough
+       #     Data blocks are immediately written from the cache to disk.
+       #   writeback
+       #     Data blocks are written from the cache back to disk after some
+       #     delay to improve performance.
+       # 
+       # This setting replaces allocation/cache_pool_cachemode.
+       # This configuration option has an automatic default value.
+       # cache_mode = "writethrough"
+
+       # Configuration option allocation/cache_policy.
+       # The default cache policy used for new cache volume.
+       # Since kernel 4.2 the default policy is smq (Stochastic multiqueue),
+       # otherwise the older mq (Multiqueue) policy is selected.
+       # This configuration option does not have a default value defined.
+
+       # Configuration section allocation/cache_settings.
+       # Settings for the cache policy.
+       # See documentation for individual cache policies for more info.
+       # This configuration section has an automatic default value.
+       # cache_settings {
+       # }
+
+       # Configuration option allocation/cache_pool_chunk_size.
+       # The minimal chunk size in KiB for cache pool volumes.
+       # Using a chunk_size that is too large can result in wasteful use of
+       # the cache, where small reads and writes can cause large sections of
+       # an LV to be mapped into the cache. However, choosing a chunk_size
+       # that is too small can result in more overhead trying to manage the
+       # numerous chunks that become mapped into the cache. The former is
+       # more of a problem than the latter in most cases, so the default is
+       # on the smaller end of the spectrum. Supported values range from
+       # 32KiB to 1GiB in multiples of 32.
+       # This configuration option does not have a default value defined.
+
+       # Configuration option allocation/cache_pool_max_chunks.
+       # The maximum number of chunks in a cache pool.
+       # For cache target v1.9 the recommended maximum is 1000000 chunks.
+       # Using cache pool with more chunks may degrade cache performance.
+       # This configuration option does not have a default value defined.
+
+       # Configuration option allocation/thin_pool_metadata_require_separate_pvs.
+       # Thin pool metadata and data will always use different PVs.
+       thin_pool_metadata_require_separate_pvs = 0
+
+       # Configuration option allocation/thin_pool_zero.
+       # Thin pool data chunks are zeroed before they are first used.
+       # Zeroing with a larger thin pool chunk size reduces performance.
+       # This configuration option has an automatic default value.
+       # thin_pool_zero = 1
+
+       # Configuration option allocation/thin_pool_discards.
+       # The discards behaviour of thin pool volumes.
+       # 
+       # Accepted values:
+       #   ignore
+       #   nopassdown
+       #   passdown
+       # 
+       # This configuration option has an automatic default value.
+       # thin_pool_discards = "passdown"
+
+       # Configuration option allocation/thin_pool_chunk_size_policy.
+       # The chunk size calculation policy for thin pool volumes.
+       # 
+       # Accepted values:
+       #   generic
+       #     If thin_pool_chunk_size is defined, use it. Otherwise, calculate
+       #     the chunk size based on estimation and device hints exposed in
+       #     sysfs - the minimum_io_size. The chunk size is always at least
+       #     64KiB.
+       #   performance
+       #     If thin_pool_chunk_size is defined, use it. Otherwise, calculate
+       #     the chunk size for performance based on device hints exposed in
+       #     sysfs - the optimal_io_size. The chunk size is always at least
+       #     512KiB.
+       # 
+       # This configuration option has an automatic default value.
+       # thin_pool_chunk_size_policy = "generic"
+
+       # Configuration option allocation/thin_pool_chunk_size.
+       # The minimal chunk size in KiB for thin pool volumes.
+       # Larger chunk sizes may improve performance for plain thin volumes,
+       # however using them for snapshot volumes is less efficient, as it
+       # consumes more space and takes extra time for copying. When unset,
+       # lvm tries to estimate chunk size starting from 64KiB. Supported
+       # values are in the range 64KiB to 1GiB.
+       # This configuration option does not have a default value defined.
+
+       # Configuration option allocation/physical_extent_size.
+       # Default physical extent size in KiB to use for new VGs.
+       # This configuration option has an automatic default value.
+       # physical_extent_size = 4096
+}
+
+# Configuration section log.
+# How LVM log information is reported.
+log {
+
+       # Configuration option log/report_command_log.
+       # Enable or disable LVM log reporting.
+       # If enabled, LVM will collect a log of operations, messages,
+       # per-object return codes with object identification and associated
+       # error numbers (errnos) during LVM command processing. Then the
+       # log is either reported solely or in addition to any existing
+       # reports, depending on LVM command used. If it is a reporting command
+       # (e.g. pvs, vgs, lvs, lvm fullreport), then the log is reported in
+       # addition to any existing reports. Otherwise, there's only log report
+       # on output. For all applicable LVM commands, you can request that
+       # the output has only log report by using --logonly command line
+       # option. Use log/command_log_cols and log/command_log_sort settings
+       # to define fields to display and sort fields for the log report.
+       # You can also use log/command_log_selection to define selection
+       # criteria used each time the log is reported.
+       # This configuration option has an automatic default value.
+       # report_command_log = 0
+
+       # Configuration option log/command_log_sort.
+       # List of columns to sort by when reporting command log.
+       # See <lvm command> --logonly --configreport log -o help
+       # for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # command_log_sort = "log_seq_num"
+
+       # Configuration option log/command_log_cols.
+       # List of columns to report when reporting command log.
+       # See <lvm command> --logonly --configreport log -o help
+       # for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # command_log_cols = "log_seq_num,log_type,log_context,log_object_type,log_object_name,log_object_id,log_object_group,log_object_group_id,log_message,log_errno,log_ret_code"
+
+       # Configuration option log/command_log_selection.
+       # Selection criteria used when reporting command log.
+       # You can define selection criteria that are applied each
+       # time log is reported. This way, it is possible to control the
+       # amount of log that is displayed on output and you can select
+       # only parts of the log that are important for you. To define
+       # selection criteria, use fields from log report. See also
+       # <lvm command> --logonly --configreport log -S help for the
+       # list of possible fields and selection operators. You can also
+       # define selection criteria for log report on command line directly
+       # using <lvm command> --configreport log -S <selection criteria>
+       # which has precedence over log/command_log_selection setting.
+       # For more information about selection criteria in general, see
+       # lvm(8) man page.
+       # This configuration option has an automatic default value.
+       # command_log_selection = "!(log_type=status && message=success)"
+
+       # Configuration option log/verbose.
+       # Controls the messages sent to stdout or stderr.
+       verbose = 0
+
+       # Configuration option log/silent.
+       # Suppress all non-essential messages from stdout.
+       # This has the same effect as -qq. When enabled, the following commands
+       # still produce output: dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck,
+       # pvdisplay, pvs, version, vgcfgrestore -l, vgdisplay, vgs.
+       # Non-essential messages are shifted from log level 4 to log level 5
+       # for syslog and lvm2_log_fn purposes.
+       # Any 'yes' or 'no' questions not overridden by other arguments are
+       # suppressed and default to 'no'.
+       silent = 0
+
+       # Configuration option log/syslog.
+       # Send log messages through syslog.
+       syslog = 1
+
+       # Configuration option log/file.
+       # Write error and debug log messages to a file specified here.
+       # This configuration option does not have a default value defined.
+
+       # Configuration option log/overwrite.
+       # Overwrite the log file each time the program is run.
+       overwrite = 0
+
+       # Configuration option log/level.
+       # The level of log messages that are sent to the log file or syslog.
+       # There are 6 syslog-like log levels currently in use: 2 to 7 inclusive.
+       # 7 is the most verbose (LOG_DEBUG).
+       level = 0
+
+       # Configuration option log/indent.
+       # Indent messages according to their severity.
+       indent = 1
+
+       # Configuration option log/command_names.
+       # Display the command name on each line of output.
+       command_names = 0
+
+       # Configuration option log/prefix.
+       # A prefix to use before the log message text.
+       # (After the command name, if selected).
+       # Two spaces allows you to see/grep the severity of each message.
+       # To make the messages look similar to the original LVM tools use:
+       # indent = 0, command_names = 1, prefix = " -- "
+       prefix = "  "
+
+       # Configuration option log/activation.
+       # Log messages during activation.
+       # Don't use this in low memory situations (can deadlock).
+       activation = 0
+
+       # Configuration option log/debug_classes.
+       # Select log messages by class.
+       # Some debugging messages are assigned to a class and only appear in
+       # debug output if the class is listed here. Classes currently
+       # available: memory, devices, activation, allocation, lvmetad,
+       # metadata, cache, locking, lvmpolld. Use "all" to see everything.
+       debug_classes = [ "memory", "devices", "activation", "allocation", "lvmetad", "metadata", "cache", "locking", "lvmpolld", "dbus" ]
+}
+
+# Configuration section backup.
+# How LVM metadata is backed up and archived.
+# In LVM, a 'backup' is a copy of the metadata for the current system,
+# and an 'archive' contains old metadata configurations. They are
+# stored in a human readable text format.
+backup {
+
+       # Configuration option backup/backup.
+       # Maintain a backup of the current metadata configuration.
+       # Think very hard before turning this off!
+       backup = 1
+
+       # Configuration option backup/backup_dir.
+       # Location of the metadata backup files.
+       # Remember to back up this directory regularly!
+       backup_dir = "/etc/lvm/backup"
+
+       # Configuration option backup/archive.
+       # Maintain an archive of old metadata configurations.
+       # Think very hard before turning this off.
+       archive = 1
+
+       # Configuration option backup/archive_dir.
+       # Location of the metadata archive files.
+       # Remember to back up this directory regularly!
+       archive_dir = "/etc/lvm/archive"
+
+       # Configuration option backup/retain_min.
+       # Minimum number of archives to keep.
+       retain_min = 10
+
+       # Configuration option backup/retain_days.
+       # Minimum number of days to keep archive files.
+       retain_days = 30
+}
+
+# Configuration section shell.
+# Settings for running LVM in shell (readline) mode.
+shell {
+
+       # Configuration option shell/history_size.
+       # Number of lines of history to store in ~/.lvm_history.
+       history_size = 100
+}
+
+# Configuration section global.
+# Miscellaneous global LVM settings.
+global {
+
+       # Configuration option global/umask.
+       # The file creation mask for any files and directories created.
+       # Interpreted as octal if the first digit is zero.
+       umask = 077
+
+       # Configuration option global/test.
+       # No on-disk metadata changes will be made in test mode.
+       # Equivalent to having the -t option on every command.
+       test = 0
+
+       # Configuration option global/units.
+       # Default value for --units argument.
+       units = "h"
+
+       # Configuration option global/si_unit_consistency.
+       # Distinguish between powers of 1024 and 1000 bytes.
+       # The LVM commands distinguish between powers of 1024 bytes,
+       # e.g. KiB, MiB, GiB, and powers of 1000 bytes, e.g. KB, MB, GB.
+       # If scripts depend on the old behaviour, disable this setting
+       # temporarily until they are updated.
+       si_unit_consistency = 1
+
+       # Configuration option global/suffix.
+       # Display unit suffix for sizes.
+       # This setting has no effect if the units are in human-readable form
+       # (global/units = "h") in which case the suffix is always displayed.
+       suffix = 1
+
+       # Configuration option global/activation.
+       # Enable/disable communication with the kernel device-mapper.
+       # Disable to use the tools to manipulate LVM metadata without
+       # activating any logical volumes. If the device-mapper driver
+       # is not present in the kernel, disabling this should suppress
+       # the error messages.
+       activation = 1
+
+       # Configuration option global/fallback_to_lvm1.
+       # Try running LVM1 tools if LVM cannot communicate with DM.
+       # This option only applies to 2.4 kernels and is provided to help
+       # switch between device-mapper kernels and LVM1 kernels. The LVM1
+       # tools need to be installed with .lvm1 suffices, e.g. vgscan.lvm1.
+       # They will stop working once the lvm2 on-disk metadata format is used.
+       # This configuration option has an automatic default value.
+       # fallback_to_lvm1 = 0
+
+       # Configuration option global/format.
+       # The default metadata format that commands should use.
+       # The -M 1|2 option overrides this setting.
+       # 
+       # Accepted values:
+       #   lvm1
+       #   lvm2
+       # 
+       # This configuration option has an automatic default value.
+       # format = "lvm2"
+
+       # Configuration option global/format_libraries.
+       # Shared libraries that process different metadata formats.
+       # If support for LVM1 metadata was compiled as a shared library use
+       # format_libraries = "liblvm2format1.so"
+       # This configuration option does not have a default value defined.
+
+       # Configuration option global/segment_libraries.
+       # This configuration option does not have a default value defined.
+
+       # Configuration option global/proc.
+       # Location of proc filesystem.
+       # This configuration option is advanced.
+       proc = "/proc"
+
+       # Configuration option global/etc.
+       # Location of /etc system configuration directory.
+       etc = "/etc"
+
+       # Configuration option global/locking_type.
+       # Type of locking to use.
+       # 
+       # Accepted values:
+       #   0
+       #     Turns off locking. Warning: this risks metadata corruption if
+       #     commands run concurrently.
+       #   1
+       #     LVM uses local file-based locking, the standard mode.
+       #   2
+       #     LVM uses the external shared library locking_library.
+       #   3
+       #     LVM uses built-in clustered locking with clvmd.
+       #     This is incompatible with lvmetad. If use_lvmetad is enabled,
+       #     LVM prints a warning and disables lvmetad use.
+       #   4
+       #     LVM uses read-only locking which forbids any operations that
+       #     might change metadata.
+       #   5
+       #     Offers dummy locking for tools that do not need any locks.
+       #     You should not need to set this directly; the tools will select
+       #     when to use it instead of the configured locking_type.
+       #     Do not use lvmetad or the kernel device-mapper driver with this
+       #     locking type. It is used by the --readonly option that offers
+       #     read-only access to Volume Group metadata that cannot be locked
+       #     safely because it belongs to an inaccessible domain and might be
+       #     in use, for example a virtual machine image or a disk that is
+       #     shared by a clustered machine.
+       # 
+       locking_type = 1
+
+       # Configuration option global/wait_for_locks.
+       # When disabled, fail if a lock request would block.
+       wait_for_locks = 1
+
+       # Configuration option global/fallback_to_clustered_locking.
+       # Attempt to use built-in cluster locking if locking_type 2 fails.
+       # If using external locking (type 2) and initialisation fails, with
+       # this enabled, an attempt will be made to use the built-in clustered
+       # locking. Disable this if using a customised locking_library.
+       fallback_to_clustered_locking = 1
+
+       # Configuration option global/fallback_to_local_locking.
+       # Use locking_type 1 (local) if locking_type 2 or 3 fail.
+       # If an attempt to initialise type 2 or type 3 locking failed, perhaps
+       # because cluster components such as clvmd are not running, with this
+       # enabled, an attempt will be made to use local file-based locking
+       # (type 1). If this succeeds, only commands against local VGs will
+       # proceed. VGs marked as clustered will be ignored.
+       fallback_to_local_locking = 1
+
+       # Configuration option global/locking_dir.
+       # Directory to use for LVM command file locks.
+       # Local non-LV directory that holds file-based locks while commands are
+       # in progress. A directory like /tmp that may get wiped on reboot is OK.
+       locking_dir = "/run/lock/lvm"
+
+       # Configuration option global/prioritise_write_locks.
+       # Allow quicker VG write access during high volume read access.
+       # When there are competing read-only and read-write access requests for
+       # a volume group's metadata, instead of always granting the read-only
+       # requests immediately, delay them to allow the read-write requests to
+       # be serviced. Without this setting, write access may be stalled by a
+       # high volume of read-only requests. This option only affects
+       # locking_type 1 viz. local file-based locking.
+       prioritise_write_locks = 1
+
+       # Configuration option global/library_dir.
+       # Search this directory first for shared libraries.
+       # This configuration option does not have a default value defined.
+
+       # Configuration option global/locking_library.
+       # The external locking library to use for locking_type 2.
+       # This configuration option has an automatic default value.
+       # locking_library = "liblvm2clusterlock.so"
+
+       # Configuration option global/abort_on_internal_errors.
+       # Abort a command that encounters an internal error.
+       # Treat any internal errors as fatal errors, aborting the process that
+       # encountered the internal error. Please only enable for debugging.
+       abort_on_internal_errors = 0
+
+       # Configuration option global/detect_internal_vg_cache_corruption.
+       # Internal verification of VG structures.
+       # Check if CRC matches when a parsed VG is used multiple times. This
+       # is useful to catch unexpected changes to cached VG structures.
+       # Please only enable for debugging.
+       detect_internal_vg_cache_corruption = 0
+
+       # Configuration option global/metadata_read_only.
+       # No operations that change on-disk metadata are permitted.
+       # Additionally, read-only commands that encounter metadata in need of
+       # repair will still be allowed to proceed exactly as if the repair had
+       # been performed (except for the unchanged vg_seqno). Inappropriate
+       # use could mess up your system, so seek advice first!
+       metadata_read_only = 0
+
+       # Configuration option global/mirror_segtype_default.
+       # The segment type used by the short mirroring option -m.
+       # The --type mirror|raid1 option overrides this setting.
+       # 
+       # Accepted values:
+       #   mirror
+       #     The original RAID1 implementation from LVM/DM. It is
+       #     characterized by a flexible log solution (core, disk, mirrored),
+       #     and by the necessity to block I/O while handling a failure.
+       #     There is an inherent race in the dmeventd failure handling logic
+       #     with snapshots of devices using this type of RAID1 that in the
+       #     worst case could cause a deadlock. (Also see
+       #     devices/ignore_lvm_mirrors.)
+       #   raid1
+       #     This is a newer RAID1 implementation using the MD RAID1
+       #     personality through device-mapper. It is characterized by a
+       #     lack of log options. (A log is always allocated for every
+       #     device and they are placed on the same device as the image,
+       #     so no separate devices are required.) This mirror
+       #     implementation does not require I/O to be blocked while
+       #     handling a failure. This mirror implementation is not
+       #     cluster-aware and cannot be used in a shared (active/active)
+       #     fashion in a cluster.
+       # 
+       mirror_segtype_default = "raid1"
+
+       # Configuration option global/raid10_segtype_default.
+       # The segment type used by the -i -m combination.
+       # The --type raid10|mirror option overrides this setting.
+       # The --stripes/-i and --mirrors/-m options can both be specified
+       # during the creation of a logical volume to use both striping and
+       # mirroring for the LV. There are two different implementations.
+       # 
+       # Accepted values:
+       #   raid10
+       #     LVM uses MD's RAID10 personality through DM. This is the
+       #     preferred option.
+       #   mirror
+       #     LVM layers the 'mirror' and 'stripe' segment types. The layering
+       #     is done by creating a mirror LV on top of striped sub-LVs,
+       #     effectively creating a RAID 0+1 array. The layering is suboptimal
+       #     in terms of providing redundancy and performance.
+       # 
+       raid10_segtype_default = "raid10"
+
+       # Configuration option global/sparse_segtype_default.
+       # The segment type used by the -V -L combination.
+       # The --type snapshot|thin option overrides this setting.
+       # The combination of -V and -L options creates a sparse LV. There are
+       # two different implementations.
+       # 
+       # Accepted values:
+       #   snapshot
+       #     The original snapshot implementation from LVM/DM. It uses an old
+       #     snapshot that mixes data and metadata within a single COW
+       #     storage volume and performs poorly when the size of stored data
+       #     passes hundreds of MB.
+       #   thin
+       #     A newer implementation that uses thin provisioning. It has a
+       #     bigger minimal chunk size (64KiB) and uses a separate volume for
+       #     metadata. It has better performance, especially when more data
+       #     is used. It also supports full snapshots.
+       # 
+       sparse_segtype_default = "thin"
+
+       # Configuration option global/lvdisplay_shows_full_device_path.
+       # Enable this to reinstate the previous lvdisplay name format.
+       # The default format for displaying LV names in lvdisplay was changed
+       # in version 2.02.89 to show the LV name and path separately.
+       # Previously this was always shown as /dev/vgname/lvname even when that
+       # was never a valid path in the /dev filesystem.
+       # This configuration option has an automatic default value.
+       # lvdisplay_shows_full_device_path = 0
+
+       # Configuration option global/use_lvmetad.
+       # Use lvmetad to cache metadata and reduce disk scanning.
+       # When enabled (and running), lvmetad provides LVM commands with VG
+       # metadata and PV state. LVM commands then avoid reading this
+       # information from disks which can be slow. When disabled (or not
+       # running), LVM commands fall back to scanning disks to obtain VG
+       # metadata. lvmetad is kept updated via udev rules which must be set
+       # up for LVM to work correctly. (The udev rules should be installed
+       # by default.) Without a proper udev setup, changes in the system's
+       # block device configuration will be unknown to LVM, and ignored
+       # until a manual 'pvscan --cache' is run. If lvmetad was running
+       # while use_lvmetad was disabled, it must be stopped, use_lvmetad
+       # enabled, and then started. When using lvmetad, LV activation is
+       # switched to an automatic, event-based mode. In this mode, LVs are
+       # activated based on incoming udev events that inform lvmetad when
+       # PVs appear on the system. When a VG is complete (all PVs present),
+       # it is auto-activated. The auto_activation_volume_list setting
+       # controls which LVs are auto-activated (all by default.)
+       # When lvmetad is updated (automatically by udev events, or directly
+       # by pvscan --cache), devices/filter is ignored and all devices are
+       # scanned by default. lvmetad always keeps unfiltered information
+       # which is provided to LVM commands. Each LVM command then filters
+       # based on devices/filter. This does not apply to other, non-regexp,
+       # filtering settings: component filters such as multipath and MD
+       # are checked during pvscan --cache. To filter a device and prevent
+       # scanning from the LVM system entirely, including lvmetad, use
+       # devices/global_filter.
+       use_lvmetad = 1
+
+       # Configuration option global/lvmetad_update_wait_time.
+       # The number of seconds a command will wait for lvmetad update to finish.
+       # After waiting for this period, a command will not use lvmetad, and
+       # will revert to disk scanning.
+       # This configuration option has an automatic default value.
+       # lvmetad_update_wait_time = 10
+
+       # Configuration option global/use_lvmlockd.
+       # Use lvmlockd for locking among hosts using LVM on shared storage.
+       # Applicable only if LVM is compiled with lockd support in which
+       # case there is also lvmlockd(8) man page available for more
+       # information.
+       use_lvmlockd = 0
+
+       # Configuration option global/lvmlockd_lock_retries.
+       # Retry lvmlockd lock requests this many times.
+       # Applicable only if LVM is compiled with lockd support
+       # This configuration option has an automatic default value.
+       # lvmlockd_lock_retries = 3
+
+       # Configuration option global/sanlock_lv_extend.
+       # Size in MiB to extend the internal LV holding sanlock locks.
+       # The internal LV holds locks for each LV in the VG, and after enough
+       # LVs have been created, the internal LV needs to be extended. lvcreate
+       # will automatically extend the internal LV when needed by the amount
+       # specified here. Setting this to 0 disables the automatic extension
+       # and can cause lvcreate to fail. Applicable only if LVM is compiled
+       # with lockd support
+       # This configuration option has an automatic default value.
+       # sanlock_lv_extend = 256
+
+       # Configuration option global/thin_check_executable.
+       # The full path to the thin_check command.
+       # LVM uses this command to check that a thin metadata device is in a
+       # usable state. When a thin pool is activated and after it is
+       # deactivated, this command is run. Activation will only proceed if
+       # the command has an exit status of 0. Set to "" to skip this check.
+       # (Not recommended.) Also see thin_check_options.
+       # (See package device-mapper-persistent-data or thin-provisioning-tools)
+       # This configuration option has an automatic default value.
+       # thin_check_executable = "/usr/sbin/thin_check"
+
+       # Configuration option global/thin_dump_executable.
+       # The full path to the thin_dump command.
+       # LVM uses this command to dump thin pool metadata.
+       # (See package device-mapper-persistent-data or thin-provisioning-tools)
+       # This configuration option has an automatic default value.
+       # thin_dump_executable = "/usr/sbin/thin_dump"
+
+       # Configuration option global/thin_repair_executable.
+       # The full path to the thin_repair command.
+       # LVM uses this command to repair a thin metadata device if it is in
+       # an unusable state. Also see thin_repair_options.
+       # (See package device-mapper-persistent-data or thin-provisioning-tools)
+       # This configuration option has an automatic default value.
+       # thin_repair_executable = "/usr/sbin/thin_repair"
+
+       # Configuration option global/thin_check_options.
+       # List of options passed to the thin_check command.
+       # With thin_check version 2.1 or newer you can add the option
+       # --ignore-non-fatal-errors to let it pass through ignorable errors
+       # and fix them later. With thin_check version 3.2 or newer you should
+       # include the option --clear-needs-check-flag.
+       # This configuration option has an automatic default value.
+       # thin_check_options = [ "-q", "--clear-needs-check-flag" ]
+
+       # Configuration option global/thin_repair_options.
+       # List of options passed to the thin_repair command.
+       # This configuration option has an automatic default value.
+       # thin_repair_options = [ "" ]
+
+       # Configuration option global/thin_disabled_features.
+       # Features to not use in the thin driver.
+       # This can be helpful for testing, or to avoid using a feature that is
+       # causing problems. Features include: block_size, discards,
+       # discards_non_power_2, external_origin, metadata_resize,
+       # external_origin_extend, error_if_no_space.
+       # 
+       # Example
+       # thin_disabled_features = [ "discards", "block_size" ]
+       # 
+       # This configuration option does not have a default value defined.
+
+       # Configuration option global/cache_disabled_features.
+       # Features to not use in the cache driver.
+       # This can be helpful for testing, or to avoid using a feature that is
+       # causing problems. Features include: policy_mq, policy_smq.
+       # 
+       # Example
+       # cache_disabled_features = [ "policy_smq" ]
+       # 
+       # This configuration option does not have a default value defined.
+
+       # Configuration option global/cache_check_executable.
+       # The full path to the cache_check command.
+       # LVM uses this command to check that a cache metadata device is in a
+       # usable state. When a cached LV is activated and after it is
+       # deactivated, this command is run. Activation will only proceed if the
+       # command has an exit status of 0. Set to "" to skip this check.
+       # (Not recommended.) Also see cache_check_options.
+       # (See package device-mapper-persistent-data or thin-provisioning-tools)
+       # This configuration option has an automatic default value.
+       # cache_check_executable = "/usr/sbin/cache_check"
+
+       # Configuration option global/cache_dump_executable.
+       # The full path to the cache_dump command.
+       # LVM uses this command to dump cache pool metadata.
+       # (See package device-mapper-persistent-data or thin-provisioning-tools)
+       # This configuration option has an automatic default value.
+       # cache_dump_executable = "/usr/sbin/cache_dump"
+
+       # Configuration option global/cache_repair_executable.
+       # The full path to the cache_repair command.
+       # LVM uses this command to repair a cache metadata device if it is in
+       # an unusable state. Also see cache_repair_options.
+       # (See package device-mapper-persistent-data or thin-provisioning-tools)
+       # This configuration option has an automatic default value.
+       # cache_repair_executable = "/usr/sbin/cache_repair"
+
+       # Configuration option global/cache_check_options.
+       # List of options passed to the cache_check command.
+       # With cache_check version 5.0 or newer you should include the option
+       # --clear-needs-check-flag.
+       # This configuration option has an automatic default value.
+       # cache_check_options = [ "-q", "--clear-needs-check-flag" ]
+
+       # Configuration option global/cache_repair_options.
+       # List of options passed to the cache_repair command.
+       # This configuration option has an automatic default value.
+       # cache_repair_options = [ "" ]
+
+       # Configuration option global/system_id_source.
+       # The method LVM uses to set the local system ID.
+       # Volume Groups can also be given a system ID (by vgcreate, vgchange,
+       # or vgimport.) A VG on shared storage devices is accessible only to
+       # the host with a matching system ID. See 'man lvmsystemid' for
+       # information on limitations and correct usage.
+       # 
+       # Accepted values:
+       #   none
+       #     The host has no system ID.
+       #   lvmlocal
+       #     Obtain the system ID from the system_id setting in the 'local'
+       #     section of an lvm configuration file, e.g. lvmlocal.conf.
+       #   uname
+       #     Set the system ID from the hostname (uname) of the system.
+       #     System IDs beginning localhost are not permitted.
+       #   machineid
+       #     Use the contents of the machine-id file to set the system ID.
+       #     Some systems create this file at installation time.
+       #     See 'man machine-id' and global/etc.
+       #   file
+       #     Use the contents of another file (system_id_file) to set the
+       #     system ID.
+       # 
+       system_id_source = "none"
+
+       # Configuration option global/system_id_file.
+       # The full path to the file containing a system ID.
+       # This is used when system_id_source is set to 'file'.
+       # Comments starting with the character # are ignored.
+       # This configuration option does not have a default value defined.
+
+       # Configuration option global/use_lvmpolld.
+       # Use lvmpolld to supervise long running LVM commands.
+       # When enabled, control of long running LVM commands is transferred
+       # from the original LVM command to the lvmpolld daemon. This allows
+       # the operation to continue independent of the original LVM command.
+       # After lvmpolld takes over, the LVM command displays the progress
+       # of the ongoing operation. lvmpolld itself runs LVM commands to
+       # manage the progress of ongoing operations. lvmpolld can be used as
+       # a native systemd service, which allows it to be started on demand,
+       # and to use its own control group. When this option is disabled, LVM
+       # commands will supervise long running operations by forking themselves.
+       # Applicable only if LVM is compiled with lvmpolld support.
+       use_lvmpolld = 1
+
+       # Configuration option global/notify_dbus.
+       # Enable D-Bus notification from LVM commands.
+       # When enabled, an LVM command that changes PVs, changes VG metadata,
+       # or changes the activation state of an LV will send a notification.
+       notify_dbus = 1
+}
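
The global section above ends with the two behaviours this image actually pins down: system_id_source stays at "none" and long-running commands are handed off to lvmpolld. As a hedged illustration (not part of the committed file), the stock lvm2 lvmconfig tool can confirm what LVM resolves for these keys on a deployed node:

    # Show the values LVM resolves for two of the settings set above
    lvmconfig global/system_id_source global/use_lvmpolld
    # Validate the edited lvm.conf as a whole
    lvmconfig --validate
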
+
+# Configuration section activation.
+activation {
+
+       # Configuration option activation/checks.
+       # Perform internal checks of libdevmapper operations.
+       # Useful for debugging problems with activation. Some of the checks may
+       # be expensive, so it's best to use this only when there seems to be a
+       # problem.
+       checks = 0
+
+       # Configuration option activation/udev_sync.
+       # Use udev notifications to synchronize udev and LVM.
+       # The --nodevsync option overrides this setting.
+       # When disabled, LVM commands will not wait for notifications from
+       # udev, but continue irrespective of any possible udev processing in
+       # the background. Only use this if udev is not running or has rules
+       # that ignore the devices LVM creates. If enabled when udev is not
+       # running, and LVM processes are waiting for udev, run the command
+       # 'dmsetup udevcomplete_all' to wake them up.
+       udev_sync = 1
+
+       # Configuration option activation/udev_rules.
+       # Use udev rules to manage LV device nodes and symlinks.
+       # When disabled, LVM will manage the device nodes and symlinks for
+       # active LVs itself. Manual intervention may be required if this
+       # setting is changed while LVs are active.
+       udev_rules = 1
+
+       # Configuration option activation/verify_udev_operations.
+       # Use extra checks in LVM to verify udev operations.
+       # This enables additional checks (and if necessary, repairs) on entries
+       # in the device directory after udev has completed processing its
+       # events. Useful for diagnosing problems with LVM/udev interactions.
+       verify_udev_operations = 0
+
+       # Configuration option activation/retry_deactivation.
+       # Retry failed LV deactivation.
+       # If LV deactivation fails, LVM will retry for a few seconds before
+       # failing. This may happen because a process run from a quick udev rule
+       # temporarily opened the device.
+       retry_deactivation = 1
+
+       # Configuration option activation/missing_stripe_filler.
+       # Method to fill missing stripes when activating an incomplete LV.
+       # Using 'error' will make inaccessible parts of the device return I/O
+       # errors on access. You can instead use a device path, in which case,
+       # that device will be used in place of missing stripes. Using anything
+       # other than 'error' with mirrored or snapshotted volumes is likely to
+       # result in data corruption.
+       # This configuration option is advanced.
+       missing_stripe_filler = "error"
+
+       # Configuration option activation/use_linear_target.
+       # Use the linear target to optimize single stripe LVs.
+       # When disabled, the striped target is used. The linear target is an
+       # optimised version of the striped target that only handles a single
+       # stripe.
+       use_linear_target = 1
+
+       # Configuration option activation/reserved_stack.
+       # Stack size in KiB to reserve for use while devices are suspended.
+       # Insufficient reserve risks I/O deadlock during device suspension.
+       reserved_stack = 64
+
+       # Configuration option activation/reserved_memory.
+       # Memory size in KiB to reserve for use while devices are suspended.
+       # Insufficient reserve risks I/O deadlock during device suspension.
+       reserved_memory = 8192
+
+       # Configuration option activation/process_priority.
+       # Nice value used while devices are suspended.
+       # Use a high priority so that LVs are suspended
+       # for the shortest possible time.
+       process_priority = -18
+
+       # Configuration option activation/volume_list.
+       # Only LVs selected by this list are activated.
+       # If this list is defined, an LV is only activated if it matches an
+       # entry in this list. If this list is undefined, it imposes no limits
+       # on LV activation (all are allowed).
+       # 
+       # Accepted values:
+       #   vgname
+       #     The VG name is matched exactly and selects all LVs in the VG.
+       #   vgname/lvname
+       #     The VG name and LV name are matched exactly and selects the LV.
+       #   @tag
+       #     Selects an LV if the specified tag matches a tag set on the LV
+       #     or VG.
+       #   @*
+       #     Selects an LV if a tag defined on the host is also set on the LV
+       #     or VG. See tags/hosttags. If any host tags exist but volume_list
+       #     is not defined, a default single-entry list containing '@*'
+       #     is assumed.
+       # 
+       # Example
+       # volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
+       # 
+       # This configuration option does not have a default value defined.
+
+       # Configuration option activation/auto_activation_volume_list.
+       # Only LVs selected by this list are auto-activated.
+       # This list works like volume_list, but it is used only by
+       # auto-activation commands. It does not apply to direct activation
+       # commands. If this list is defined, an LV is only auto-activated
+       # if it matches an entry in this list. If this list is undefined, it
+       # imposes no limits on LV auto-activation (all are allowed.) If this
+       # list is defined and empty, i.e. "[]", then no LVs are selected for
+       # auto-activation. An LV that is selected by this list for
+       # auto-activation, must also be selected by volume_list (if defined)
+       # before it is activated. Auto-activation is an activation command that
+       # includes the 'a' argument: --activate ay or -a ay. The 'a' (auto)
+       # argument for auto-activation is meant to be used by activation
+       # commands that are run automatically by the system, as opposed to LVM
+       # commands run directly by a user. A user may also use the 'a' flag
+       # directly to perform auto-activation. Also see pvscan(8) for more
+       # information about auto-activation.
+       # 
+       # Accepted values:
+       #   vgname
+       #     The VG name is matched exactly and selects all LVs in the VG.
+       #   vgname/lvname
+       #     The VG name and LV name are matched exactly and selects the LV.
+       #   @tag
+       #     Selects an LV if the specified tag matches a tag set on the LV
+       #     or VG.
+       #   @*
+       #     Selects an LV if a tag defined on the host is also set on the LV
+       #     or VG. See tags/hosttags. If any host tags exist but volume_list
+       #     is not defined, a default single-entry list containing '@*'
+       #     is assumed.
+       # 
+       # Example
+       # auto_activation_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
+       # 
+       # This configuration option does not have a default value defined.
+
+       # Configuration option activation/read_only_volume_list.
+       # LVs in this list are activated in read-only mode.
+       # If this list is defined, each LV that is to be activated is checked
+       # against this list, and if it matches, it is activated in read-only
+       # mode. This overrides the permission setting stored in the metadata,
+       # e.g. from --permission rw.
+       # 
+       # Accepted values:
+       #   vgname
+       #     The VG name is matched exactly and selects all LVs in the VG.
+       #   vgname/lvname
+       #     The VG name and LV name are matched exactly and selects the LV.
+       #   @tag
+       #     Selects an LV if the specified tag matches a tag set on the LV
+       #     or VG.
+       #   @*
+       #     Selects an LV if a tag defined on the host is also set on the LV
+       #     or VG. See tags/hosttags. If any host tags exist but volume_list
+       #     is not defined, a default single-entry list containing '@*'
+       #     is assumed.
+       # 
+       # Example
+       # read_only_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
+       # 
+       # This configuration option does not have a default value defined.
+
+       # Configuration option activation/raid_region_size.
+       # Size in KiB of each raid or mirror synchronization region.
+       # For raid or mirror segment types, this is the amount of data that is
+       # copied at once when initializing, or moved at once by pvmove.
+       raid_region_size = 512
+
+       # Configuration option activation/error_when_full.
+       # Return errors if a thin pool runs out of space.
+       # The --errorwhenfull option overrides this setting.
+       # When enabled, writes to thin LVs immediately return an error if the
+       # thin pool is out of data space. When disabled, writes to thin LVs
+       # are queued if the thin pool is out of space, and processed when the
+       # thin pool data space is extended. New thin pools are assigned the
+       # behavior defined here.
+       # This configuration option has an automatic default value.
+       # error_when_full = 0
+
+       # Configuration option activation/readahead.
+       # Setting to use when there is no readahead setting in metadata.
+       # 
+       # Accepted values:
+       #   none
+       #     Disable readahead.
+       #   auto
+       #     Use default value chosen by kernel.
+       # 
+       readahead = "auto"
+
+       # Configuration option activation/raid_fault_policy.
+       # Defines how a device failure in a RAID LV is handled.
+       # This includes LVs that have the following segment types:
+       # raid1, raid4, raid5*, and raid6*.
+       # If a device in the LV fails, the policy determines the steps
+       # performed by dmeventd automatically, and the steps performed by the
+       # manual command lvconvert --repair --use-policies.
+       # Automatic handling requires dmeventd to be monitoring the LV.
+       # 
+       # Accepted values:
+       #   warn
+       #     Use the system log to warn the user that a device in the RAID LV
+       #     has failed. It is left to the user to run lvconvert --repair
+       #     manually to remove or replace the failed device. As long as the
+       #     number of failed devices does not exceed the redundancy of the LV
+       #     (1 device for raid4/5, 2 for raid6), the LV will remain usable.
+       #   allocate
+       #     Attempt to use any extra physical volumes in the VG as spares and
+       #     replace faulty devices.
+       # 
+       raid_fault_policy = "warn"
+
+       # Configuration option activation/mirror_image_fault_policy.
+       # Defines how a device failure in a 'mirror' LV is handled.
+       # An LV with the 'mirror' segment type is composed of mirror images
+       # (copies) and a mirror log. A disk log ensures that a mirror LV does
+       # not need to be re-synced (all copies made the same) every time a
+       # machine reboots or crashes. If a device in the LV fails, this policy
+       # determines the steps performed by dmeventd automatically, and the steps
+       # performed by the manual command lvconvert --repair --use-policies.
+       # Automatic handling requires dmeventd to be monitoring the LV.
+       # 
+       # Accepted values:
+       #   remove
+       #     Simply remove the faulty device and run without it. If the log
+       #     device fails, the mirror would convert to using an in-memory log.
+       #     This means the mirror will not remember its sync status across
+       #     crashes/reboots and the entire mirror will be re-synced. If a
+       #     mirror image fails, the mirror will convert to a non-mirrored
+       #     device if there is only one remaining good copy.
+       #   allocate
+       #     Remove the faulty device and try to allocate space on a new
+       #     device to be a replacement for the failed device. Using this
+       #     policy for the log is fast and maintains the ability to remember
+       #     sync state through crashes/reboots. Using this policy for a
+       #     mirror device is slow, as it requires the mirror to resynchronize
+       #     the devices, but it will preserve the mirror characteristic of
+       #     the device. This policy acts like 'remove' if no suitable device
+       #     and space can be allocated for the replacement.
+       #   allocate_anywhere
+       #     Not yet implemented. Useful to place the log device temporarily
+       #     on the same physical volume as one of the mirror images. This
+       #     policy is not recommended for mirror devices since it would break
+       #     the redundant nature of the mirror. This policy acts like
+       #     'remove' if no suitable device and space can be allocated for the
+       #     replacement.
+       # 
+       mirror_image_fault_policy = "remove"
+
+       # Configuration option activation/mirror_log_fault_policy.
+       # Defines how a device failure in a 'mirror' log LV is handled.
+       # The mirror_image_fault_policy description for mirrored LVs also
+       # applies to mirrored log LVs.
+       mirror_log_fault_policy = "allocate"
+
+       # Configuration option activation/snapshot_autoextend_threshold.
+       # Auto-extend a snapshot when its usage exceeds this percent.
+       # Setting this to 100 disables automatic extension.
+       # The minimum value is 50 (a smaller value is treated as 50.)
+       # Also see snapshot_autoextend_percent.
+       # Automatic extension requires dmeventd to be monitoring the LV.
+       # 
+       # Example
+       # Using 70% autoextend threshold and 20% autoextend size, when a 1G
+       # snapshot exceeds 700M, it is extended to 1.2G, and when it exceeds
+       # 840M, it is extended to 1.44G:
+       # snapshot_autoextend_threshold = 70
+       # 
+       snapshot_autoextend_threshold = 100
+
+       # Configuration option activation/snapshot_autoextend_percent.
+       # Auto-extending a snapshot adds this percent extra space.
+       # The amount of additional space added to a snapshot is this
+       # percent of its current size.
+       # 
+       # Example
+       # Using 70% autoextend threshold and 20% autoextend size, when a 1G
+       # snapshot exceeds 700M, it is extended to 1.2G, and when it exceeds
+       # 840M, it is extended to 1.44G:
+       # snapshot_autoextend_percent = 20
+       # 
+       snapshot_autoextend_percent = 20
+
+       # Configuration option activation/thin_pool_autoextend_threshold.
+       # Auto-extend a thin pool when its usage exceeds this percent.
+       # Setting this to 100 disables automatic extension.
+       # The minimum value is 50 (a smaller value is treated as 50.)
+       # Also see thin_pool_autoextend_percent.
+       # Automatic extension requires dmeventd to be monitoring the LV.
+       # 
+       # Example
+       # Using 70% autoextend threshold and 20% autoextend size, when a 1G
+       # thin pool exceeds 700M, it is extended to 1.2G, and when it exceeds
+       # 840M, it is extended to 1.44G:
+       # thin_pool_autoextend_threshold = 70
+       # 
+       thin_pool_autoextend_threshold = 100
+
+       # Configuration option activation/thin_pool_autoextend_percent.
+       # Auto-extending a thin pool adds this percent extra space.
+       # The amount of additional space added to a thin pool is this
+       # percent of its current size.
+       # 
+       # Example
+       # Using 70% autoextend threshold and 20% autoextend size, when a 1G
+       # thin pool exceeds 700M, it is extended to 1.2G, and when it exceeds
+       # 840M, it is extended to 1.44G:
+       # thin_pool_autoextend_percent = 20
+       # 
+       thin_pool_autoextend_percent = 20
+
+       # Configuration option activation/mlock_filter.
+       # Do not mlock these memory areas.
+       # While activating devices, I/O to devices being (re)configured is
+       # suspended. As a precaution against deadlocks, LVM pins memory it is
+       # using so it is not paged out, and will not require I/O to reread.
+       # Groups of pages that are known not to be accessed during activation
+       # do not need to be pinned into memory. Each string listed in this
+       # setting is compared against each line in /proc/self/maps, and the
+       # pages corresponding to lines that match are not pinned. On some
+       # systems, locale-archive was found to make up over 80% of the memory
+       # used by the process.
+       # 
+       # Example
+       # mlock_filter = [ "locale/locale-archive", "gconv/gconv-modules.cache" ]
+       # 
+       # This configuration option is advanced.
+       # This configuration option does not have a default value defined.
+
+       # Configuration option activation/use_mlockall.
+       # Use the old behavior of mlockall to pin all memory.
+       # Prior to version 2.02.62, LVM used mlockall() to pin the whole
+       # process's memory while activating devices.
+       use_mlockall = 0
+
+       # Configuration option activation/monitoring.
+       # Monitor LVs that are activated.
+       # The --ignoremonitoring option overrides this setting.
+       # When enabled, LVM will ask dmeventd to monitor activated LVs.
+       monitoring = 1
+
+       # Configuration option activation/polling_interval.
+       # Check pvmove or lvconvert progress at this interval (seconds).
+       # When pvmove or lvconvert must wait for the kernel to finish
+       # synchronising or merging data, they check and report progress at
+       # intervals of this number of seconds. If this is set to 0 and there
+       # is only one thing to wait for, there are no progress reports, but
+       # the process is awoken immediately once the operation is complete.
+       polling_interval = 15
+
+       # Configuration option activation/auto_set_activation_skip.
+       # Set the activation skip flag on new thin snapshot LVs.
+       # The --setactivationskip option overrides this setting.
+       # An LV can have a persistent 'activation skip' flag. The flag causes
+       # the LV to be skipped during normal activation. The lvchange/vgchange
+       # -K option is required to activate LVs that have the activation skip
+       # flag set. When this setting is enabled, the activation skip flag is
+       # set on new thin snapshot LVs.
+       # This configuration option has an automatic default value.
+       # auto_set_activation_skip = 1
+
+       # Configuration option activation/activation_mode.
+       # How LVs with missing devices are activated.
+       # The --activationmode option overrides this setting.
+       # 
+       # Accepted values:
+       #   complete
+       #     Only allow activation of an LV if all of the Physical Volumes it
+       #     uses are present. Other PVs in the Volume Group may be missing.
+       #   degraded
+       #     Like complete, but additionally RAID LVs of segment type raid1,
+       #     raid4, raid5, raid6 and raid10 will be activated if there is no
+       #     data loss, i.e. they have sufficient redundancy to present the
+       #     entire addressable range of the Logical Volume.
+       #   partial
+       #     Allows the activation of any LV even if a missing or failed PV
+       #     could cause data loss with a portion of the LV inaccessible.
+       #     This setting should not normally be used, but may sometimes
+       #     assist with data recovery.
+       # 
+       activation_mode = "degraded"
+
+       # Configuration option activation/lock_start_list.
+       # Locking is started only for VGs selected by this list.
+       # The rules are the same as those for volume_list.
+       # This configuration option does not have a default value defined.
+
+       # Configuration option activation/auto_lock_start_list.
+       # Locking is auto-started only for VGs selected by this list.
+       # The rules are the same as those for auto_activation_volume_list.
+       # This configuration option does not have a default value defined.
+}
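
The activation section leaves volume_list, auto_activation_volume_list and read_only_volume_list undefined, so every LV may be activated. A minimal sketch of how the two list types interact, using a hypothetical VG name that is not taken from this commit:

    # Hypothetical restriction, written with the same syntax as the examples above:
    #   volume_list = [ "vg0", "@taler" ]
    # Direct activation (checked against volume_list only):
    vgchange -ay vg0
    # Auto-activation as issued by system services (additionally checked
    # against auto_activation_volume_list):
    vgchange -aay vg0
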
+
+# Configuration section metadata.
+# This configuration section has an automatic default value.
+# metadata {
+
+       # Configuration option metadata/check_pv_device_sizes.
+       # Check device sizes are not smaller than corresponding PV sizes.
+       # If device size is less than corresponding PV size found in metadata,
+       # there is always a risk of data loss. If this option is set, then LVM
+       # issues a warning message each time it finds that the device size is
+       # less than corresponding PV size. You should not disable this unless
+       # you are absolutely sure about what you are doing!
+       # This configuration option is advanced.
+       # This configuration option has an automatic default value.
+       # check_pv_device_sizes = 1
+
+       # Configuration option metadata/record_lvs_history.
+       # When enabled, LVM keeps history records about removed LVs in
+       # metadata. The information that is recorded in metadata for
+       # historical LVs is reduced when compared to original
+       # information kept in metadata for live LVs. Currently, this
+       # feature is supported for thin and thin snapshot LVs only.
+       # This configuration option has an automatic default value.
+       # record_lvs_history = 0
+
+       # Configuration option metadata/lvs_history_retention_time.
+       # Retention time in seconds after which a record about individual
+       # historical logical volume is automatically destroyed.
+       # A value of 0 disables this feature.
+       # This configuration option has an automatic default value.
+       # lvs_history_retention_time = 0
+
+       # Configuration option metadata/pvmetadatacopies.
+       # Number of copies of metadata to store on each PV.
+       # The --pvmetadatacopies option overrides this setting.
+       # 
+       # Accepted values:
+       #   2
+       #     Two copies of the VG metadata are stored on the PV, one at the
+       #     front of the PV, and one at the end.
+       #   1
+       #     One copy of VG metadata is stored at the front of the PV.
+       #   0
+       #     No copies of VG metadata are stored on the PV. This may be
+       #     useful for VGs containing large numbers of PVs.
+       # 
+       # This configuration option is advanced.
+       # This configuration option has an automatic default value.
+       # pvmetadatacopies = 1
+
+       # Configuration option metadata/vgmetadatacopies.
+       # Number of copies of metadata to maintain for each VG.
+       # The --vgmetadatacopies option overrides this setting.
+       # If set to a non-zero value, LVM automatically chooses which of the
+       # available metadata areas to use to achieve the requested number of
+       # copies of the VG metadata. If you set a value larger than the
+       # total number of metadata areas available, then metadata is stored in
+       # them all. The value 0 (unmanaged) disables this automatic management
+       # and allows you to control which metadata areas are used at the
+       # individual PV level using pvchange --metadataignore y|n.
+       # This configuration option has an automatic default value.
+       # vgmetadatacopies = 0
+
+       # Configuration option metadata/pvmetadatasize.
+       # Approximate number of sectors to use for each metadata copy.
+       # VGs with large numbers of PVs or LVs, or VGs containing complex LV
+       # structures, may need additional space for VG metadata. The metadata
+       # areas are treated as circular buffers, so unused space becomes filled
+       # with an archive of the most recent previous versions of the metadata.
+       # This configuration option has an automatic default value.
+       # pvmetadatasize = 255
+
+       # Configuration option metadata/pvmetadataignore.
+       # Ignore metadata areas on a new PV.
+       # The --metadataignore option overrides this setting.
+       # If metadata areas on a PV are ignored, LVM will not store metadata
+       # in them.
+       # This configuration option is advanced.
+       # This configuration option has an automatic default value.
+       # pvmetadataignore = 0
+
+       # Configuration option metadata/stripesize.
+       # This configuration option is advanced.
+       # This configuration option has an automatic default value.
+       # stripesize = 64
+
+       # Configuration option metadata/dirs.
+       # Directories holding live copies of text format metadata.
+       # These directories must not be on logical volumes!
+       # It's possible to use LVM with a couple of directories here,
+       # preferably on different (non-LV) filesystems, and with no other
+       # on-disk metadata (pvmetadatacopies = 0). Or this can be in addition
+       # to on-disk metadata areas. The feature was originally added to
+       # simplify testing and is not supported under low memory situations -
+       # the machine could lock up. Never edit any files in these directories
+       # by hand unless you are absolutely sure you know what you are doing!
+       # Use the supplied toolset to make changes (e.g. vgcfgrestore).
+       # 
+       # Example
+       # dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
+       # 
+       # This configuration option is advanced.
+       # This configuration option does not have a default value defined.
+# }
+
+# Configuration section report.
+# LVM report command output formatting.
+# This configuration section has an automatic default value.
+# report {
+
+       # Configuration option report/output_format.
+       # Format of LVM command's report output.
+       # If there is more than one report per command, then the format
+       # is applied for all reports. You can also change output format
+       # directly on command line using --reportformat option which
+       # has precedence over the report/output_format setting.
+       # Accepted values:
+       #   basic
+       #     Original format with columns and rows. If there is more than
+       #     one report per command, each report is prefixed with report's
+       #     name for identification.
+       #   json
+       #     JSON format.
+       # This configuration option has an automatic default value.
+       # output_format = "basic"
+
+       # Configuration option report/compact_output.
+       # Do not print empty values for all report fields.
+       # If enabled, all fields that don't have a value set for any of the
+       # rows reported are skipped and not printed. Compact output is
+       # applicable only if report/buffered is enabled. If you need to
+       # compact only specified fields, use compact_output=0 and define
+       # report/compact_output_cols configuration setting instead.
+       # This configuration option has an automatic default value.
+       # compact_output = 0
+
+       # Configuration option report/compact_output_cols.
+       # Do not print empty values for specified report fields.
+       # If defined, specified fields that don't have a value set for any
+       # of the rows reported are skipped and not printed. Compact output
+       # is applicable only if report/buffered is enabled. If you need to
+       # compact all fields, use compact_output=1 instead in which case
+       # the compact_output_cols setting is then ignored.
+       # This configuration option has an automatic default value.
+       # compact_output_cols = ""
+
+       # Configuration option report/aligned.
+       # Align columns in report output.
+       # This configuration option has an automatic default value.
+       # aligned = 1
+
+       # Configuration option report/buffered.
+       # Buffer report output.
+       # When buffered reporting is used, the report's content is appended
+       # incrementally to include each object being reported until the report
+       # is flushed to output which normally happens at the end of command
+       # execution. Otherwise, if buffering is not used, each object is
+       # reported as soon as its processing is finished.
+       # This configuration option has an automatic default value.
+       # buffered = 1
+
+       # Configuration option report/headings.
+       # Show headings for columns on report.
+       # This configuration option has an automatic default value.
+       # headings = 1
+
+       # Configuration option report/separator.
+       # A separator to use on report after each field.
+       # This configuration option has an automatic default value.
+       # separator = " "
+
+       # Configuration option report/list_item_separator.
+       # A separator to use for list items when reported.
+       # This configuration option has an automatic default value.
+       # list_item_separator = ","
+
+       # Configuration option report/prefixes.
+       # Use a field name prefix for each field reported.
+       # This configuration option has an automatic default value.
+       # prefixes = 0
+
+       # Configuration option report/quoted.
+       # Quote field values when using field name prefixes.
+       # This configuration option has an automatic default value.
+       # quoted = 1
+
+       # Configuration option report/columns_as_rows.
+       # Output each column as a row.
+       # If set, this also implies report/prefixes=1.
+       # This configuration option has an automatic default value.
+       # columns_as_rows = 0
+
+       # Configuration option report/binary_values_as_numeric.
+       # Use binary values 0 or 1 instead of descriptive literal values.
+       # For columns that have exactly two valid values to report
+       # (not counting the 'unknown' value which denotes that the
+       # value could not be determined).
+       # This configuration option has an automatic default value.
+       # binary_values_as_numeric = 0
+
+       # Configuration option report/time_format.
+       # Set time format for fields reporting time values.
+       # Format specification is a string which may contain special character
+       # sequences and ordinary character sequences. Ordinary character
+       # sequences are copied verbatim. Each special character sequence is
+       # introduced by the '%' character and such sequence is then
+       # substituted with a value as described below.
+       # 
+       # Accepted values:
+       #   %a
+       #     The abbreviated name of the day of the week according to the
+       #     current locale.
+       #   %A
+       #     The full name of the day of the week according to the current
+       #     locale.
+       #   %b
+       #     The abbreviated month name according to the current locale.
+       #   %B
+       #     The full month name according to the current locale.
+       #   %c
+       #     The preferred date and time representation for the current
+       #     locale (alt E)
+       #   %C
+       #     The century number (year/100) as a 2-digit integer. (alt E)
+       #   %d
+       #     The day of the month as a decimal number (range 01 to 31).
+       #     (alt O)
+       #   %D
+       #     Equivalent to %m/%d/%y. (For Americans only. Americans should
+       #     note that in other countries %d/%m/%y is rather common. This
+       #     means that in international context this format is ambiguous and
+       #     should not be used.)
+       #   %e
+       #     Like %d, the day of the month as a decimal number, but a leading
+       #     zero is replaced by a space. (alt O)
+       #   %E
+       #     Modifier: use alternative local-dependent representation if
+       #     available.
+       #   %F
+       #     Equivalent to %Y-%m-%d (the ISO 8601 date format).
+       #   %G
+       #     The ISO 8601 week-based year with century as a decimal number.
+       #     The 4-digit year corresponding to the ISO week number (see %V).
+       #     This has the same format and value as %Y, except that if the
+       #     ISO week number belongs to the previous or next year, that year
+       #     is used instead.
+       #   %g
+       #     Like %G, but without century, that is, with a 2-digit year
+       #     (00-99).
+       #   %h
+       #     Equivalent to %b.
+       #   %H
+       #     The hour as a decimal number using a 24-hour clock
+       #     (range 00 to 23). (alt O)
+       #   %I
+       #     The hour as a decimal number using a 12-hour clock
+       #     (range 01 to 12). (alt O)
+       #   %j
+       #     The day of the year as a decimal number (range 001 to 366).
+       #   %k
+       #     The hour (24-hour clock) as a decimal number (range 0 to 23);
+       #     single digits are preceded by a blank. (See also %H.)
+       #   %l
+       #     The hour (12-hour clock) as a decimal number (range 1 to 12);
+       #     single digits are preceded by a blank. (See also %I.)
+       #   %m
+       #     The month as a decimal number (range 01 to 12). (alt O)
+       #   %M
+       #     The minute as a decimal number (range 00 to 59). (alt O)
+       #   %O
+       #     Modifier: use alternative numeric symbols.
+       #   %p
+       #     Either "AM" or "PM" according to the given time value,
+       #     or the corresponding strings for the current locale. Noon is
+       #     treated as "PM" and midnight as "AM".
+       #   %P
+       #     Like %p but in lowercase: "am" or "pm" or a corresponding
+       #     string for the current locale.
+       #   %r
+       #     The time in a.m. or p.m. notation. In the POSIX locale this is
+       #     equivalent to %I:%M:%S %p.
+       #   %R
+       #     The time in 24-hour notation (%H:%M). For a version including
+       #     the seconds, see %T below.
+       #   %s
+       #     The number of seconds since the Epoch,
+       #     1970-01-01 00:00:00 +0000 (UTC)
+       #   %S
+       #     The second as a decimal number (range 00 to 60). (The range is
+       #     up to 60 to allow for occasional leap seconds.) (alt O)
+       #   %t
+       #     A tab character.
+       #   %T
+       #     The time in 24-hour notation (%H:%M:%S).
+       #   %u
+       #     The day of the week as a decimal, range 1 to 7, Monday being 1.
+       #     See also %w. (alt O)
+       #   %U
+       #     The week number of the current year as a decimal number,
+       #     range 00 to 53, starting with the first Sunday as the first
+       #     day of week 01. See also %V and %W. (alt O)
+       #   %V
+       #     The ISO 8601 week number of the current year as a decimal number,
+       #     range 01 to 53, where week 1 is the first week that has at least
+       #     4 days in the new year. See also %U and %W. (alt O)
+       #   %w
+       #     The day of the week as a decimal, range 0 to 6, Sunday being 0.
+       #     See also %u. (alt O)
+       #   %W
+       #     The week number of the current year as a decimal number,
+       #     range 00 to 53, starting with the first Monday as the first day
+       #     of week 01. (alt O)
+       #   %x
+       #     The preferred date representation for the current locale without
+       #     the time. (alt E)
+       #   %X
+       #     The preferred time representation for the current locale without
+       #     the date. (alt E)
+       #   %y
+       #     The year as a decimal number without a century (range 00 to 99).
+       #     (alt E, alt O)
+       #   %Y
+       #     The year as a decimal number including the century. (alt E)
+       #   %z
+       #     The +hhmm or -hhmm numeric timezone (that is, the hour and minute
+       #     offset from UTC).
+       #   %Z
+       #     The timezone name or abbreviation.
+       #   %%
+       #     A literal '%' character.
+       # 
+       # This configuration option has an automatic default value.
+       # time_format = "%Y-%m-%d %T %z"
+
+       # Configuration option report/devtypes_sort.
+       # List of columns to sort by when reporting 'lvm devtypes' command.
+       # See 'lvm devtypes -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # devtypes_sort = "devtype_name"
+
+       # Configuration option report/devtypes_cols.
+       # List of columns to report for 'lvm devtypes' command.
+       # See 'lvm devtypes -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # devtypes_cols = "devtype_name,devtype_max_partitions,devtype_description"
+
+       # Configuration option report/devtypes_cols_verbose.
+       # List of columns to report for 'lvm devtypes' command in verbose mode.
+       # See 'lvm devtypes -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # devtypes_cols_verbose = "devtype_name,devtype_max_partitions,devtype_description"
+
+       # Configuration option report/lvs_sort.
+       # List of columns to sort by when reporting 'lvs' command.
+       # See 'lvs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # lvs_sort = "vg_name,lv_name"
+
+       # Configuration option report/lvs_cols.
+       # List of columns to report for 'lvs' command.
+       # See 'lvs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # lvs_cols = "lv_name,vg_name,lv_attr,lv_size,pool_lv,origin,data_percent,metadata_percent,move_pv,mirror_log,copy_percent,convert_lv"
+
+       # Configuration option report/lvs_cols_verbose.
+       # List of columns to report for 'lvs' command in verbose mode.
+       # See 'lvs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # lvs_cols_verbose = "lv_name,vg_name,seg_count,lv_attr,lv_size,lv_major,lv_minor,lv_kernel_major,lv_kernel_minor,pool_lv,origin,data_percent,metadata_percent,move_pv,copy_percent,mirror_log,convert_lv,lv_uuid,lv_profile"
+
+       # Configuration option report/vgs_sort.
+       # List of columns to sort by when reporting 'vgs' command.
+       # See 'vgs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # vgs_sort = "vg_name"
+
+       # Configuration option report/vgs_cols.
+       # List of columns to report for 'vgs' command.
+       # See 'vgs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # vgs_cols = "vg_name,pv_count,lv_count,snap_count,vg_attr,vg_size,vg_free"
+
+       # Configuration option report/vgs_cols_verbose.
+       # List of columns to report for 'vgs' command in verbose mode.
+       # See 'vgs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # vgs_cols_verbose = "vg_name,vg_attr,vg_extent_size,pv_count,lv_count,snap_count,vg_size,vg_free,vg_uuid,vg_profile"
+
+       # Configuration option report/pvs_sort.
+       # List of columns to sort by when reporting 'pvs' command.
+       # See 'pvs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # pvs_sort = "pv_name"
+
+       # Configuration option report/pvs_cols.
+       # List of columns to report for 'pvs' command.
+       # See 'pvs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # pvs_cols = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free"
+
+       # Configuration option report/pvs_cols_verbose.
+       # List of columns to report for 'pvs' command in verbose mode.
+       # See 'pvs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # pvs_cols_verbose = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,dev_size,pv_uuid"
+
+       # Configuration option report/segs_sort.
+       # List of columns to sort by when reporting 'lvs --segments' command.
+       # See 'lvs --segments -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # segs_sort = "vg_name,lv_name,seg_start"
+
+       # Configuration option report/segs_cols.
+       # List of columns to report for 'lvs --segments' command.
+       # See 'lvs --segments -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # segs_cols = "lv_name,vg_name,lv_attr,stripes,segtype,seg_size"
+
+       # Configuration option report/segs_cols_verbose.
+       # List of columns to report for 'lvs --segments' command in verbose mode.
+       # See 'lvs --segments -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # segs_cols_verbose = "lv_name,vg_name,lv_attr,seg_start,seg_size,stripes,segtype,stripesize,chunksize"
+
+       # Configuration option report/pvsegs_sort.
+       # List of columns to sort by when reporting 'pvs --segments' command.
+       # See 'pvs --segments -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # pvsegs_sort = "pv_name,pvseg_start"
+
+       # Configuration option report/pvsegs_cols.
+       # List of columns to report for 'pvs --segments' command.
+       # See 'pvs --segments -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # pvsegs_cols = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size"
+
+       # Configuration option report/pvsegs_cols_verbose.
+       # List of columns to report for 'pvs --segments' command in verbose mode.
+       # See 'pvs --segments -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # pvsegs_cols_verbose = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size,lv_name,seg_start_pe,segtype,seg_pe_ranges"
+
+       # Configuration option report/vgs_cols_full.
+       # List of columns to report for lvm fullreport's 'vgs' subreport.
+       # See 'vgs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # vgs_cols_full = "vg_all"
+
+       # Configuration option report/pvs_cols_full.
+       # List of columns to report for lvm fullreport's 'pvs' subreport.
+       # See 'pvs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # pvs_cols_full = "pv_all"
+
+       # Configuration option report/lvs_cols_full.
+       # List of columns to report for lvm fullreport's 'lvs' subreport.
+       # See 'lvs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # lvs_cols_full = "lv_all"
+
+       # Configuration option report/pvsegs_cols_full.
+       # List of columns to report for lvm fullreport's 'pvseg' subreport.
+       # See 'pvs --segments -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # pvsegs_cols_full = "pvseg_all,pv_uuid,lv_uuid"
+
+       # Configuration option report/segs_cols_full.
+       # List of columns to report for lvm fullreport's 'seg' subreport.
+       # See 'lvs --segments -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # segs_cols_full = "seg_all,lv_uuid"
+
+       # Configuration option report/vgs_sort_full.
+       # List of columns to sort by when reporting lvm fullreport's 'vgs' subreport.
+       # See 'vgs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # vgs_sort_full = "vg_name"
+
+       # Configuration option report/pvs_sort_full.
+       # List of columns to sort by when reporting lvm fullreport's 'pvs' subreport.
+       # See 'pvs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # pvs_sort_full = "pv_name"
+
+       # Configuration option report/lvs_sort_full.
+       # List of columns to sort by when reporting lvm fullreport's 'lvs' subreport.
+       # See 'lvs -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # lvs_sort_full = "vg_name,lv_name"
+
+       # Configuration option report/pvsegs_sort_full.
+       # List of columns to sort by when reporting lvm fullreport's 'pvseg' subreport.
+       # See 'pvs --segments -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # pvsegs_sort_full = "pv_uuid,pvseg_start"
+
+       # Configuration option report/segs_sort_full.
+       # List of columns to sort by when reporting lvm fullreport's 'seg' subreport.
+       # See 'lvs --segments -o help' for the list of possible fields.
+       # This configuration option has an automatic default value.
+       # segs_sort_full = "lv_uuid,seg_start"
+
+       # Configuration option report/mark_hidden_devices.
+       # Use brackets [] to mark hidden devices.
+       # This configuration option has an automatic default value.
+       # mark_hidden_devices = 1
+
+       # Configuration option report/two_word_unknown_device.
+       # Use the two words 'unknown device' in place of '[unknown]'.
+       # This is displayed when the device for a PV is not known.
+       # This configuration option has an automatic default value.
+       # two_word_unknown_device = 0
+# }
+
+# Configuration section dmeventd.
+# Settings for the LVM event daemon.
+dmeventd {
+
+       # Configuration option dmeventd/mirror_library.
+       # The library dmeventd uses when monitoring a mirror device.
+       # libdevmapper-event-lvm2mirror.so attempts to recover from
+       # failures. It removes failed devices from a volume group and
+       # reconfigures a mirror as necessary. If no mirror library is
+       # provided, mirrors are not monitored through dmeventd.
+       mirror_library = "libdevmapper-event-lvm2mirror.so"
+
+       # Configuration option dmeventd/raid_library.
+       # This configuration option has an automatic default value.
+       # raid_library = "libdevmapper-event-lvm2raid.so"
+
+       # Configuration option dmeventd/snapshot_library.
+       # The library dmeventd uses when monitoring a snapshot device.
+       # libdevmapper-event-lvm2snapshot.so monitors the filling of snapshots
+       # and emits a warning through syslog when the usage exceeds 80%. The
+       # warning is repeated when 85%, 90% and 95% of the snapshot is filled.
+       snapshot_library = "libdevmapper-event-lvm2snapshot.so"
+
+       # Configuration option dmeventd/thin_library.
+       # The library dmeventd uses when monitoring a thin device.
+       # libdevmapper-event-lvm2thin.so monitors the filling of a pool
+       # and emits a warning through syslog when the usage exceeds 80%. The
+       # warning is repeated when 85%, 90% and 95% of the pool is filled.
+       thin_library = "libdevmapper-event-lvm2thin.so"
+
+       # Configuration option dmeventd/executable.
+       # The full path to the dmeventd binary.
+       # This configuration option has an automatic default value.
+       # executable = "/sbin/dmeventd"
+}
+
+# Configuration section tags.
+# Host tag settings.
+# This configuration section has an automatic default value.
+# tags {
+
+       # Configuration option tags/hosttags.
+       # Create a host tag using the machine name.
+       # The machine name is nodename returned by uname(2).
+       # This configuration option has an automatic default value.
+       # hosttags = 0
+
+       # Configuration section tags/<tag>.
+       # Replace this subsection name with a custom tag name.
+       # Multiple subsections like this can be created. The '@' prefix for
+       # tags is optional. This subsection can contain host_list, which is a
+       # list of machine names. If the name of the local machine is found in
+       # host_list, then the name of this subsection is used as a tag and is
+       # applied to the local machine as a 'host tag'. If this subsection is
+       # empty (has no host_list), then the subsection name is always applied
+       # as a 'host tag'.
+       # 
+       # Example
+       # The host tag foo is given to all hosts, and the host tag
+       # bar is given to the hosts named machine1 and machine2.
+       # tags { foo { } bar { host_list = [ "machine1", "machine2" ] } }
+       # 
+       # This configuration section has variable name.
+       # This configuration section has an automatic default value.
+       # tag {
+
+               # Configuration option tags/<tag>/host_list.
+               # A list of machine names.
+               # These machine names are compared to the nodename returned
+               # by uname(2). If the local machine name matches an entry in
+               # this list, the name of the subsection is applied to the
+               # machine as a 'host tag'.
+               # This configuration option does not have a default value defined.
+       # }
+# }
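
Nearly everything in this lvm.conf is a commented-out default; only a handful of values (udev_sync, use_lvmpolld, activation_mode, the dmeventd libraries, and a few others) are set explicitly. A hedged one-liner to see, on a built image with lvm2 installed, which settings actually deviate from the compiled-in defaults:

    # Print only settings whose values differ from the built-in defaults
    lvmconfig --type diff
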
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/net_access/iptables b/grid5000/steps/data/setup/puppet/modules/env/files/std/net_access/iptables
new file mode 100644
index 0000000..9721f72
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/net_access/iptables
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+# MANAGED BY PUPPET
+# Module:: env::std::net_access
+#
+
+/sbin/iptables-restore <<EOF
+*filter
+
+:INPUT ACCEPT [0:0]
+:FORWARD ACCEPT [0:0]
+:OUTPUT ACCEPT [0:0]
+
+#Log outgoing traffic to NAT
+# ACCEPT even if it's the default policy : Avoid having these destinations in the logs
+-A OUTPUT -d 127.0.0.1 -j ACCEPT
+-A OUTPUT -d 172.16.0.0/12 -j ACCEPT
+-A OUTPUT -d 10.0.0.0/8 -j ACCEPT
+-A OUTPUT -d 192.168.4.0/24 -j ACCEPT
+-A OUTPUT -d 192.168.66.0/24 -j ACCEPT
+# Multicast traffic
+-A OUTPUT -d 224.0.0.0/4 -j ACCEPT
+
+# Rate-limit UDP logging to 10 pkt/s per destination IP
+# https://intranet.grid5000.fr/bugzilla/show_bug.cgi?id=12295
+-A OUTPUT -p udp -m hashlimit --hashlimit-name UDPG5K --hashlimit-rate-match --hashlimit-above 10/s --hashlimit-mode dstip -j ACCEPT
+
+# Log everything else : it's going outside g5k
+-A OUTPUT -m conntrack --ctstate NEW -j LOG --log-level 7 --log-uid --log-prefix "outgoing traffic "
+COMMIT
+EOF
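
The script restores a filter table whose OUTPUT chain accepts traffic to private and multicast ranges, uses hashlimit to keep high-rate UDP flows out of the log, and tags every other new outgoing connection with the log prefix "outgoing traffic ". A hedged way to check the result once the script has run (plain iptables commands, not part of the commit):

    # List the OUTPUT chain with counters to confirm the rules were loaded
    iptables -L OUTPUT -n -v --line-numbers
    # The LOG target writes to the kernel ring buffer with the prefix above
    dmesg | grep 'outgoing traffic ' | tail
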
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/net_access/iptables.stretch b/grid5000/steps/data/setup/puppet/modules/env/files/std/net_access/iptables.stretch
new file mode 100644
index 0000000..ab5e59e
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/net_access/iptables.stretch
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+# MANAGED BY PUPPET
+# Module:: env::std::net_access
+#
+
+/sbin/iptables-restore <<EOF
+*filter
+
+:INPUT ACCEPT [0:0]
+:FORWARD ACCEPT [0:0]
+:OUTPUT ACCEPT [0:0]
+
+#Log outgoing traffic to NAT
+# ACCEPT even if it's the default policy : Avoid having these destinations in the logs
+-A OUTPUT -d 127.0.0.1 -j ACCEPT
+-A OUTPUT -d 172.16.0.0/12 -j ACCEPT
+-A OUTPUT -d 10.0.0.0/8 -j ACCEPT
+-A OUTPUT -d 192.168.4.0/24 -j ACCEPT
+-A OUTPUT -d 192.168.66.0/24 -j ACCEPT
+# Multicast traffic
+-A OUTPUT -d 224.0.0.0/4 -j ACCEPT
+
+# Log everything else : it's going outside g5k
+-A OUTPUT -m conntrack --ctstate NEW -j LOG --log-level 7 --log-uid --log-prefix "outgoing traffic "
+COMMIT
+EOF
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/net_access/rsyslog.conf b/grid5000/steps/data/setup/puppet/modules/env/files/std/net_access/rsyslog.conf
new file mode 100644
index 0000000..7ccecda
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/net_access/rsyslog.conf
@@ -0,0 +1,113 @@
+# INSTALLED BY PUPPET
+# File : puppet:///modules/syslogg5k/frontend/rsyslog.conf
+#
+#  /etc/rsyslog.conf   Configuration file for rsyslog.
+#
+#                      For more information see
+#                      /usr/share/doc/rsyslog-doc/html/rsyslog_conf.html
+
+
+#################
+#### MODULES ####
+#################
+
+$ModLoad imuxsock # provides support for local system logging
+$ModLoad imklog   # provides kernel logging support
+#$ModLoad immark  # provides --MARK-- message capability
+
+# provides UDP syslog reception
+#$ModLoad imudp
+#$UDPServerRun 514
+
+# provides TCP syslog reception
+#$ModLoad imtcp
+#$InputTCPServerRun 514
+
+
+###########################
+#### GLOBAL DIRECTIVES ####
+###########################
+
+#
+# Use traditional timestamp format.
+# To enable high precision timestamps, comment out the following line.
+#
+$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
+
+#
+# Set the default permissions for all log files.
+#
+$FileOwner root
+$FileGroup adm
+$FileCreateMode 0640
+$DirCreateMode 0755
+$Umask 0022
+
+#
+# Where to place spool and state files
+#
+$WorkDirectory /var/spool/rsyslog
+
+#
+# Include all config files in /etc/rsyslog.d/
+#
+$IncludeConfig /etc/rsyslog.d/*.conf
+
+
+###############
+#### RULES ####
+###############
+#
+# First some standard log files.  Log by facility.
+#
+auth,authpriv.*                        /var/log/auth.log
+*.*;auth,authpriv.none         -/var/log/syslog
+#cron.*                                /var/log/cron.log
+daemon.*                       -/var/log/daemon.log
+kern.*                         -/var/log/kern.log
+lpr.*                          -/var/log/lpr.log
+mail.*                         -/var/log/mail.log
+user.*                         -/var/log/user.log
+
+#
+# Logging for the mail system.  Split it up so that
+# it is easy to write scripts to parse these files.
+#
+mail.info                      -/var/log/mail.info
+mail.warn                      -/var/log/mail.warn
+mail.err                       /var/log/mail.err
+
+#
+# Logging for INN news system.
+#
+news.crit                      /var/log/news/news.crit
+news.err                       /var/log/news/news.err
+news.notice                    -/var/log/news/news.notice
+
+#
+# Some "catch-all" log files.
+#
+*.=debug;\
+       auth,authpriv.none;\
+       news.none;mail.none     -/var/log/debug
+*.=info;*.=notice;*.=warn;\
+       auth,authpriv.none;\
+       cron,daemon.none;\
+       mail,news.none          -/var/log/messages
+
+#
+# Emergencies are sent to everybody logged in.
+#
+*.emerg                                :omusrmsg:*
+
+#
+# I like to have messages displayed on the console, but only on a virtual
+# console I usually leave idle.
+#
+#daemon,mail.*;\
+#      news.=crit;news.=err;news.=notice;\
+#      *.=debug;*.=info;\
+#      *.=notice;*.=warn       /dev/tty8
+
+# Redirect Phoenix log to syslog.rennes
+local7.* @syslog.rennes.grid5000.fr:514
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/net_access/syslog_iptables.conf b/grid5000/steps/data/setup/puppet/modules/env/files/std/net_access/syslog_iptables.conf
new file mode 100644
index 0000000..3e4d28e
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/net_access/syslog_iptables.conf
@@ -0,0 +1,7 @@
+# Redirect iptables log to gwol syslog: http://www.rsyslog.com/doc/v8-stable/tutorials/reliable_forwarding.html -> Forwarding to More than One Server
+$ActionQueueType LinkedList # use asynchronous processing
+$ActionQueueFileName srvrfwd1 # set file name, also enables disk mode
+$ActionResumeRetryCount -1 # infinite retries on insert failure
+$ActionQueueSaveOnShutdown on # save in-memory data if rsyslog shuts down
+:msg, contains, "outgoing traffic " @@gwol-north.grid5000.fr:514
+:msg, contains, "outgoing traffic " @@gwol-south.grid5000.fr:514
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/nvidia_configure/nvidia-reset-mig b/grid5000/steps/data/setup/puppet/modules/env/files/std/nvidia_configure/nvidia-reset-mig
new file mode 100644
index 0000000..e17ccbc
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/nvidia_configure/nvidia-reset-mig
@@ -0,0 +1,16 @@
+#!/bin/bash
+GPU=`/usr/bin/nvidia-smi --query-gpu=index --format=csv,noheader 2> /dev/null`
+if [ $? -eq 9 ] ; then
+        echo "`hostname` node don't have GPU"
+else
+        for i in $GPU
+        do 
+                mig=`/usr/bin/nvidia-smi -i $i -mig 0`
+                if [[ $mig =~ "Not Supported" ]]; then
+                        echo "GPU $i isn't compatible with MIG"
+                else
+                        echo "OK : Disabled MIG Mode for GPU $i"
+                fi
+        done
+fi
+exit 0
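
A sketch of a manual check for this script (assumes a MIG-capable GPU such as an A100 and a driver recent enough to expose the MIG query fields; not part of the shipped file):

    /usr/local/bin/nvidia-reset-mig
    nvidia-smi --query-gpu=index,mig.mode.current --format=csv   # expected: Disabled for each GPU
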
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/nvidia_configure/nvidia-reset-mig.service b/grid5000/steps/data/setup/puppet/modules/env/files/std/nvidia_configure/nvidia-reset-mig.service
new file mode 100644
index 0000000..2742427
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/nvidia_configure/nvidia-reset-mig.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Reset MIG configuration on GPU nvidia A100
+Before=dcgm-exporter.service prometheus-node-exporter.service ganglia-monitor.service
+After=nvidia-smi.service
+[Service]
+Type=oneshot
+# Ignore the exit code: the command fails when no GPU is found or when the GPU isn't an A100
+ExecStart=-/usr/local/bin/nvidia-reset-mig
+[Install]
+WantedBy=multi-user.target
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/batch_job_bashrc b/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/batch_job_bashrc
new file mode 100644
index 0000000..032fd99
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/batch_job_bashrc
@@ -0,0 +1,6 @@
+#
+# OAR bash environment file for batch job users only
+#
+
+source ~/.bashrc
+
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/default_oar-node b/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/default_oar-node
new file mode 100644
index 0000000..b8a6fc0
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/default_oar-node
@@ -0,0 +1,53 @@
+# OARSERVER: machine where we remotely run oarnodesetting
+OARREMOTE="oar"
+
+# The paths to oarnodecheckquery and oarnodecheckrun (check your installation)
+OARNODECHECKQUERY=/usr/bin/oarnodecheckquery
+OARNODECHECKRUN=/usr/lib/oar/oarnodecheckrun
+# Home directory of user oar
+OARHOME=/var/lib/oar
+
+# retry settings
+MODSLEEP=20
+MINSLEEP=10
+MAXRETRY=180
+
+
+start_oar_node() {
+    test -n "$OARREMOTE" || exit 0
+    local retry=0
+    local sleep=0
+    local status=1
+    until [ $status -eq 0 ]; do
+      echo "oar-node: perform sanity checks"
+      $OARNODECHECKRUN
+      $OARNODECHECKQUERY
+      status=$?
+      [ $status -eq 0 ] && {
+        echo "oar-node: set the ressources of this node to Alive"
+        ssh -t -oStrictHostKeyChecking=no -oPasswordAuthentication=no -i 
$OARHOME/.ssh/oarnodesetting_ssh.key oar@$OARREMOTE
+        status=$?
+      }
+      [ $status -ne 0 ] && {
+        if [ $((retry+=sleep)) -gt $MAXRETRY ]; then
+          echo "oar-node: FAILED"
+          return 1
+        fi
+       local random=$RANDOM
+        # Workaround for the case where dash is the default shell: dash does
+        # not provide $RANDOM
+        if [ "x$random" = "x" ]; then
+            random=$(bash -c 'echo $RANDOM')
+        fi 
+        sleep=$(($random % $MODSLEEP + $MINSLEEP))
+        echo "oar-node: retrying in $sleep seconds..."
+        sleep $sleep
+      }
+    done
+    return 0
+}
+
+stop_oar_node() {
+    :
+}
+
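
The retry loop above waits a random 10-29 seconds between checks, falling back to bash when dash provides no $RANDOM. A standalone sketch of the back-off computation (illustration only):

    MODSLEEP=20; MINSLEEP=10
    random=$RANDOM
    [ "x$random" = "x" ] && random=$(bash -c 'echo $RANDOM')   # dash fallback, as in the script
    sleep=$((random % MODSLEEP + MINSLEEP))
    echo "oar-node: retrying in $sleep seconds..."             # always between 10 and 29
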
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/default_oar-node_site b/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/default_oar-node_site
new file mode 100644
index 0000000..273cf08
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/default_oar-node_site
@@ -0,0 +1,49 @@
+# OARSERVER: machine where we remotely run oarnodesetting
+OARREMOTE="frontend"
+
+# The paths to oarnodecheckquery and oarnodecheckrun (check your installation)
+OARNODECHECKQUERY=/usr/bin/oarnodecheckquery
+OARNODECHECKRUN=/usr/lib/oar/oarnodecheckrun
+# Home directory of user oar
+OARHOME=/var/lib/oar
+
+# retry settings
+MODSLEEP=20
+MINSLEEP=10
+MAXRETRY=180
+# Ugly glitch to use the right oar key.
+SITE=$( hostname | cut -d'.' -f2)
+
+
+start_oar_node() {
+    test -n "$OARREMOTE" || exit 0
+    local retry=0
+    local sleep=0
+    local status=1
+    until [ $status -eq 0 ]; do
+      echo "oar-node: perform sanity checks"
+      $OARNODECHECKRUN
+      $OARNODECHECKQUERY
+      status=$?
+      [ $status -eq 0 ] && {
+        echo "oar-node: set the ressources of this node to Alive"
+        ssh -t -oStrictHostKeyChecking=no -oPasswordAuthentication=no -i 
$OARHOME/.ssh/oarnodesetting_ssh_$SITE.key oar@$OARREMOTE -p 6667
+        status=$?
+      }
+      [ $status -ne 0 ] && {
+        if [ $((retry+=sleep)) -gt $MAXRETRY ]; then
+          echo "oar-node: FAILED"
+          return 1
+        fi
+        ((sleep = $RANDOM % $MODSLEEP + $MINSLEEP))
+        echo "oar-node: retrying in $sleep seconds..."
+        sleep $sleep
+      }
+    done
+    return 0
+}
+
+stop_oar_node() {
+    :
+}
+
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/etc/security/access.conf b/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/etc/security/access.conf
new file mode 100644
index 0000000..d5a4ebb
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/etc/security/access.conf
@@ -0,0 +1,66 @@
+# Login access control table.
+# 
+# When someone logs in, the table is scanned for the first entry that
+# matches the (user, host) combination, or, in case of non-networked
+# logins, the first entry that matches the (user, tty) combination.  The
+# permissions field of that table entry determines whether the login will 
+# be accepted or refused.
+# 
+# Format of the login access control table is three fields separated by a
+# ":" character:
+#
+# [Note, if you supply a 'fieldsep=|' argument to the pam_access.so
+# module, you can change the field separation character to be
+# '|'. This is useful for configurations where you are trying to use
+# pam_access with X applications that provide PAM_TTY values that are
+# the display variable like "host:0".]
+# 
+#      permission : users : origins
+# 
+# The first field should be a "+" (access granted) or "-" (access denied)
+# character. 
+#
+# The second field should be a list of one or more login names, group
+# names, or ALL (always matches). A pattern of the form user@host is
+# matched when the login name matches the "user" part, and when the
+# "host" part matches the local machine name.
+#
+# The third field should be a list of one or more tty names (for
+# non-networked logins), host names, domain names (begin with "."), host
+# addresses, internet network numbers (end with "."), ALL (always
+# matches) or LOCAL (matches any string that does not contain a "."
+# character).
+#
+# If you run NIS you can use @netgroupname in host or user patterns; this
+# even works for @usergroup@@hostgroup patterns. Weird.
+#
+# The EXCEPT operator makes it possible to write very compact rules.
+#
+# The group file is searched only when a name does not match that of the
+# logged-in user. Both the user's primary group is matched, as well as
+# groups in which users are explicitly listed.
+#
+# TTY NAMES: Must be in the form returned by ttyname(3) less the initial
+# "/dev" (e.g. tty1 or vc/1)
+#
+##############################################################################
+# 
+# Disallow non-root logins on tty1
+#
+#-:ALL EXCEPT root:tty1
+# 
+# Disallow console logins to all but a few accounts.
+#
+#-:ALL EXCEPT wheel shutdown sync:LOCAL
+#
+# Disallow non-local logins to privileged accounts (group wheel).
+#
+#-:wheel:ALL EXCEPT LOCAL .win.tue.nl
+#
+# Some accounts are not allowed to login from anywhere:
+#
+#-:wsbscaro wsbsecr wsbspac wsbsym wscosor wstaiwde:ALL
+#
+# All other accounts are allowed to login from anywhere.
+#
++:ALL:LOCAL EXCEPT ttyS1
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/oar_sshclient_config b/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/oar_sshclient_config
new file mode 100644
index 0000000..65ca659
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/oar_sshclient_config
@@ -0,0 +1,5 @@
+Host *
+    ForwardX11 no
+    StrictHostKeyChecking no
+    PasswordAuthentication no
+    AddressFamily inet
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/var/lib/oar/access.conf b/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/var/lib/oar/access.conf
new file mode 100644
index 0000000..e367d23
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/oar/var/lib/oar/access.conf
@@ -0,0 +1,66 @@
+# Login access control table.
+# 
+# When someone logs in, the table is scanned for the first entry that
+# matches the (user, host) combination, or, in case of non-networked
+# logins, the first entry that matches the (user, tty) combination.  The
+# permissions field of that table entry determines whether the login will 
+# be accepted or refused.
+# 
+# Format of the login access control table is three fields separated by a
+# ":" character:
+#
+# [Note, if you supply a 'fieldsep=|' argument to the pam_access.so
+# module, you can change the field separation character to be
+# '|'. This is useful for configurations where you are trying to use
+# pam_access with X applications that provide PAM_TTY values that are
+# the display variable like "host:0".]
+# 
+#      permission : users : origins
+# 
+# The first field should be a "+" (access granted) or "-" (access denied)
+# character. 
+#
+# The second field should be a list of one or more login names, group
+# names, or ALL (always matches). A pattern of the form user@host is
+# matched when the login name matches the "user" part, and when the
+# "host" part matches the local machine name.
+#
+# The third field should be a list of one or more tty names (for
+# non-networked logins), host names, domain names (begin with "."), host
+# addresses, internet network numbers (end with "."), ALL (always
+# matches) or LOCAL (matches any string that does not contain a "."
+# character).
+#
+# If you run NIS you can use @netgroupname in host or user patterns; this
+# even works for @usergroup@@hostgroup patterns. Weird.
+#
+# The EXCEPT operator makes it possible to write very compact rules.
+#
+# The group file is searched only when a name does not match that of the
+# logged-in user. Both the user's primary group is matched, as well as
+# groups in which users are explicitly listed.
+#
+# TTY NAMES: Must be in the form returned by ttyname(3) less the initial
+# "/dev" (e.g. tty1 or vc/1)
+#
+##############################################################################
+# 
+# Disallow non-root logins on tty1
+#
+#-:ALL EXCEPT root:tty1
+# 
+# Disallow console logins to all but a few accounts.
+#
+#-:ALL EXCEPT wheel shutdown sync:LOCAL
+#
+# Disallow non-local logins to privileged accounts (group wheel).
+#
+#-:wheel:ALL EXCEPT LOCAL .win.tue.nl
+#
+# Some accounts are not allowed to login from anywhere:
+#
+#-:wsbscaro wsbsecr wsbspac wsbsym wscosor wstaiwde:ALL
+#
+# All other accounts are allowed to login from anywhere.
+#
+-:ALL:ALL
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/sudo-g5k/id_rsa_sudo-g5k b/grid5000/steps/data/setup/puppet/modules/env/files/std/sudo-g5k/id_rsa_sudo-g5k
new file mode 100644
index 0000000..0086ebd
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/sudo-g5k/id_rsa_sudo-g5k
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAk+SSpC0tgjVcoagfCoBNhBktXuY1rWvv9nuTL6dM3mkcu6uP
+wch1n/KQHgq9+ibr/ZAvo0Mva8G181wMBpwMpuI0/jlvd5710of7aM/LEz5fI7GO
+AhU1Fn/WehgcNzFskEfBbEEZEzKV/lcGMYViMPLKQ22g1ADYhiff5U0B+Q7asDSY
+1MbbCEpDR6xDJMjgkhZL6BS67S2RJibYO1V7moQP9l5lKha5hz8B515m6nFAugvM
+En8xWPE39vjpNkHo78juboxeDz94qOGGFZjZUqodpw4j9cKjgDfCkpDN2hyjbgMX
+mct55rBvbnMkLYH2zsqVRM978fEwNlvw2OnSgwIDAQABAoIBAAMhREU7O4pU7Mfz
+Ee0b+AgCrGYkwyAPd72kseHBTawrV1NVdy5nuq2O6aPpIEoqBraQFaID6v4B1IL5
+ALwnE1F42hxDROuoLpWtERIPy8F1gXf06wd6QWxfej+NQROd9Sk8i4hp/EjeujPu
+zY/AvepBSSySJmQ2PF7ieyeUMKV/tb6rftx0r5aeFmCU3Rwm9FtSRobmanJtNlxN
+364awfrY3p9pF4DaCwXU/S6OGwZDVbxZR0CDURvd1nvsw75bDwYmMSLYXN8NqLYv
+zgBfY93NC1sWn3LYGWhkzHLLkgsFq1rtF559ndWsNAInsZoHWVlOXgXFJDP0uXQ9
+OfBH2wECgYEAw6gCrqDNpMD9ZcGuoa8iyQNbVDVhSK+1BDHFOcy/jn6RSEeD5832
+qwHE2/XTGU1XFKEiEFuTAdEchs6FuMPjkL/HRelDEZuzES6zECi4aUlWF3cZIDmc
+YAncBDXfq19Clr7JewlSHToPvVi/f1ZBeuT9BQppLYVAUnmNX1Wg6IECgYEAwYFp
+7SpGWsR/ztGe0XHyCpYuWjoAiGrwMSCilzza8LBfCVauZKNEzzyRxU/90JdQb7Vt
+OLUoDnViMXqJxKKT/AxYK1/pMIEb2/hVL5IuGRF3P5B93f8MMUiL8h3vLi7ckFu3
+Y2yfsVbLkzH+/miz9K+3K21Pm+0qbDuqi1QSmQMCgYAn6YkKiIEKv9exP05Zazmk
+WcvypKUAx98cSO7buJnG/qiyXuxYiBpujgDTghUbDzzZV3l0bsnRUBAKq+x5YXR1
+nbmP2GK3H9Tqh2U9waDE0ZH/XWtBuJ7etIQuU7MZ6WfTCn5dW+xlS+fUD1uZJUZc
+RIQc2B/Wil0xtdwR+4zEgQKBgBLF0Vb24PhxU8zmvTeojEjGpRyPv/l8Fm1Nszhk
+6QCsu6uWQzj0Nfq8749q4T7ZTy5nNX9o48fzT2Fpd/AhraWoNO2QUnkoLWG7x9fj
+oKFB9oWuKOfelHo7hYgpq0iZt0AyBaqZoSx9NSbElO5tjffRDD4kTrLb4V+6siLu
+NxetAoGABG57q1pIx1ftFjQCrEd8HaQX1Axx2EOGU0Iu8sA9d0u1aWMx4NYHVS8U
+PIqNtP5/EnlInCU1oWyg9zf6Wj7Z1F72v6EqfbxRYUr5F6qfMRkRDDyXTuweLwex
+Q5Zty1bC9bIDMsJQBoK6e3ywFwNu685X/lO5SNNSbmBJ1mWeu8o=
+-----END RSA PRIVATE KEY-----
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/std/sudo-g5k/id_rsa_sudo-g5k.pub b/grid5000/steps/data/setup/puppet/modules/env/files/std/sudo-g5k/id_rsa_sudo-g5k.pub
new file mode 100644
index 0000000..8a42299
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/std/sudo-g5k/id_rsa_sudo-g5k.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCT5JKkLS2CNVyhqB8KgE2EGS1e5jWta+/2e5Mvp0zeaRy7q4/ByHWf8pAeCr36Juv9kC+jQy9rwbXzXAwGnAym4jT+OW93nvXSh/toz8sTPl8jsY4CFTUWf9Z6GBw3MWyQR8FsQRkTMpX+VwYxhWIw8spDbaDUANiGJ9/lTQH5DtqwNJjUxtsISkNHrEMkyOCSFkvoFLrtLZEmJtg7VXuahA/2XmUqFrmHPwHnXmbqcUC6C8wSfzFY8Tf2+Ok2QejvyO5ujF4PP3io4YYVmNlSqh2nDiP1wqOAN8KSkM3aHKNuAxeZy3nmsG9ucyQtgfbOypVEz3vx8TA2W/DY6dKD sudog5k@key
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/version b/grid5000/steps/data/setup/puppet/modules/env/files/version
new file mode 100644
index 0000000..4043493
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/version
@@ -0,0 +1,2 @@
+# This file will contain the image version of this build.
+# This version will be filled by kameleon (stored as kameleon global)
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/id_rsa b/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/id_rsa
new file mode 100644
index 0000000..938b6b3
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/id_rsa
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAt0IIibHCE0vMewl4JB8zOsDi4VUJ2Msuec41brBNvT5ANA23
++o0KoQte4w8UIwvzeWggUDRBzyKkpkejuJm+r9fH1zsFCdT6eMXeITeV8ZYGVjhq
+dEcoLF5wOsfOmg9pbHtlmu9odnXmHsm/+d7wYeykohdYxKkl4UR3hdusjj7RKdjW
+QPdaaChcHR0XrD//Yc/4z54MFnY4hZ4HpS7HdLeP55HaG8uvtZLrDs05XQzi7m5X
+/6HvM6jW52gHEyYJD+47oY7dZMtBw3sAwTFpMJY2kI6uU5l+FCKBEmF/ztxaekWW
+TxDlbBVBK+x37omop91QYCXjIhOQhRaetfjTawIDAQABAoIBAAkh7079XtCbXGtd
+Q3F5ZJIu/p+AH2eAaKaFUkBb5OPjcEuny11fHgJ8kJP8MmK0u8N3HvUgRY9PCKmI
+tG4Eq24T8M+XD184D+to4PMC1CQf99zgHt4Alc3wPuOPBYrD7dsMIzofaDNPGNK7
+9yc6pvwaUPIK+8+BJnQdd19iXS0RepzDzeCw4P0rGtkQpwX6VV09AGzuH4d+puhE
+u6/yLLCN9/kb46SuGG4AFxuQl5LtAllU4jtAkcUxG/vdNKRGIh3BsP4wmVFGTQ0t
+chDX0IKm8u7OJAF18zEpEOPuXpWCMZ3TerALc94S/WBQBuEcJmMInhKZYoiYK68T
+xewb0VECgYEA8qCd0t9w4d3zcn5Hvq1kHTwgtZnnUvdebqgPBnBZb8XOWdNIkyFG
+j/zzI9edO8UmJQ388SJtdlp4jTzD39n4jmR5pkvS2AUKWnKYDYTkPnqPXnWf/2c0
+myrBeX2CXExtpHZw2gkBhpe5qR8fXGxazBuZA9QfpleNdw2Ybnhvst0CgYEAwVu8
+S5yRJ/VFXy6gqsdB1VNui1PtPe7LaWG9uYLB2oa6I89R1yjJYx+UP1Nt0v2Y4rCw
+dM4/1fyMh/vAzUeyOt/Un2CVVpbO/K1XBEztlwAQIDy1tqLfvYOeAgQ753o4OYpo
+XpCDYnjlRzaMRPZsWn7c5y4p5Dg0jxnJ/DP+RucCgYEA6w54WEdokSn6JL36u9w3
+1are9ZD47wQAVKw0gkRuIT89vwBWm1PtjKm+1Maa6cECR3vZxbNY4QSdLhfknAYM
+K8djo5xp1CZt9Vp3vQE2LuGF7DmAnGtcJ8ewUQcrOEhDIMYuZs260K5FjHc+ZsgC
+3yMNhwwG7Zx8zQ460yuS63ECgYEAtjS46lN+obXKCliJBIVB70FwsRCERlFJE2QM
+gczK2h8NNwN9bpA7vhGbBFWc7y8UK8IuddOJah9TWi0NUSQXus0DsrAz6eWw5YB+
+uEm5tgpUJ9ytq44t4ostkV7mCEouw2I+2aW6eUfNXt7zLWU9U8Wqapsg1LN2K8c0
+hFmGas8CgYEAz2FwiwMyVRsloI+QtN4VMsZsz+CFfUXYTv5erNV2FE9JhpEEulSU
+1Gn8psGe6NSQfcNQ5IntoWQD4WWcKznlNFBPc6N8n0kQttvoeNU0Jaw/6P0FRtLv
+xw7uclDorHsyMjCRxK9H7rhKx27uWL2/g2gd6RYwYBYs9HPMS9DGBQo=
+-----END RSA PRIVATE KEY-----
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/id_rsa.pub b/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/id_rsa.pub
new file mode 100644
index 0000000..b2661a8
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/id_rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3QgiJscITS8x7CXgkHzM6wOLhVQnYyy55zjVusE29PkA0Dbf6jQqhC17jDxQjC/N5aCBQNEHPIqSmR6O4mb6v18fXOwUJ1Pp4xd4hN5XxlgZWOGp0RygsXnA6x86aD2lse2Wa72h2deYeyb/53vBh7KSiF1jEqSXhRHeF26yOPtEp2NZA91poKFwdHResP/9hz/jPngwWdjiFngelLsd0t4/nkdoby6+1kusOzTldDOLublf/oe8zqNbnaAcTJgkP7juhjt1ky0HDewDBMWkwljaQjq5TmX4UIoESYX/O3Fp6RZZPEOVsFUEr7Hfuiain3VBgJeMiE5CFFp61+NNr dom0 to domU key
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/random_mac b/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/random_mac
new file mode 100644
index 0000000..9c2bc8a
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/random_mac
@@ -0,0 +1,38 @@
+#!/bin/sh
+
+SITE_NAME=$(hostname | cut -d. -f2)
+
+# Code the 2nd byte of the IP in the mac address, in order to avoid conflicts
+# with g5k-subnets (see [[Virtual network interlink]])
+
+if   [ "x$SITE_NAME" = "xbordeaux"     ] ; then
+  SITE_HEX=83
+elif [ "x$SITE_NAME" = "xlille"        ] ; then
+  SITE_HEX=8b
+elif [ "x$SITE_NAME" = "xlyon"         ] ; then
+  SITE_HEX=8f
+elif [ "x$SITE_NAME" = "xnancy"        ] ; then
+  SITE_HEX=93
+elif [ "x$SITE_NAME" = "xrennes"       ] ; then
+  SITE_HEX=9f
+elif [ "x$SITE_NAME" = "xtoulouse"     ] ; then
+  SITE_HEX=a3
+elif [ "x$SITE_NAME" = "xsophia"       ] ; then
+  SITE_HEX=a7
+elif [ "x$SITE_NAME" = "xreims"        ] ; then
+  SITE_HEX=ab
+elif [ "x$SITE_NAME" = "xluxembourg"   ] ; then
+  SITE_HEX=af
+elif [ "x$SITE_NAME" = "xnantes"       ] ; then
+  SITE_HEX=b3
+elif [ "x$SITE_NAME" = "xgrenoble"     ] ; then
+  SITE_HEX=b7
+elif [ "x$SITE_NAME" = "xqualif"       ] ; then
+  SITE_HEX=ff
+else
+  # Orsay (or unknown site)
+  SITE_HEX=97
+fi
+
+MACADDR="00:16:3e:$SITE_HEX:$(dd if=/dev/urandom count=1 2>/dev/null | md5sum 
| sed 's/^\(..\)\(..\).*$/\1:\2/')"
+echo $MACADDR
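
The generated address keeps the Xen OUI 00:16:3e, encodes the site in the fourth byte (SITE_HEX) and takes the last two bytes from the md5sum of random data. A hypothetical run on a rennes node would print something like:

    $ /usr/local/bin/random_mac
    00:16:3e:9f:4e:a1    # 9f = rennes, the last two bytes vary on every call
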
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/xen-g5k b/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/xen-g5k
new file mode 100644
index 0000000..e4c48cf
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/xen-g5k
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+NAME=xen-g5k
+RAND_MAC_CMD="/usr/local/bin/random_mac"
+
+XEN_DIR=/etc/xen
+DOMUS_CONF_FILES=`ls $XEN_DIR/*.cfg`
+
+test -f $RAND_MAC_CMD || exit 0
+test -d $XEN_DIR      || exit 0
+
+case "$1" in
+       start|reload|force-reload|restart)
+
+      mkdir -p /var/log/xen
+      for conf_file in $DOMUS_CONF_FILES; do
+        sed -i s/mac=[A-Za-z0-9:]*/mac=$($RAND_MAC_CMD)/g $conf_file
+      done
+
+       ;;
+       stop)
+       ;;
+       *)
+               echo "Usage: invoke-rc.d $NAME 
{start|stop|reload|force-reload|restart}"
+       ;;
+esac
+
+exit 0
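
On start, the script rewrites the mac= field of every domU configuration in /etc/xen/*.cfg with a fresh address from random_mac. A sketch of the substitution on a sample vif line (hypothetical cfg content):

    echo "vif = ['mac=00:16:3e:00:00:00,bridge=br0']" \
      | sed "s/mac=[A-Za-z0-9:]*/mac=$(/usr/local/bin/random_mac)/g"
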
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/xen-g5k.service b/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/xen-g5k.service
new file mode 100644
index 0000000..f79c7e5
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/xen-g5k.service
@@ -0,0 +1,8 @@
+[Unit]
+Description=Generate MAC addresses for Xen DomU and create /var/log/xen
+
+[Service]
+ExecStart=/usr/sbin/xen-g5k start
+
+[Install]
+WantedBy=multi-user.target
diff --git a/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/xend-config.sxp b/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/xend-config.sxp
new file mode 100644
index 0000000..28057f6
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/files/xen/xen/xend-config.sxp
@@ -0,0 +1,5 @@
+(network-script network-bridge)
+(vif-script vif-bridge)
+(dom0-min-mem 196)
+(dom0-cpus 0)
+(vncpasswd '')
diff --git a/grid5000/steps/data/setup/puppet/modules/env/lib/facter/installed_kernelreleases.rb b/grid5000/steps/data/setup/puppet/modules/env/lib/facter/installed_kernelreleases.rb
new file mode 100644
index 0000000..71f91fb
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/lib/facter/installed_kernelreleases.rb
@@ -0,0 +1,12 @@
+Facter.add(:installed_kernelreleases) do
+  setcode do
+    kernels = Dir.glob('/boot/{vmlinuz,vmlinux}-*')
+
+    kernels.sort_by! do |k|
+      m = /^\/boot\/vmlinu[zx]-(\d+)\.(\d+)\.(\d+)(_|-)(\d+).*$/.match(k)
+      [m[1].to_i, m[2].to_i, m[3].to_i, m[5].to_i]
+    end
+
+    kernels.map { |k| k.gsub(/\/boot\/vmlinu[zx]-(\d+\.\d+\.\d+(_|-)\d+.*)/, '\1') }
+  end
+end
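
The fact lists /boot/vmlinuz-* (or vmlinux-*) entries sorted numerically, oldest first, so the last element is the newest installed kernel (drivers.pp uses installed_kernelreleases[-1] below). A sketch of querying it on a provisioned node, assuming facter is allowed to load Puppet custom facts:

    facter -p installed_kernelreleases
    # e.g. ["4.19.0-17-amd64", "4.19.0-18-amd64"]
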
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base.pp
new file mode 100644
index 0000000..3ff9155
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base.pp
@@ -0,0 +1,53 @@
+# This file contains the 'base' class used to configure a basic environment to be executed in grid'5000.
+
+class env::base ( $variant = "base", $parent_parameters = {} ){
+
+  $base_parameters = {
+    misc_keep_tmp => true,
+    ganglia_enable => false
+  }
+
+  $parameters = merge ( $base_parameters, $parent_parameters )
+  # Include min class
+  class {
+    'env::min':
+      variant => $variant,
+      parent_parameters => $parameters;
+  }
+
+  class { 'env::base::do_not_clean_tmp':
+    keep_tmp => $parameters['misc_keep_tmp'];
+  }
+
+  # Include kexec-tools
+  class { 'env::base::configure_kexec': }
+  # SSH modification
+  class { 'env::base::increase_ssh_maxstartups': }
+  # Specific tuning
+  class { 'env::base::tcp_tuning_for_10gbe': }
+  # Cpufreq. Not available on ppc64
+  if $env::deb_arch != 'ppc64el' {
+    class { 'env::base::enable_cpufreq_with_performance_governor': }
+  }
+  # Ganglia
+  class {
+    'env::base::install_and_disable_ganglia':
+      enable => $parameters['ganglia_enable']
+  }
+  #IbOverIP
+  class { 'env::base::configure_ip_over_infiniband': }
+  # memlock tuning for infiniband
+  class { 'env::base::unlimited_memlock_for_infiniband': }
+  # Omni-Path
+  class { 'env::base::configure_omnipath': }
+  #Add ca2019.grid5000.fr certificate
+  class { 'env::base::add_ca_grid5000': }
+  #Dhclient conf
+  class { 'env::base::configure_dhclient': }
+  # Disable ndctl monitor service
+  class { 'env::base::disable_ndctl_monitor': }
+  # Enable userns for Nix
+  class { 'env::base::enable_userns': }
+  # Disable NVMe multipath support
+  class { 'env::base::disable_nvme_multipath': }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/add_ca_grid5000.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/add_ca_grid5000.pp
new file mode 100644
index 0000000..bbca3c6
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/add_ca_grid5000.pp
@@ -0,0 +1,14 @@
+# Add ca2019.grid5000.fr certificate
+
+class env::base::add_ca_grid5000 {
+
+  exec {
+    'get_ca2019':
+      command => "/usr/bin/wget --no-check-certificate -q 
https://www.grid5000.fr/certs/ca2019.grid5000.fr.crt -O 
/usr/local/share/ca-certificates/ca2019.grid5000.fr.crt",
+      creates => "/usr/local/share/ca-certificates/ca2019.grid5000.fr.crt";
+    'update_ca':
+      command => "/usr/sbin/update-ca-certificates",
+      require => Exec['get_ca2019'];
+    }
+
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/configure_dhclient.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/configure_dhclient.pp
new file mode 100644
index 0000000..d810750
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/configure_dhclient.pp
@@ -0,0 +1,16 @@
+class env::base::configure_dhclient () {
+
+  file_line { 'dhclient_interval':
+    ensure => present,
+    path   => '/etc/dhcp/dhclient.conf',
+    line   => 'initial-interval 1; # retry more frequently in case packets get lost',
+    match  => '.*initial-interval.*',
+  }
+
+  file_line { 'dhclient_timeout':
+    ensure => present,
+    path   => '/etc/dhcp/dhclient.conf',
+    line   => 'timeout 90; # slow clusters can take more than 60s (bug #10716, grisou)',
+    match  => '^(#)?timeout .*',
+  }
+}
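
After this class is applied, /etc/dhcp/dhclient.conf carries the two managed lines (any pre-existing initial-interval or timeout line is replaced thanks to the match patterns):

    initial-interval 1; # retry more frequently in case packets get lost
    timeout 90; # slow clusters can take more than 60s (bug #10716, grisou)
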
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/configure_ip_over_infiniband.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/configure_ip_over_infiniband.pp
new file mode 100644
index 0000000..50fd606
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/configure_ip_over_infiniband.pp
@@ -0,0 +1,69 @@
+class env::base::configure_ip_over_infiniband (){
+
+  if  $::lsbdistcodename == 'stretch' {
+
+    $infiniband_packages = ['qlvnictools']
+
+    ensure_packages([$infiniband_packages], {'ensure' => 'installed'})
+
+    Package[$infiniband_packages]
+    ->Service['openibd']
+
+  }
+
+  # Following the documentation at https://wiki.debian.org/RDMA, there is no need to install opensm on the environments.
+  # It could conflict with other OpenSM instances present on network hardware or on clusters external to Grid5000 (example: https://intranet.grid5000.fr/bugzilla/show_bug.cgi?id=10747)
+  service {
+    'openibd':
+      provider => 'systemd',
+      enable   => true,
+      require  => [
+        File['/etc/systemd/system/openibd.service']
+      ];
+  }
+
+  file {
+    '/etc/infiniband':
+      ensure  => directory,
+      owner   => root,
+      group   => root,
+      mode    => '0644';
+    '/etc/infiniband/openib.conf':
+      ensure  => file,
+      owner   => root,
+      group   => root,
+      mode    => '0644',
+      source  => 'puppet:///modules/env/base/infiniband/openib.conf',
+      require => File['/etc/infiniband'];
+    '/etc/init.d/openibd':
+      ensure  => file,
+      owner   => root,
+      group   => root,
+      mode    => '0755',
+      source  => 'puppet:///modules/env/base/infiniband/openibd';
+    '/etc/systemd/system/openibd.service':
+      ensure  => file,
+      owner   => root,
+      group   => root,
+      mode    => '0644',
+      source  => 'puppet:///modules/env/base/infiniband/openibd.service';
+    '/lib/udev/rules.d/90-ib.rules':
+      ensure  => present,
+      owner   => root,
+      group   => root,
+      mode    => '0644',
+      source  => 'puppet:///modules/env/base/infiniband/90-ib.rules';
+  }
+
+  # Prevent ibacm.service from being in failed status (see #13013)
+  if "${::lsbdistcodename}" == "bullseye" {
+    file {
+      '/etc/systemd/system/ibacm.service.d/':
+        ensure  => directory;
+      '/etc/systemd/system/ibacm.service.d/override.conf':
+        ensure  => present,
+        content => "[Service]\nType=exec\nExecStart=\nExecStart=-/usr/sbin/ibacm --systemd",
+        require => File['/etc/systemd/system/ibacm.service.d/'];
+    }
+  }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/configure_kexec.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/configure_kexec.pp
new file mode 100644
index 0000000..55a95b0
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/configure_kexec.pp
@@ -0,0 +1,16 @@
+class env::base::configure_kexec {
+
+  file {
+    "/etc/default/kexec":
+      mode    => '0755',
+      owner   => root,
+      group   => root,
+      source  => "puppet:///modules/env/base/kexec/kexec";
+  }
+
+  package {
+    'kexec-tools':
+      ensure  => installed;
+  }
+
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/configure_omnipath.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/configure_omnipath.pp
new file mode 100644
index 0000000..9cf899a
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/configure_omnipath.pp
@@ -0,0 +1,74 @@
+class env::base::configure_omnipath(){
+
+  case "${::lsbdistcodename}" {
+    'bullseye': {
+      $opapackages = ['opa-address-resolution', 'opa-fastfabric', 'libopamgt0', 'libopasadb1',
+                      'opa-basic-tools', 'firmware-misc-nonfree']
+
+      $rdmapackages = ['qperf', 'libibverbs1', 'librdmacm1', 'libibmad5', 'libibumad3', 'ibverbs-providers',
+                      'rdmacm-utils', 'infiniband-diags', 'libfabric1', 'ibverbs-utils']
+
+      if $env::deb_arch == 'amd64' {
+        ensure_packages([$opapackages, $rdmapackages], {
+          ensure => present
+        })
+
+        # rdma-load-modules@opa.service would fail with opa_vnic (not available)
+        # opa_vnic isn't required to make OPA work
+        exec {
+          'disable opa_vnic':
+            command => "/bin/sed -i 's/opa_vnic/# opa_vnic/g' /etc/rdma/modules/opa.conf",
+            require => Package[$rdmapackages]
+        }
+      } else {
+        # opapackages are only available on amd64
+        ensure_packages($rdmapackages, {
+          ensure => present
+        })
+      }
+    }
+    'buster': {
+      $opapackages = ['opa-address-resolution', 'opa-fastfabric', 'libopamgt0', 'libopasadb1',
+                      'opa-basic-tools', 'firmware-misc-nonfree']
+
+      $rdmapackages = ['qperf', 'libibverbs1', 'librdmacm1', 'libibmad5', 'libibumad3', 'ibverbs-providers',
+                      'rdmacm-utils', 'infiniband-diags', 'libfabric1', 'ibverbs-utils']
+
+      if $env::deb_arch == 'amd64' {
+        ensure_packages([$opapackages, $rdmapackages], {
+          ensure => present
+        })
+
+        # rdma-load-modules@opa.service would fail with opa_vnic (not available)
+        # opa_vnic isn't required to make OPA work
+        exec {
+          'disable opa_vnic':
+            command => "/bin/sed -i 's/opa_vnic/# opa_vnic/g' /etc/rdma/modules/opa.conf",
+            require => Package[$rdmapackages]
+        }
+      } else {
+        # opapackages and libfabric1 are only available on amd64
+        ensure_packages([$rdmapackages - ['libfabric1']], {
+          ensure => present
+        })
+      }
+    }
+    'stretch': {
+      $opapackages = ['opa-address-resolution', 'hfi1-diagtools-sw',
+                      'hfi1-firmware', 'hfi1-uefi', 'libhfi1',
+                      'opa-fastfabric', 'opa-scripts', 'qperf' ]
+
+      env::common::g5kpackages {
+          'scibian9-opa10.7':
+            packages => $opapackages;
+      }
+
+      # There's a bug in the renicing of ib_mad processes (see bug 9421), so we disable it.
+      exec {
+        'disable renicing':
+          command => "/bin/sed -i 's/RENICE_IB_MAD=yes/RENICE_IB_MAD=no/' /etc/rdma/rdma.conf",
+          require => Package['opa-scripts']
+      }
+    }
+  }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/disable_ndctl_monitor.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/disable_ndctl_monitor.pp
new file mode 100644
index 0000000..4170fb1
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/disable_ndctl_monitor.pp
@@ -0,0 +1,16 @@
+class env::base::disable_ndctl_monitor {
+  file {
+    '/etc/systemd/system-preset/' :
+      ensure  => directory,
+      owner   => root,
+      group   => root,
+      mode    => '0644';
+    '/etc/systemd/system-preset/10-ndctl.preset' :
+      ensure  => file,
+      owner   => root,
+      group   => root,
+      mode    => '0644',
+      source  => "puppet:///modules/env/base/ndctl/ndctl.preset",
+      require => File['/etc/systemd/system-preset/'];
+  }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/disable_nvme_multipath.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/disable_nvme_multipath.pp
new file mode 100644
index 0000000..f1fb18a
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/disable_nvme_multipath.pp
@@ -0,0 +1,21 @@
+# Disable multipath support in the NVMe driver.
+#
+# Multipath makes the device name unpredictable (e.g. nvme0n1 vs nvme1n1
+# when there are two NVMe drives), see https://intranet.grid5000.fr/bugzilla/show_bug.cgi?id=12958
+#
+# Multipath also creates a fake device node (e.g. nvme0c65n1) that messes up with g5k-checks:
+#
+# https://intranet.grid5000.fr/bugzilla/show_bug.cgi?id=12962
+# https://intranet.grid5000.fr/bugzilla/show_bug.cgi?id=12955
+
+class env::base::disable_nvme_multipath {
+  file {
+    '/etc/modprobe.d/disable_nvme_multipath.conf' :
+      ensure  => file,
+      owner   => root,
+      group   => root,
+      mode    => '0644',
+      content => "options nvme_core multipath=off\n",
+      notify  => Exec['generate_initramfs'];
+  }
+}
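
Since the option is only set via modprobe.d and the initramfs is rebuilt (Exec['generate_initramfs'] is expected to be declared elsewhere in the module), the effect can be verified after a reboot, for instance with:

    cat /sys/module/nvme_core/parameters/multipath   # expected: N
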
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/do_not_clean_tmp.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/do_not_clean_tmp.pp
new file mode 100644
index 0000000..aeb2ce5
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/do_not_clean_tmp.pp
@@ -0,0 +1,12 @@
+class env::base::do_not_clean_tmp ($keep_tmp = false) {
+
+  if $keep_tmp {
+    # Don't delete /tmp on reboot
+    file {
+      '/etc/tmpfiles.d/tmp.conf':
+        ensure => 'link',
+        target => '/dev/null';
+    }
+  }
+
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/enable_cpufreq_with_performance_governor.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/enable_cpufreq_with_performance_governor.pp
new file mode 100644
index 0000000..2cd2d62
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/enable_cpufreq_with_performance_governor.pp
@@ -0,0 +1,16 @@
+class env::base::enable_cpufreq_with_performance_governor (){
+
+  package {
+    'cpufrequtils':
+      ensure   => installed;
+  }
+
+  file {
+    '/etc/default/cpufrequtils':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/base/cpufreq/cpufrequtils'
+  }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/enable_userns.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/enable_userns.pp
new file mode 100644
index 0000000..89da8b0
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/enable_userns.pp
@@ -0,0 +1,11 @@
+class env::base::enable_userns (){
+
+  file {
+    '/etc/sysctl.d/00-userns.conf':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/base/userns/sysctl-00-userns.conf';
+  }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/increase_ssh_maxstartups.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/increase_ssh_maxstartups.pp
new file mode 100644
index 0000000..389737a
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/increase_ssh_maxstartups.pp
@@ -0,0 +1,15 @@
+# This class adds configuration to sshd. It assumes the ssh server / service resources are already declared (in the 'min' variant).
+class env::base::increase_ssh_maxstartups (){
+
+  augeas {
+    'sshd_config_base':
+      changes => [
+        'set /files/etc/ssh/sshd_config/MaxStartups 500'
+      ],
+      require  => Package['ssh server'];
+  }
+
+  Augeas['sshd_config_base'] ~> Service['ssh']
+
+}
+
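
A possible post-apply check for the Augeas change (requires root; exact output depends on the OpenSSH version):

    /usr/sbin/sshd -T | grep -i maxstartups
    grep -i maxstartups /etc/ssh/sshd_config
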
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/install_and_disable_ganglia.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/install_and_disable_ganglia.pp
new file mode 100644
index 0000000..287634d
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/install_and_disable_ganglia.pp
@@ -0,0 +1,40 @@
+class env::base::install_and_disable_ganglia ($enable = false){
+
+  include env::common::software_versions
+
+  if "$operatingsystem" == 'Debian' {
+    case "${::lsbdistcodename}" {
+      'buster' : {
+        env::common::g5kpackages {
+          'ganglia-monitor':
+            ensure  => $::env::common::software_versions::ganglia_monitor,
+            release => "${::lsbdistcodename}";
+        }
+
+        file {
+          '/etc/ganglia' :
+            ensure  => directory,
+            owner   => root,
+            group   => root,
+            mode    => '0644';
+          '/etc/ganglia/gmond.conf' :
+            ensure  => file,
+            owner   => root,
+            group   => root,
+            mode    => '0644',
+            source  => "puppet:///modules/env/base/ganglia/gmond.conf",
+            require => File['/etc/ganglia'];
+        }
+
+        service {
+          'ganglia-monitor':
+            enable  => $enable,
+            require => Package['ganglia-monitor'];
+        }
+      }
+      default : {
+        # No more ganglia since bullseye
+      }
+    }
+  }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/tcp_tuning_for_10gbe.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/tcp_tuning_for_10gbe.pp
new file mode 100644
index 0000000..4221fe6
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/tcp_tuning_for_10gbe.pp
@@ -0,0 +1,17 @@
+class env::base::tcp_tuning_for_10gbe (){
+
+
+# *** Setting up TCP buffers
+#
+# See: https://www.grid5000.fr/w/TCP_bandwidth_tuning
+#
+
+  file {
+    '/etc/sysctl.d/00-grid5000.conf':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/base/tuning/sysctl-00-grid5000.conf';
+  }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/base/unlimited_memlock_for_infiniband.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/unlimited_memlock_for_infiniband.pp
new file mode 100644
index 0000000..c6b30c6
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/base/unlimited_memlock_for_infiniband.pp
@@ -0,0 +1,17 @@
+class env::base::unlimited_memlock_for_infiniband (){
+
+# *** Setting up unlimited memlock (necessary for infiniband)
+#
+# See: https://intranet.grid5000.fr/bugzilla/show_bug.cgi?id=7183
+#
+
+  file {
+    '/etc/security/limits.d/grid5000.conf':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/base/tuning/limits-grid5000.conf';
+  }
+}
+
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/big.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/big.pp
new file mode 100644
index 0000000..26ce166
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/big.pp
@@ -0,0 +1,46 @@
+# This file contains the 'big' class used to configure an improved environment to be executed in grid'5000.
+class env::big ( $variant = "big", $parent_parameters = {} ){
+
+  $big_parameters = {
+    mic_enable => false
+  }
+  $parameters = merge( $big_parameters, $parent_parameters )
+
+  # Include nfs class
+  class {
+    'env::nfs':
+      variant => $variant,
+      parent_parameters => $parameters;
+  }
+  # mail
+  class { 'env::big::configure_postfix': }
+  # kvm
+  class { 'env::big::configure_kvm': }
+  # nvidia
+  if $env::deb_arch == 'amd64' or $env::deb_arch == 'ppc64el' {
+    class { 'env::big::configure_nvidia_gpu': }
+  }
+  # amdgpu
+  if $env::deb_arch == 'amd64' {
+    class { 'env::big::configure_amd_gpu': }
+  }
+  # beegfs install
+  if $env::deb_arch == 'amd64' {
+    class { 'env::big::install_beegfs': }
+  }
+  #Allow sshfs
+  class { 'env::big::configure_sshfs': }
+  # Config OpenMPI
+  class { 'env::big::install_openmpi': }
+  # Snmp tools
+  class { 'env::big::install_snmp_tools': }
+  # remove RESUME device from initramfs
+  class { 'env::big::configure_initramfs': }
+  # Prometheus
+  class { 'env::big::install_prometheus_exporters': }
+  # g5k-jupyterlab
+  class { 'env::big::install_g5k_jupyterlab': }
+  # smartd
+  class { 'env::big::install_smartd': }
+
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_amd_gpu.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_amd_gpu.pp
new file mode 100644
index 0000000..3a3b9be
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_amd_gpu.pp
@@ -0,0 +1,56 @@
+class env::big::configure_amd_gpu () {
+
+  case $::lsbdistcodename {
+
+    'buster' : {
+      apt::source {
+        'repo.radeon.com':
+          comment      => 'Repo for AMD ROCM packages',
+          location     => 'https://repo.radeon.com/rocm/apt/debian/',
+          release      => 'xenial',
+          repos        => 'main',
+          architecture => 'amd64',
+          key          => {
+            'id'     => '1A693C5C',
+            'source' => 'https://repo.radeon.com/rocm/rocm.gpg.key',
+          },
+          include      => {
+            'deb' => true,
+            'src' => false
+          },
+          notify       => Exec['apt_update'],
+      }
+
+      package {
+        [ 'rock-dkms', 'hip-base', 'rocminfo', 'rocm-smi-lib', 'hip-rocclr', 'rocm-device-libs', 'libtinfo5' ]:
+          ensure          => installed,
+          install_options => ['--no-install-recommends'],
+          require         => [Apt::Source['repo.radeon.com'], Exec['apt_update']];
+      }
+
+      file_line {
+        'rocm_etc_profile_path':
+          path => '/etc/profile',
+          line => 'export PATH=$PATH:/opt/rocm-4.2.0/bin';
+      }
+
+      file {
+        '/usr/local/bin/rocm-smi':
+          ensure  => link,
+          target  => '/opt/rocm-4.2.0/bin/rocm-smi',
+          require => Package['rocm-smi-lib'];
+        '/etc/udev/rules.d/70-amdgpu.rules':
+          ensure  => present,
+          owner   => root,
+          group   => root,
+          mode    => '0644',
+          source  => 'puppet:///modules/env/big/amd_gpu/70-amdgpu.rules',
+          require => Package['rock-dkms'];
+      }
+    }
+
+    'bullseye' : {
+      # TODO: building the amdgpu module (ROCm 4.2) fails with kernel 5.10 - Bug #13159
+    }
+  }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_initramfs.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_initramfs.pp
new file mode 100644
index 0000000..8cc32bf
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_initramfs.pp
@@ -0,0 +1,15 @@
+class env::big::configure_initramfs () {
+
+  case "${::lsbdistcodename}" {
+    "stretch", "buster" : {
+      file {
+        '/etc/initramfs-tools/conf.d/resume':
+          ensure    => present,
+          owner     => root,
+          group     => root,
+          mode      => '0644',
+          content   => 'RESUME=none',
+      }
+    }
+  }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_kvm.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_kvm.pp
new file mode 100644
index 0000000..4c6bb0b
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_kvm.pp
@@ -0,0 +1,83 @@
+class env::big::configure_kvm () {
+
+  package {
+    'uml-utilities':
+      ensure    => installed;
+  }
+
+  file {
+    '/etc/sudoers.d/kvm':
+      ensure    => present,
+      owner     => root,
+      group     => root,
+      mode      => '0440',
+      source    => 'puppet:///modules/env/big/kvm/sudoers',
+      require   => Package['sudo'];
+    '/etc/udev/rules.d/60-qemu-system.rules':
+      ensure    => present,
+      owner     => root,
+      group     => root,
+      mode      => '0644',
+      source    => 'puppet:///modules/env/big/kvm/60-qemu-system.rules';
+    '/usr/local/bin/create_tap':
+      ensure    => present,
+      owner     => root,
+      group     => root,
+      mode      => '0755',
+      source    => 'puppet:///modules/env/big/kvm/create_tap';
+    '/usr/lib/qemu/qemu-bridge-helper':
+      ensure    => present,
+      owner     => root,
+      group     => root,
+      mode      => '4755',
+      require   => Env::Common::G5kpackages['g5k-meta-packages'];
+    '/etc/qemu':
+      ensure    => directory,
+      owner     => root,
+      group     => root,
+      mode      => '0755';
+    '/etc/qemu/bridge.conf':
+      ensure    => file,
+      owner     => root,
+      group     => root,
+      mode      => '0644',
+      content   => "allow br0",
+      require   => File['/etc/qemu'];
+    '/usr/local/bin/random_mac':
+      ensure    => present,
+      owner     => root,
+      group     => root,
+      mode      => '0755',
+      source    => 'puppet:///modules/env/big/kvm/random_mac';
+  }
+
+  Exec{
+    'disable uml-utilities service':
+      command => "/usr/sbin/update-rc.d uml-utilities disable",
+      require => Package['uml-utilities'];
+  }
+
+  package {
+    'sudo':
+      ensure   => installed;
+  }
+
+  file_line { 'kvm_etc_profile_createpath':
+    path => '/etc/profile',
+    line => 'mkdir -p /tmp/$USER-runtime-dir';
+  }
+
+  file_line { 'kvm_etc_profile_path':
+    path => '/etc/profile',
+    line => 'export XDG_RUNTIME_DIR=/tmp/$USER-runtime-dir',
+    require => File_line['kvm_etc_profile_createpath'];
+  }
+
+  # Not sure this is required anymore. Try without, uncomment if needed
+  # augeas {
+  #   'set_XDG_RUNTIME_DIR':
+  #     context   => "/files/etc/profile",
+  #     tag       => "modules",
+  #     changes   =>["set export[last()+1] 
XDG_RUNTIME_DIR=/tmp/$USER-runtime-dir",];
+  # }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu.pp
new file mode 100644
index 0000000..ebf88f1
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu.pp
@@ -0,0 +1,18 @@
+class env::big::configure_nvidia_gpu () {
+
+  #packages = [ 'g++', 'gfortran', 'freeglut3-dev', 'libxmu-dev', 'libxi-dev' ]
+
+  # Blacklist nvidia modules
+  include 'env::big::configure_nvidia_gpu::modules'
+  # Install nvidia drivers
+  include 'env::big::configure_nvidia_gpu::drivers'
+  # Install additional services (currently nvidia-smi, needed by cuda and prometheus)
+  include 'env::big::configure_nvidia_gpu::services'
+  # Install cuda
+  include 'env::big::configure_nvidia_gpu::cuda'
+  # Install nvidia ganglia plugins
+  include 'env::big::configure_nvidia_gpu::ganglia'
+  # Install nvidia prometheus exporter
+  include 'env::big::configure_nvidia_gpu::prometheus'
+
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/cuda.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/cuda.pp
new file mode 100644
index 0000000..9e886bf
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/cuda.pp
@@ -0,0 +1,126 @@
+class env::big::configure_nvidia_gpu::cuda () {
+
+  case "${::lsbdistcodename}" {
+    "stretch" : {
+      $driver_source = 'http://packages.grid5000.fr/other/cuda/cuda_9.0.176_384.81_linux-run'
+      $libcuda = '/usr/lib/x86_64-linux-gnu/libcuda.so'
+      $cuda_args = '--silent'
+    }
+    default: {
+      $driver_source = "http://packages.grid5000.fr/other/cuda/cuda_$::env::common::software_versions::nvidia_cuda.run";
+      case "$env::deb_arch" {
+        "amd64": {
+          $libcuda = '/usr/lib/x86_64-linux-gnu/libcuda.so'
+          $cuda_args = '--silent'
+        }
+        "ppc64el": {
+          $libcuda = '/usr/lib/powerpc64le-linux-gnu/libcuda.so'
+          $cuda_args = '--silent'
+        }
+      }
+    }
+  }
+
+  $opengl_packages = ['ocl-icd-libopencl1', 'opencl-headers']
+
+  exec{
+    'retrieve_nvidia_cuda':
+      command   => "/usr/bin/wget -q $driver_source -O 
/tmp/NVIDIA-Linux_cuda.run && chmod u+x /tmp/NVIDIA-Linux_cuda.run",
+      timeout   => 1200, # 20 min
+      creates   => "/tmp/NVIDIA-Linux_cuda.run";
+    'install_nvidia_cuda':
+      command     => "/tmp/NVIDIA-Linux_cuda.run $cuda_args --toolkit && 
/bin/rm /tmp/NVIDIA-Linux_cuda.run",
+      timeout     => 2400, # 20 min
+      user        => root,
+      environment => ["HOME=/root", "USER=root"], # prevent cuda installer to 
failed when copying sample files (default sample path : 
$(HOME)/NVIDIA_CUDA-10.1_Samples, cf. 
https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#runfile-advanced)
+      require     =>  File['/tmp/NVIDIA-Linux_cuda.run'];
+    'update_ld_conf':
+      command   => "/sbin/ldconfig",
+      user      => root,
+      refreshonly => true;
+  }
+
+  file{
+    '/tmp/NVIDIA-Linux_cuda.run':
+      ensure    => file,
+      require   => Exec['retrieve_nvidia_cuda'];
+    '/usr/local/cuda/lib64/libcuda.so':
+      ensure    => 'link',
+      target    => $libcuda,
+      require   => Exec['install_nvidia_cuda'],
+      notify    => Exec['update_ld_conf'];
+    '/etc/ld.so.conf.d/cuda.conf':
+      ensure    => file,
+      owner     => root,
+      group     => root,
+      mode      => '0644',
+      source    => 'puppet:///modules/env/big/nvidia/cuda.conf',
+      notify    => Exec['update_ld_conf'];
+    '/etc/systemd/system/nvidia-persistenced.service':
+      ensure    => file,
+      owner     => root,
+      group     => root,
+      mode      => '0644',
+      source    => 'puppet:///modules/env/big/nvidia/nvidia-persistenced.service';
+    '/etc/systemd/system/multi-user.target.wants/nvidia-persistenced.service':
+      ensure => link,
+      target => '/etc/systemd/system/nvidia-persistenced.service';
+  }
+
+  # Sounds dirty as fuck, but Augeas does not manage /etc/profile which is a bash file, and not a real configuration file (or I'm really bad with Augeas).
+  file_line {
+    'cuda_etc_profile_path':
+      path => '/etc/profile',
+      line => 'export PATH=$PATH:/usr/local/cuda/bin';
+  }
+
+  package{
+    $opengl_packages:
+      ensure    => installed;
+  }
+
+  # Install one or more fake (empty) package(s) to help satisfy libhwloc-contrib-plugins dependencies.
+  # No need to force a particular version, newer versions of the package(s) should still be equally empty.
+  # cf. bug #12877, #12861 and #13260
+  case "${::lsbdistcodename}" {
+    "bullseye" : {
+      case "$env::deb_arch" {
+        "ppc64el": {
+          env::common::g5kpackages {
+            'libnvidia-tesla-460-cuda1':
+              ensure    => installed;
+            'libnvidia-tesla-460-ml1':
+              ensure    => installed;
+            'libcudart11.0':
+              ensure    => installed;
+          } -> package {
+            'libhwloc-contrib-plugins':
+              ensure    => installed;
+          }
+        }
+        default: {
+          env::common::g5kpackages {
+            'libcuda1':
+              ensure    => installed;
+            'libnvidia-ml1':
+              ensure    => installed;
+            'libcudart11.0':
+              ensure    => installed;
+          } -> package {
+            'libhwloc-contrib-plugins':
+              ensure    => installed;
+          }
+        }
+      }
+    }
+    default: {
+      env::common::g5kpackages {
+        'libcuda1':
+          ensure    => installed;
+      } -> package {
+        'libhwloc-contrib-plugins':
+          ensure    => installed;
+      }
+    }
+  }
+}
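
A rough post-install check for this class (a sketch, assuming the runfile installed the toolkit under /usr/local/cuda and ldconfig picked up /etc/ld.so.conf.d/cuda.conf):

    . /etc/profile                           # picks up the PATH export added above
    nvcc --version
    /sbin/ldconfig -p | grep -E 'libcuda|libcudart'
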
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/drivers.pp b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/drivers.pp
new file mode 100644
index 0000000..206b612
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/drivers.pp
@@ -0,0 +1,120 @@
+class env::big::configure_nvidia_gpu::drivers () {
+
+  ### This class exists for GPU clusters that require a recent version of the nvidia driver
+
+  include env::big::prepare_kernel_module_build
+
+  case "$env::deb_arch" {
+    "amd64": {
+      $libdir = '/usr/lib/x86_64-linux-gnu'
+    }
+    "ppc64el": {
+      $libdir = '/usr/lib/powerpc64le-linux-gnu'
+    }
+  }
+
+  $driver_source = "http://packages.grid5000.fr/other/nvidia/NVIDIA-Linux-${::env::common::software_versions::nvidia_driver_arch}-${::env::common::software_versions::nvidia_driver}.run";
+  $nvidia_basename = 'NVIDIA-Linux'
+  $nvidia_runfile = "$nvidia_basename.run"
+
+  file{
+    "/tmp/$nvidia_runfile":
+      ensure  => file,
+      require => Exec['retrieve_nvidia_drivers'];
+  }
+  exec{
+    'retrieve_nvidia_drivers':
+      command => "/usr/bin/wget -q $driver_source -O /tmp/$nvidia_runfile; 
chmod u+x /tmp/$nvidia_runfile",
+      timeout => 1200, # 20 min
+      creates => "/tmp/$nvidia_runfile";
+  }
+
+  if ("$env::deb_arch" == 'ppc64el') and ("$lsbdistcodename" == 'buster') {
+    exec{
+      'extract_nvidia_driver':
+        command   => "/tmp/$nvidia_runfile -x --target /tmp/$nvidia_basename",
+        user      => root,
+        require   => [Exec['prepare_kernel_module_build'], 
File["/tmp/$nvidia_runfile"]];
+      'patch_nvidia_driver':
+        command   => "/usr/bin/sed -i 
's/MODULE_LICENSE(\"NVIDIA\");/MODULE_LICENSE(\"GPL\");/' 
/tmp/$nvidia_basename/kernel/nvidia-modeset/nvidia-modeset-linux.c; 
/usr/bin/sed -i 's/MODULE_LICENSE(\"NVIDIA\");/MODULE_LICENSE(\"GPL\");/' 
/tmp/$nvidia_basename/kernel/nvidia/nv-frontend.c",
+        user      => root,
+        require   => Exec['extract_nvidia_driver'];
+      'cleanup_nvidia_extracted':
+        command   => "/bin/rm -r /tmp/$nvidia_basename",
+        user      => root,
+        require   => Exec['install_nvidia_driver'];
+    }
+    $nvidia_installer = "$nvidia_basename/nvidia-installer"
+  } else {
+    $nvidia_installer = "$nvidia_runfile"
+  }
+
+  exec{
+    'install_nvidia_driver':
+      command   => "/tmp/$nvidia_installer -qa --no-cc-version-check --ui=none 
--dkms -k ${installed_kernelreleases[-1]}",
+      timeout   => 1200, # 20 min,
+      user      => root,
+      # The nvidia installer tries to load the nvidia-drm module at the end, but it fails because
+      # the building machine has no GPU. Make sure that modprobe doesn't actually try to load the module.
+      environment => ['MODPROBE_OPTIONS=--dry-run'],
+      require   => [Exec['prepare_kernel_module_build'], 
File["/tmp/$nvidia_runfile"]];
+    'cleanup_nvidia':
+      command   => "/bin/rm /tmp/$nvidia_runfile",
+      user      => root,
+      require   => Exec['install_nvidia_driver'];
+  }
+
+  if ($::env::common::software_versions::nvidia_user_driver != undef) {
+
+    # Install a different user-mode driver.
+    # See 
https://docs.nvidia.com/deploy/cuda-compatibility/index.html#forward-compatible-upgrade
+    # This allows using an old kernel driver with a newer user-mode driver (and thus supports newer CUDA).
+    # It is based on the NVIDIA driver installer, but we only extract the relevant files.
+    $user_driver_source = 
"http://packages.grid5000.fr/other/nvidia/NVIDIA-Linux-${::env::common::software_versions::nvidia_driver_arch}-${::env::common::software_versions::nvidia_user_driver}.run";
+
+    file{
+      '/tmp/NVIDIA-Linux-user-driver.run':
+        ensure    => file,
+        require   => Exec['retrieve_nvidia_user_driver'];
+    }
+    exec{
+      'retrieve_nvidia_user_driver':
+        command   => "/usr/bin/wget -q $user_driver_source -O 
/tmp/NVIDIA-Linux-user-driver.run; chmod u+x /tmp/NVIDIA-Linux-user-driver.run",
+        timeout   => 1200, # 20 min
+        creates   => "/tmp/NVIDIA-Linux-user-driver.run";
+      'extract_nvidia_user_driver':
+        command   => "/tmp/NVIDIA-Linux-user-driver.run -x --target 
/tmp/NVIDIA-Linux-user-driver",
+        timeout   => 600, # 10 min,
+        require   => File['/tmp/NVIDIA-Linux-user-driver.run'];
+      'cleanup_nvidia_user_driver':
+        command   => "/bin/rm /tmp/NVIDIA-Linux-user-driver.run",
+        require   => Exec['extract_nvidia_user_driver'];
+    }
+    file{
+      # Copy libraries from the newer driver
+      
"${libdir}/libcuda.so.${::env::common::software_versions::nvidia_user_driver}":
+        source    => 
"/tmp/NVIDIA-Linux-user-driver/libcuda.so.${::env::common::software_versions::nvidia_user_driver}",
+        mode      => '0755',
+        require   => Exec['extract_nvidia_user_driver'];
+      
"${libdir}/libnvidia-ptxjitcompiler.so.${::env::common::software_versions::nvidia_user_driver}":
+        source    => 
"/tmp/NVIDIA-Linux-user-driver/libnvidia-ptxjitcompiler.so.${::env::common::software_versions::nvidia_user_driver}",
+        mode      => '0755',
+        require   => Exec['extract_nvidia_user_driver'];
+      # Override symlinks so that they point to the newer driver
+      "${libdir}/libcuda.so.1":
+        ensure    => link,
+        target    => 
"libcuda.so.${::env::common::software_versions::nvidia_user_driver}",
+        replace   => true,
+        require   => Exec['install_nvidia_driver'];
+      "${libdir}/libnvidia-ptxjitcompiler.so.1":
+        ensure    => link,
+        target    => 
"libnvidia-ptxjitcompiler.so.${::env::common::software_versions::nvidia_user_driver}",
+        replace   => true,
+        require   => Exec['install_nvidia_driver'];
+    } ->
+    exec{
+      'cleanup_nvidia_user_driver_files':
+        command   => "/bin/rm -r /tmp/NVIDIA-Linux-user-driver",
+    }
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/ganglia.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/ganglia.pp
new file mode 100644
index 0000000..ae401fa
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/ganglia.pp
@@ -0,0 +1,47 @@
+class env::big::configure_nvidia_gpu::ganglia () {
+
+  case $operatingsystem {
+    'Debian': {
+
+      case "${::lsbdistcodename}" {
+        'buster' : {
+          env::common::g5kpackages {
+            'ganglia-monitor-nvidia':
+              packages => 'ganglia-monitor-python-nvidia',
+              ensure => installed;
+          }
+
+          Package['ganglia-monitor'] -> 
Package['ganglia-monitor-python-nvidia']
+
+          file{
+            '/etc/ganglia/conf.d/modpython-nvidia.conf':
+              ensure  => file,
+              owner   => root,
+              group   => root,
+              mode    => '0644',
+              source  => 
"puppet:///modules/env/big/nvidia/modpython-nvidia.conf",
+              require => Package['ganglia-monitor-python-nvidia'];
+            '/etc/systemd/system/ganglia-monitor.service':
+              ensure  => file,
+              owner   => root,
+              group   => root,
+              mode    => '0644',
+              source  => 
"puppet:///modules/env/big/nvidia/ganglia-monitor.service";
+          }
+          exec {
+            'Enable ganglia on startup':
+              command => "systemctl enable ganglia-monitor",
+              path    => ['/bin','/usr/bin','/sbin'];
+          }
+        }
+        default : {
+          # No more ganglia since bullseye
+        }
+      }
+    }
+    default: {
+      err "${operatingsystem} not supported."
+    }
+  }
+
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/modules.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/modules.pp
new file mode 100644
index 0000000..8fc928e
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/modules.pp
@@ -0,0 +1,25 @@
+class env::big::configure_nvidia_gpu::modules () {
+
+  augeas {
+    'blacklist_vga16fb':
+      context   => "/files/etc/modprobe.d/blacklist.conf",
+      tag       => "modules",
+      changes   =>["set blacklist[last()+1] vga16fb",],
+      onlyif    =>"match blacklist[.='vga16fb'] size == 0 ";
+    'blacklist_rivafb':
+      context   => "/files/etc/modprobe.d/blacklist.conf",
+      tag       => "modules",
+      changes   =>["set blacklist[last()+1] rivafb",],
+      onlyif    =>"match blacklist[.='rivafb'] size == 0 ";
+    'blacklist_rivatv':
+      context   => "/files/etc/modprobe.d/blacklist.conf",
+      tag       => "modules",
+      changes   =>["set blacklist[last()+1] rivatv",],
+      onlyif    =>"match blacklist[.='rivatv'] size == 0 ";
+    'blacklist_nvidiafb':
+      context   => "/files/etc/modprobe.d/blacklist.conf",
+      tag       => "modules",
+      changes   =>["set blacklist[last()+1] nvidiafb",],
+      onlyif    =>"match blacklist[.='nvidiafb'] size == 0 ";
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/prometheus.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/prometheus.pp
new file mode 100644
index 0000000..313791a
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/prometheus.pp
@@ -0,0 +1,41 @@
+class env::big::configure_nvidia_gpu::prometheus () {
+
+  case $operatingsystem {
+    'Debian','Ubuntu': {
+
+      env::common::g5kpackages {
+        'nvidia-dcgm-exporter':
+          packages => 'dcgm-exporter',
+          ensure   => $::env::common::software_versions::dcgm_exporter;
+      }
+
+      # Version 2.X bumped the SONAME, so we force version 1.X for now
+      package {
+        'datacenter-gpu-manager':
+          ensure  => $::env::common::software_versions::datacenter_gpu_manager,
+          require => Env::Common::G5kpackages['nvidia-dcgm-exporter'];
+      }
+
+      file{
+        '/etc/systemd/system/dcgm-exporter.service':
+          ensure  => file,
+          owner   => root,
+          group   => root,
+          mode    => '0644',
+          source  => "puppet:///modules/env/big/nvidia/dcgm-exporter.service";
+      }
+      service {
+        'dcgm.service':
+          enable => false,
+          require => Package['datacenter-gpu-manager'];
+        'dcgm-exporter.service':
+          enable => true,
+          require => [File['/etc/systemd/system/dcgm-exporter.service'], 
Package['dcgm-exporter']];
+      }
+    }
+    default: {
+      err "${operatingsystem} not supported."
+    }
+  }
+
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/services.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/services.pp
new file mode 100644
index 0000000..ddd664c
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_nvidia_gpu/services.pp
@@ -0,0 +1,15 @@
+class env::big::configure_nvidia_gpu::services () {
+
+  # We only install the service but do not enable it.
+  # Services that depend on it can add "Wants=nvidia-smi.service"
+  # and "After=nvidia-smi.service", and this will automatically start
+  # this service.
+  file{
+    '/etc/systemd/system/nvidia-smi.service':
+      ensure    => file,
+      owner     => root,
+      group     => root,
+      mode      => '0644',
+      source    => 'puppet:///modules/env/big/nvidia/nvidia-smi.service';
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_postfix.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_postfix.pp
new file mode 100644
index 0000000..600c684
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_postfix.pp
@@ -0,0 +1,31 @@
+class env::big::configure_postfix () {
+
+  if "$::lsbdistcodename" != "stretch" {
+
+    $g5k_sysemail = 'sysadmin@internal.grid5000.fr'
+
+    package {
+      'postfix':
+        ensure  => installed,
+        require => Exec['fix_resolv_conf', 'fix_hostname'],
+        before  => Exec['newaliases', 'set_root_alias'];
+    }
+
+    exec {
+      'fix_resolv_conf':
+        command  => "/bin/sed 's/\\([^\\s]*\\)\\.\\(\\s\\|$\\)/\\1\\2/g' -i 
/etc/resolv.conf";
+      'fix_hostname':
+        command  => "/bin/sed 's/localhost//' -i /etc/hostname";
+      # set root alias to local + internal mailbox
+      'set_root_alias':
+        command  => "if /usr/bin/grep -q ^root: /etc/aliases; then /bin/sed -i 
's/^root:.*/root: root, ${g5k_sysemail}/' /etc/aliases; else /usr/bin/echo 
'root: root, ${g5k_sysemail}' >> /etc/aliases; fi",
+        provider => 'shell';
+      # update aliases database
+      'newaliases':
+        command  => '/usr/bin/newaliases',
+    }
+
+    # Keep default main.cf configuration file
+    # Note that some configs are set with postconf by g5k-postinstall
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_sshfs.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_sshfs.pp
new file mode 100644
index 0000000..0a67aff
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/configure_sshfs.pp
@@ -0,0 +1,13 @@
+class env::big::configure_sshfs {
+
+  file {
+    # for sshfs
+    "/usr/bin/fusermount":
+      mode    => '4755';
+    "/etc/udev/rules.d/40-fuse.rules":
+      mode    => '0644',
+      owner   => root,
+      group   => root,
+      source  => "puppet:///modules/env/base/sshfs/40-fuse.rules";
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_beegfs.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_beegfs.pp
new file mode 100644
index 0000000..e131ec1
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_beegfs.pp
@@ -0,0 +1,112 @@
+class env::big::install_beegfs {
+
+  case "${::lsbdistcodename}" {
+    "stretch" : {
+
+      apt::source { 'beegfs':
+          location     => 'https://www.beegfs.com/release/beegfs_7/',
+          release      => 'deb9',
+          repos        => 'non-free',
+          architecture => 'amd64',
+          key          => {
+              id       => '055D000F1A9A092763B1F0DD14E8E08064497785',
+              source   => 
'https://www.beegfs.io/release/beegfs_7/gpg/DEB-GPG-KEY-beegfs',
+          },
+      }
+      -> package { # client
+          [ 'beegfs-utils', 'beegfs-helperd', 'beegfs-client', 
'linux-headers-amd64', 'beegfs-opentk-lib' ]:
+          require => Class['apt::update'],
+          ensure => installed;
+      }
+      -> service { [ 'beegfs-helperd', 'beegfs-client'] :
+        provider => systemd,
+        enable => false,
+      }
+      -> exec { "beegfs-setup-rdma":
+        command => "/usr/sbin/beegfs-setup-rdma -i on"
+      }
+
+      file { '/etc/beegfs/beegfs-client-autobuild.conf':
+          content => "buildEnabled=true\nbuildArgs=-j8 
BEEGFS_OPENTK_IBVERBS=1\n",
+          require => Package['beegfs-client']
+      }
+      -> exec {
+      '/etc/init.d/beegfs-client rebuild':
+          timeout => 1200,
+          refreshonly => true
+      }
+    }
+
+    "buster" : {
+
+      include env::big::prepare_kernel_module_build
+
+      apt::source { 'beegfs':
+          location     => 'https://www.beegfs.com/release/beegfs_7_1/',
+          release      => 'stretch',
+          repos        => 'non-free',
+          architecture => 'amd64',
+          key          => {
+              id       => '055D000F1A9A092763B1F0DD14E8E08064497785',
+              source   => 
'https://www.beegfs.io/release/beegfs_7/gpg/DEB-GPG-KEY-beegfs',
+          },
+      }
+      -> package { # client
+          [ 'beegfs-utils', 'beegfs-helperd', 'beegfs-client', 'libbeegfs-ib' 
]:
+          require => Class['apt::update'],
+          ensure => installed;
+      }
+      -> service { [ 'beegfs-helperd', 'beegfs-client'] :
+        provider => systemd,
+        enable => false,
+      }
+
+      file { '/etc/beegfs/beegfs-client-autobuild.conf':
+          content => "buildEnabled=true\nbuildArgs=-j8 
BEEGFS_OPENTK_IBVERBS=1\n",
+          require => Package['beegfs-client']
+      }
+      -> exec {
+      '/etc/init.d/beegfs-client rebuild':
+          timeout => 1200,
+          refreshonly => true,
+          require => Exec['prepare_kernel_module_build']
+      }
+    }
+
+    "bullseye" : {
+      # Do not build for now, cf. Bug #13077
+
+#      include env::big::prepare_kernel_module_build
+#
+#      apt::source { 'beegfs':
+#          location     => 'https://www.beegfs.io/release/beegfs_7.2.3',
+#          release      => 'buster', #FIXME : change release to bullseye when 
beegfs release it
+#          repos        => 'non-free',
+#          architecture => 'amd64',
+#          key          => {
+#              id       => '055D000F1A9A092763B1F0DD14E8E08064497785',
+#              source   => 
'https://www.beegfs.io/release/beegfs_7.2.3/gpg/DEB-GPG-KEY-beegfs',
+#          },
+#      }
+#      -> package { # client
+#          [ 'beegfs-utils', 'beegfs-helperd', 'beegfs-client', 'libbeegfs-ib' 
]:
+#          require => Class['apt::update'],
+#          ensure => installed;
+#      }
+#      -> service { [ 'beegfs-helperd', 'beegfs-client'] :
+#        provider => systemd,
+#        enable => false,
+#      }
+#
+#      file { '/etc/beegfs/beegfs-client-autobuild.conf':
+#          content => "buildEnabled=true\nbuildArgs=-j8 
BEEGFS_OPENTK_IBVERBS=1\n",
+#          require => Package['beegfs-client']
+#      }
+#      -> exec {
+#      '/etc/init.d/beegfs-client rebuild':
+#          timeout => 1200,
+#          require => Exec['prepare_kernel_module_build']
+#      }
+    }
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_g5k_jupyterlab.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_g5k_jupyterlab.pp
new file mode 100644
index 0000000..8cc0fd5
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_g5k_jupyterlab.pp
@@ -0,0 +1,19 @@
+class env::big::install_g5k_jupyterlab {
+  case $operatingsystem {
+    'Debian': {
+      if "${::lsbdistcodename}" != "stretch" {
+
+        include env::common::software_versions
+
+        env::common::g5kpackages {
+          'g5k-jupyterlab':
+            ensure => $::env::common::software_versions::g5k_jupyterlab;
+        }
+      }
+    }
+    default: {
+      err "${operatingsystem} not supported."
+    }
+  }
+}
+
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_openmpi.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_openmpi.pp
new file mode 100644
index 0000000..b9af00e
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_openmpi.pp
@@ -0,0 +1,58 @@
+class env::big::install_openmpi () {
+
+  case "${::lsbdistcodename}" {
+
+    "stretch" : {
+      $openmpi_packages = [ 'libopenmpi-dev', 'openmpi-bin' ]
+      $openmpi_deps_packages = [ 'librdmacm1', 'libgfortran3', 'libnuma1', 
'blcr-util', 'libibverbs1-dbg', 'libibverbs-dev', 'libpsm2-dev', 'libhfi1-dev', 
'libopamgt-dev' ]
+      $openmpi_scibian_version = '2.0.2-2sci9+opa10.7u4'
+
+      ensure_packages($openmpi_deps_packages, {
+        ensure => present,
+        require => Class['apt::update']
+      })
+
+      ensure_packages($openmpi_packages, {
+        ensure => $openmpi_scibian_version,
+        require => Class['apt::update']
+      })
+    }
+
+    "buster", "bullseye" : {
+      $openmpi_packages = [ 'libopenmpi-dev', 'openmpi-bin' ]
+      $openmpi_deps_packages = [ 'libnuma1', 'libibverbs-dev' ]
+      $openmpi_opa_packages = [ 'libpsm2-dev', 'libopamgt-dev' ]
+
+      ensure_packages($openmpi_deps_packages, {
+        ensure => present,
+        require => Class['apt::update']
+      })
+
+      if $env::deb_arch == 'amd64' {
+        ensure_packages($openmpi_opa_packages, {
+          ensure => present,
+          require => Class['apt::update']
+        })
+      }
+
+      ensure_packages($openmpi_packages, {
+        ensure => present,
+        require => Class['apt::update']
+      })
+    }
+  }
+
+  if ($::lsbdistcodename == 'buster') {
+    # The 'verbs' OFI provider is broken in OpenMPI 3.1.3. We disable it.
+    # See https://intranet.grid5000.fr/bugzilla/show_bug.cgi?id=10918
+    # and https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=941996
+    # https://github.com/open-mpi/ompi/issues/7035
+    # OpenMPI 4.x is not affected, so this can be removed after buster.
+    # This does not affect OpenMPI when loaded using 'module'
+    file_line { 'disable_verbs_ofi_provider':
+      path => '/etc/openmpi/openmpi-mca-params.conf',
+      line => 'mtl_ofi_provider_exclude = shm,sockets,tcp,udp,rstream,verbs',
+      require => Package['openmpi-bin'];
+    }
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_prometheus_exporters.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_prometheus_exporters.pp
new file mode 100644
index 0000000..8d3b83a
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_prometheus_exporters.pp
@@ -0,0 +1,8 @@
+class env::big::install_prometheus_exporters {
+
+  package {
+    'prometheus-node-exporter':
+      ensure => installed;
+  }
+
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_smartd.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_smartd.pp
new file mode 100644
index 0000000..3d266d5
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_smartd.pp
@@ -0,0 +1,26 @@
+class env::big::install_smartd {
+
+  package {
+    'smartmontools':
+      ensure => installed;
+  }
+
+  file {
+    '/etc/systemd/system/smartd.service.d/':
+      ensure  => directory,
+      require => Package['smartmontools'];
+    '/etc/systemd/system/smartd.service.d/override.conf':
+      ensure  => present,
+      content => "[Service]\nExecStartPre=mkdir -p /dev/discs",
+      require => File['/etc/systemd/system/smartd.service.d/'];
+  }
+
+  file_line { 'smard.conf':
+    ensure  => present,
+    require => Package['smartmontools'],
+    path    => '/etc/smartd.conf',
+    line    => 'DEVICESCAN -d nvme -d scsi -d ata -d sat -n standby -m root -M 
exec /usr/share/smartmontools/smartd-runner',
+    match   => '^DEVICESCAN .*';
+  }
+
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_snmp_tools.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_snmp_tools.pp
new file mode 100644
index 0000000..76901e6
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/install_snmp_tools.pp
@@ -0,0 +1,15 @@
+class env::big::install_snmp_tools {
+
+  package {
+    'snmp':
+      ensure => installed;
+    'snmp-mibs-downloader':
+      ensure => installed;
+  }
+
+  exec {
+    'conf mibs':
+      command => "/bin/sed -i 's/^mibs/#mibs/' /etc/snmp/snmp.conf",
+      require => Package['snmp'];
+    }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/big/prepare_kernel_module_build.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/prepare_kernel_module_build.pp
new file mode 100644
index 0000000..4d5c978
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/big/prepare_kernel_module_build.pp
@@ -0,0 +1,18 @@
+class env::big::prepare_kernel_module_build {
+
+  # Prepare everything needed to build a custom kernel module.
+  # Installs kernel headers for the latest available kernel, which can be different
+  # from the running kernel.
+
+  package {
+    ['module-assistant', 'dkms']:
+      ensure    => installed;
+  }
+
+  exec {
+    'prepare_kernel_module_build':
+      command   => "/usr/bin/m-a prepare -i -l 
${installed_kernelreleases[-1]}",
+      user      => root,
+      require   => Package['module-assistant'];
+  }
+}
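Other manifests in this diff (configure_nvidia_gpu/drivers.pp and install_beegfs.pp) consume this class by including it and ordering their build step after the 'prepare_kernel_module_build' exec. A minimal sketch of that pattern, with a hypothetical class name and build command:

    class env::big::build_example_module {
      include env::big::prepare_kernel_module_build

      exec {
        'build_example_module':
          # Hypothetical build step, for illustration only; the real callers run
          # the NVIDIA installer or the beegfs-client rebuild at this point.
          command => '/usr/bin/make -C /usr/src/example-module',
          user    => root,
          require => Exec['prepare_kernel_module_build'];
      }
    }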
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/common/apt_pinning.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/common/apt_pinning.pp
new file mode 100644
index 0000000..6cb54eb
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/common/apt_pinning.pp
@@ -0,0 +1,25 @@
+# Define apt_pinning
+# Parameters:
+# Packages to pin
+# Pinned version
+# Priority
+
+define env::common::apt_pinning (
+  $packages = undef,
+  $version = undef,
+  $priority = 1001,
+) {
+
+  if $packages == undef or $version == undef {
+    fail 'Missing required parameter'
+  }
+
+  file {
+    "/etc/apt/preferences.d/${name}.pref":
+      ensure  => file,
+      mode    => '0644',
+      owner   => root,
+      group   => root,
+      content => template('env/common/apt_pinning.erb');
+  }
+}
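As an illustration of how this define is used, install_metapackage.pp further down in this diff pins the meta-packages roughly as in the following sketch (the package names and version shown are taken from software_versions.pp and are given here only as an example):

    env::common::apt_pinning {
      'g5k-meta-packages':
        packages => 'g5k-meta-packages-debian11-min g5k-meta-packages-debian11-base',
        version  => '0.7.45';
    }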
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/common/g5kpackages.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/common/g5kpackages.pp
new file mode 100644
index 0000000..527d7c5
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/common/g5kpackages.pp
@@ -0,0 +1,33 @@
+# This define provides a wrapper around apt::source to make it easier to install packages from packages.grid5000.fr.
+# The default value for the 'packages' parameter is the name of the resource, so you can use, e.g.:
+#  env::common::g5kpackages {
+#   'sudo-g5k':
+#      ensure => $::env::common::software_versions::sudo_g5k;
+#  }
+
+define env::common::g5kpackages (
+  String $source_filename = $name,
+  Variant[Array, String] $packages = $name,
+  String $ensure = installed,
+  String $release = ''
+) {
+  include apt
+
+  apt::source { $source_filename:
+    key      => {
+      'id'      => '3C38BDEAA05D4A7BED7815E5B1F34F56797BF2D1',
+      'content' => file('env/min/apt/grid5000-archive-key.asc')
+    },
+    comment  => "Grid5000 repository for ${name}",
+    location => "http://packages.grid5000.fr/deb/${name}/${release}",
+    release  => '/',
+    repos    => '',
+    include  => { 'deb' => true, 'src' => false }
+  }
+
+  package {
+    $packages:
+      ensure  => $ensure,
+      require => Class['apt::update']
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/common/software_versions.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/common/software_versions.pp
new file mode 100644
index 0000000..2f968be
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/common/software_versions.pp
@@ -0,0 +1,55 @@
+# This file defines the software versions in use
+
+class env::common::software_versions {
+  $g5k_subnets                 = '1.4.2'
+  $g5k_meta_packages           = '0.7.45'
+  $tgz_g5k                     = '2.0.17'
+  $g5k_checks                  = '0.11.2'
+  $sudo_g5k                    = '1.11'
+  $ganglia_monitor             = '3.6.0-7.1'
+  $libguestfs_backport_arm64   = '1:1.40.2-7~bpog5k10+1'
+  $libguestfs_backport_ppc64el = '1:1.40.2-7~bpog5k10+1'
+  $lmod                        = '6.6-0.3g5k1'
+  $g5k_jupyterlab              = '0.6'
+
+  case "$env::deb_arch" {
+    'amd64': {
+      $nvidia_driver_arch         = 'x86_64'
+      case $lsbdistcodename {
+        'stretch', 'buster': {
+          $nvidia_driver          = '450.119.04'
+          $nvidia_cuda            = '10.1.243_418.87.00_linux'
+          $datacenter_gpu_manager = '1:1.7.2'
+          $dcgm_exporter          = '2.0.0-rc.11'
+        }
+        'bullseye': {
+          $nvidia_driver          = '460.73.01'
+          $nvidia_cuda            = '11.2.2_460.32.03_linux'
+          $datacenter_gpu_manager = '1:2.1.4'
+          $dcgm_exporter          = '2.3.0-1'
+        }
+      }
+    }
+    'ppc64el': {
+      # We are stuck on driver 418 for ppc64.
+      # Newer versions of the driver (440.X, 450.X, 460.X) are unstable and cause kernel panics.
+      # See https://intranet.grid5000.fr/bugzilla/show_bug.cgi?id=12545
+      $nvidia_driver_arch         = 'ppc64le'
+      case $lsbdistcodename {
+        'stretch', 'buster': {
+          $nvidia_driver          = '418.197.02'
+          $nvidia_cuda            = '10.1.243_418.87.00_linux_ppc64le'
+          $datacenter_gpu_manager = '1:1.7.2'
+          $dcgm_exporter          = '2.0.0-rc.11'
+        }
+        'bullseye': {
+          $nvidia_driver          = '418.197.02'
+          $nvidia_user_driver     = '460.73.01'
+          $nvidia_cuda            = '11.2.2_460.32.03_linux_ppc64le'
+          $datacenter_gpu_manager = '1:2.0.15'
+          $dcgm_exporter          = '2.3.0-1'
+        }
+      }
+    }
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/commonpackages.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/commonpackages.pp
new file mode 100644
index 0000000..6a4a7c5
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/commonpackages.pp
@@ -0,0 +1,21 @@
+class env::commonpackages{
+
+}
+
+class env::commonpackages::rubyrspec{
+  package{ 'ruby-rspec':
+    ensure => installed;
+  }
+}
+
+class env::commonpackages::rake{
+  package{ 'rake':
+    ensure => installed;
+  }
+}
+
+class env::commonpackages::rsyslog{
+  package{ 'rsyslog':
+    ensure => installed;
+  }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/init.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/init.pp
new file mode 100644
index 0000000..51fadaf
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/init.pp
@@ -0,0 +1,48 @@
+class env ($given_variant){
+  ## Global variables used for this build
+  # build to be run inside g5k (could become a parameter)
+  $target_g5k = true
+  # build from inside g5k (the proxy parameter needs to be set before running any action, or the network will be unavailable)
+  $from_g5k = false
+
+  ## Variant (min/base/xen/nfs/big/std)
+  # need to create a local variable to access it from any sub-recipe.
+  $variant = $given_variant
+  $version = file('env/version')
+
+  ## Define a stage that will be run after most of the normal installation
+  # As an example, this is used to set up the apt proxy. If set up earlier, any package installation would fail (proxy unreachable)
+  stage { 'g5k_adjustment' :
+    require => Stage['main'];
+  }
+
+  ## Define the Debian architecture name
+  if $architecture == 'aarch64' {
+    $deb_arch = 'arm64'
+    $deb_arch_long = upcase($deb_arch)
+    $g5k_arch = 'arm64'
+  } elsif $architecture == 'amd64' {
+    $deb_arch = $architecture
+    $deb_arch_long = 'AMD64/EM64T'
+    $g5k_arch = 'x64'
+  } elsif $architecture == 'ppc64le' {
+    $deb_arch = 'ppc64el'
+    $deb_arch_long = 'powerpc64le'
+    $g5k_arch = 'ppc64'
+  } else {
+    $deb_arch = $architecture
+    $deb_arch_long = upcase($deb_arch)
+    $g5k_arch = $architecture
+  }
+
+  ## Call the actual recipe
+  case $variant {
+    'min' :  { include env::min }
+    'base':  { include env::base }
+    'xen' :  { include env::xen }
+    'nfs' :  { include env::nfs }
+    'big' :  { include env::big }
+    'std' :  { include env::std }
+    default: { notify {"variant $variant is not implemented":}}
+  }
+}
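The class itself is declared by the image build tooling, which is not part of this diff; a minimal sketch of such a declaration, assuming the 'big' variant is wanted:

    class { 'env':
      given_variant => 'big',
    }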
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/min.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min.pp
new file mode 100644
index 0000000..e4a4dd3
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/min.pp
@@ -0,0 +1,53 @@
+# This file contains the 'min' class, used to configure an environment with minimal modifications to be executed in grid'5000.
+
+class env::min ( $variant = "min", $parent_parameters = {} ) {
+
+  stage { 'last':
+    require => Stage['main'],
+  }
+
+  $min_parameters = {
+    misc_root_pwd => '$1$qzZwnZXQ$Ak1xs7Oma6HUHw/xDJ8q91',
+  }
+  $parameters = merge( $min_parameters, $parent_parameters )
+
+  # Package manager
+  case $operatingsystem {
+    'Debian','Ubuntu': {
+    }
+    'Centos': {
+      class { 'env::min::yum': }
+    }
+    default: {
+      err "${operatingsystem} not suported."
+    }
+  }
+  # Install cpu microcode
+  if $env::deb_arch == 'amd64' {
+    class { 'env::min::install_cpu_microcode': }
+  }
+  # ssh
+  class { 'env::min::install_and_configure_ssh': }
+  # setup
+  class { 'env::min::install_and_configure_locales': }
+  # motd
+  class { 'env::min::generate_etc_motd': }
+  # tgs-g5k
+  class { 'env::min::install_tgz_g5k': }
+  # install meta-packages
+  class { 'env::min::install_metapackage': variant => $variant }
+  # network configuration
+  class { 'env::min::configure_network_and_install_drivers': }
+  # root password
+  class { 'env::min::set_root_password':
+    root_pwd => $parameters['misc_root_pwd'];
+  }
+  # timezone
+  class { 'env::min::set_timezone_to_europe_paris': }
+  # keep tmp
+
+  # kernel installation
+  class { 'env::min::configure_kernel_and_blacklist_some_modules': }
+  # Tagging to recognize images
+  class { 'env::min::add_image_version_in_etc': }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/add_image_version_in_etc.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/add_image_version_in_etc.pp
new file mode 100644
index 0000000..98293db
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/add_image_version_in_etc.pp
@@ -0,0 +1,18 @@
+# Mark images for debugging purposes and to help kadeploy and the pre/post-install scripts recognize images
+
+class env::min::add_image_version_in_etc () {
+
+  file {
+    '/etc/grid5000':
+      ensure   => directory,
+      mode     => '0755',
+      owner    => root,
+      group    => root;
+    "/etc/grid5000/release":
+      ensure   => file,
+      mode     => '0644',
+      owner    => root,
+      source   => 'puppet:///modules/env/min/image_versioning/release',
+      group    => root;
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/configure_kernel_and_blacklist_some_modules.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/configure_kernel_and_blacklist_some_modules.pp
new file mode 100644
index 0000000..f32ca3e
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/configure_kernel_and_blacklist_some_modules.pp
@@ -0,0 +1,18 @@
+class env::min::configure_kernel_and_blacklist_some_modules {
+
+  # Installing the kernel is not required here: Kameleon sets up the kernel because it is needed for SSH access to the build VM (the only access method for the virtualbox backend)
+
+  # Set up links: create the /vmlinuz and /initrd symlinks pointing to the real files in /boot
+  include env::min::kernel::setup_links
+
+  # Blacklist undesired modules and regenerate the initramfs
+  include env::min::kernel::modules
+
+  # initramfs regeneration declaration
+  include env::min::kernel::initramfs
+
+  # Remove the old kernel if it exists: the running kernel (the installer's one) may not be the most recent one (installed after an upgrade)
+  class { 'env::min::kernel::remove_old':
+    stage => last,
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/configure_network_and_install_drivers.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/configure_network_and_install_drivers.pp
new file mode 100644
index 0000000..fc3d36b
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/configure_network_and_install_drivers.pp
@@ -0,0 +1,32 @@
+class env::min::configure_network_and_install_drivers {
+
+  # Network configuration
+  file {
+    '/etc/hosts':
+        owner  => "root",
+        group  => "root",
+        mode   => '0644',
+        source => "puppet:///modules/env/min/network/hosts";
+    '/etc/dhcp/dhclient-exit-hooks.d/g5k-update-host-name':
+        owner  => "root",
+        group  => "root",
+        mode   => '0644',
+        source => "puppet:///modules/env/min/network/g5k-update-host-name";
+  }
+
+  # Network drivers for many Dell servers and the arm pyxi cluster (qlogic)
+  case $operatingsystem {
+    'Debian': {
+      $drivers = ['firmware-bnx2x', 'firmware-bnx2', 'firmware-qlogic']
+    }
+    'Ubuntu': {
+      $drivers = ['linux-firmware']
+    }
+  }
+
+  package {
+    $drivers:
+      ensure   => installed;
+  }
+
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/generate_etc_motd.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/generate_etc_motd.pp
new file mode 100644
index 0000000..b1779da
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/generate_etc_motd.pp
@@ -0,0 +1,26 @@
+class env::min::generate_etc_motd {
+
+  case "${::lsbdistcodename}" {
+    'bullseye': {
+      $userdistribname = "debian11"
+    }
+    'buster': {
+      $userdistribname = "debian10"
+    }
+    'stretch': {
+      $userdistribname = "debian9"
+    }
+    default: {
+      $userdistribname = "${::lsbdistcodename}"
+    }
+  }
+
+  file {
+    '/etc/motd':
+      ensure  => file,
+      owner   => root,
+      group   => root,
+      content => template('env/min/motd.erb'),
+      mode    => '0755';
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_and_configure_locales.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_and_configure_locales.pp
new file mode 100644
index 0000000..5d1f7a6
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_and_configure_locales.pp
@@ -0,0 +1,26 @@
+class env::min::install_and_configure_locales {
+
+  file {
+    "/etc/locale.gen":
+      mode    => '0644',
+      owner   => root,
+      group   => root,
+      source  => "puppet:///modules/env/min/locales/locale.gen",
+      notify  => Exec['generate-locales'];
+    "/etc/default/locale":
+      mode    => '0644',
+      owner   => root,
+      group   => root,
+      source  => "puppet:///modules/env/min/locales/locale";
+  }
+  package {
+    'locales':
+      ensure  => installed;
+  }
+  exec {
+    'generate-locales':
+      command  => '/usr/sbin/locale-gen',
+      user     => root,
+      require  => Package['locales'];
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_and_configure_ssh.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_and_configure_ssh.pp
new file mode 100644
index 0000000..9880eb9
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_and_configure_ssh.pp
@@ -0,0 +1,58 @@
+class env::min::install_and_configure_ssh {
+
+  case $operatingsystem {
+    'Debian','Ubuntu': {
+
+      package {
+        'ssh server':
+          name => 'openssh-server',
+          ensure => present;
+      }
+
+      service {
+        'ssh':
+          name   => 'ssh',
+          ensure => running;
+      }
+
+    }
+
+    'Centos': {
+
+      package {
+        'ssh server':
+          name => 'sshd',
+          ensure => present;
+      }
+
+      service {
+        'ssh':
+          name => 'sshd',
+          ensure => running;
+      }
+
+    }
+  }
+
+  package {
+    'ssh client':
+      name => 'openssh-client',
+      ensure => present;
+  }
+
+  augeas {
+    'sshd_config_min':
+      incl    => '/etc/ssh/sshd_config',
+      lens    => 'Sshd.lns',
+      changes => [
+        'set /files/etc/ssh/sshd_config/PermitUserEnvironment yes',
+        'set /files/etc/ssh/sshd_config/MaxStartups 500'
+      ],
+      require  => Package['ssh server'];
+  }
+  # Todo: 'check that key files are overwritten by postinstall'
+
+  Augeas['sshd_config_min'] ~> Service['ssh']
+
+}
+
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_cpu_microcode.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_cpu_microcode.pp
new file mode 100644
index 0000000..a1f6ff0
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_cpu_microcode.pp
@@ -0,0 +1,31 @@
+class env::min::install_cpu_microcode {
+
+  package {
+    ['intel-microcode','amd64-microcode']:
+      ensure => installed;
+  }
+
+  file {
+    '/etc/default/intel-microcode':
+      ensure  => file,
+      owner   => 'root',
+      group   => 'root',
+      mode    => '644',
+      source  => 'puppet:///modules/env/min/cpu_microcode/intel-microcode',
+      require => Package['intel-microcode'];
+    '/etc/default/amd64-microcode':
+      ensure  => file,
+      owner   => 'root',
+      group   => 'root',
+      mode    => '644',
+      source  => 'puppet:///modules/env/min/cpu_microcode/amd64-microcode',
+      require => Package['amd64-microcode'];
+  }
+
+  exec {
+    'update_initramfs':
+      command => '/usr/sbin/update-initramfs -u',
+      require => 
File['/etc/default/intel-microcode','/etc/default/amd64-microcode'],
+      refreshonly => true;
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_metapackage.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_metapackage.pp
new file mode 100644
index 0000000..d303c7e
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_metapackage.pp
@@ -0,0 +1,45 @@
+class env::min::install_metapackage ( $variant ) {
+
+  include stdlib
+  include env::common::software_versions
+
+  case $operatingsystem {
+    'Debian','Ubuntu': {
+      case "${::lsbdistcodename}" {
+        'bullseye': {
+          $base = "g5k-meta-packages-debian11"
+        }
+        'buster': {
+          $base = "g5k-meta-packages-debian10"
+        }
+        'stretch': {
+          $base = "g5k-meta-packages-debian9"
+        }
+        default: {
+          $base = "g5k-meta-packages-${::lsbdistcodename}"
+        }
+      }
+    }
+    default: {
+      err "${operatingsystem} not supported."
+    }
+  }
+
+  $g5kmetapackages = "${base}-${variant}"
+
+  $pinned = join(['min', 'base', 'nfs','big'].map |$env| { "${base}-${env}" 
}," ")
+
+  env::common::apt_pinning {
+    'g5k-meta-packages':
+      packages => $pinned,
+      version => $::env::common::software_versions::g5k_meta_packages
+  }
+
+  env::common::g5kpackages {
+    'g5k-meta-packages':
+      packages => $g5kmetapackages,
+      ensure   => $::env::common::software_versions::g5k_meta_packages,
+      require  => Env::Common::Apt_pinning['g5k-meta-packages'];
+  }
+
+}
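For illustration, on Debian 11 (bullseye) with variant 'big', the variables above evaluate roughly as follows:

    # $base            = 'g5k-meta-packages-debian11'
    # $g5kmetapackages = 'g5k-meta-packages-debian11-big'
    # $pinned          = 'g5k-meta-packages-debian11-min g5k-meta-packages-debian11-base g5k-meta-packages-debian11-nfs g5k-meta-packages-debian11-big'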
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_tgz_g5k.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_tgz_g5k.pp
new file mode 100644
index 0000000..ef33719
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/install_tgz_g5k.pp
@@ -0,0 +1,17 @@
+class env::min::install_tgz_g5k {
+  case $operatingsystem {
+    'Debian','Ubuntu': {
+
+      include env::common::software_versions
+
+      env::common::g5kpackages {
+        'tgz-g5k':
+          ensure => $::env::common::software_versions::tgz_g5k;
+      }
+    }
+    default: {
+      err "${operatingsystem} not suported."
+    }
+  }
+}
+
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/kernel/initramfs.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/kernel/initramfs.pp
new file mode 100644
index 0000000..720a714
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/kernel/initramfs.pp
@@ -0,0 +1,8 @@
+class env::min::kernel::initramfs {
+
+  exec {
+    'generate_initramfs':
+      command     => "/usr/sbin/update-initramfs -u -k all",
+      refreshonly => true;
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/kernel/modules.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/kernel/modules.pp
new file mode 100644
index 0000000..47e5cbe
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/kernel/modules.pp
@@ -0,0 +1,52 @@
+class env::min::kernel::modules {
+
+  # Blacklist modules
+  file {
+    '/etc/modprobe.d/blacklist.conf':
+      ensure => 'file',
+  }
+  augeas {
+    'blacklist_nouveau':
+      context   => "/files/etc/modprobe.d/blacklist.conf",
+      tag       => "modules",
+      changes   =>["set blacklist[last()+1] nouveau",],
+      onlyif    =>"match blacklist[.='nouveau'] size == 0 ";
+    'blacklist_myri10ge':
+      context   => "/files/etc/modprobe.d/blacklist.conf",
+      tag       => "modules",
+      changes   =>["set blacklist[last()+1] myri10ge",],
+      onlyif    =>"match blacklist[.='myri10ge'] size == 0 ";
+    'blacklist_usb_storage':
+      context   => "/files/etc/modprobe.d/blacklist.conf",
+      tag       => "modules",
+      changes   =>["set blacklist[last()+1] usb_storage",],
+      onlyif    =>"match blacklist[.='usb_storage'] size == 0 ";
+    'blacklist_usbhid':
+      context   => "/files/etc/modprobe.d/blacklist.conf",
+      tag       => "modules",
+      changes   =>["set blacklist[last()+1] usbhid",],
+      onlyif    =>"match blacklist[.='usbhid'] size == 0 ";
+    'blacklist_ohci_hcd':
+      context   => "/files/etc/modprobe.d/blacklist.conf",
+      tag       => "modules",
+      changes   =>["set blacklist[last()+1] ohci_hcd",],
+      onlyif    =>"match blacklist[.='ohci_hcd'] size == 0 ";
+    'blacklist_ehci_hcd':
+      context   => "/files/etc/modprobe.d/blacklist.conf",
+      tag       => "modules",
+      changes   =>["set blacklist[last()+1] ehci_hcd",],
+      onlyif    =>"match blacklist[.='ehci_hcd'] size == 0 ";
+    'blacklist_usbcore':
+      context   => "/files/etc/modprobe.d/blacklist.conf",
+      tag       => "modules",
+      changes   =>["set blacklist[last()+1] usbcore",],
+      onlyif    =>"match blacklist[.='usbcore'] size == 0 ";
+
+  }
+
+  # Collect all Augeas resources tagged 'modules' and have them trigger the
+  # initramfs regeneration. This allows another manifest to modify blacklist.conf
+  # or another blacklist file and benefit from this refresh: it only needs to
+  # tag its Augeas resource with the 'modules' tag.
+  Augeas <| tag == "modules" |> ~> Exec['generate_initramfs']
+}
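A minimal sketch of how another manifest hooks into this refresh (env::big::configure_nvidia_gpu::modules in this diff follows exactly this pattern); the module name below is hypothetical:

    augeas {
      'blacklist_examplefb':
        context => '/files/etc/modprobe.d/blacklist.conf',
        tag     => 'modules',
        changes => ['set blacklist[last()+1] examplefb'],
        onlyif  => "match blacklist[.='examplefb'] size == 0";
    }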
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/kernel/remove_old.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/kernel/remove_old.pp
new file mode 100644
index 0000000..15118e2
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/kernel/remove_old.pp
@@ -0,0 +1,17 @@
+class env::min::kernel::remove_old {
+  # Remove the current kernel if it's not the last one
+  if $kernelrelease != $installed_kernelreleases[-1] {
+    package { "linux-image-$kernelrelease":
+      ensure => 'purged'
+    }
+
+    file {
+      "/lib/modules/$kernelrelease":
+        ensure => absent,
+        force  => true;
+      "/usr/lib/modules/$kernelrelease":
+        ensure => absent,
+        force  => true;
+    }
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/kernel/setup_links.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/kernel/setup_links.pp
new file mode 100644
index 0000000..66f2074
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/kernel/setup_links.pp
@@ -0,0 +1,48 @@
+class env::min::kernel::setup_links {
+  # Ensure the kernel symlink will be installed in /
+  file_line {
+    '/etc/kernel-img.conf: update kernel symlink behavior to link in /':
+      path     => '/etc/kernel-img.conf',
+      line     => "link_in_boot = no",
+      match    => '^link_in_boot =',
+      before   => [
+        Exec['linux-update-symlinks-vmlinuz'],
+        Exec['linux-update-symlinks-vmlinux'],
+        ];
+  }
+
+  # Ensure the old symlinks in /boot are removed
+  file {
+    '/boot/vmlinuz':
+      path     => '/boot/vmlinuz',
+      ensure   => absent;
+    '/boot/vmlinuz.old':
+      path     => '/boot/vmlinuz.old',
+      ensure   => absent;
+    '/boot/vmlinux':
+      path     => '/boot/vmlinux',
+      ensure   => absent;
+    '/boot/vmlinux.old':
+      path     => '/boot/vmlinux.old',
+      ensure   => absent;
+    '/boot/initrd.img':
+      path     => '/boot/initrd.img',
+      ensure   => absent;
+    '/boot/initrd.img.old':
+      path     => '/boot/initrd.img.old',
+      ensure   => absent;
+  }
+
+  # Setup symlink for initrd and vmlinuz/vmlinux
+  exec {
+    'linux-update-symlinks-vmlinuz':
+      onlyif  => "/usr/bin/test -e /boot/vmlinuz-${kernelrelease}",
+      command => "/usr/bin/linux-update-symlinks install ${kernelrelease} 
/boot/vmlinuz-${kernelrelease}";
+  }
+  exec {
+    'linux-update-symlinks-vmlinux':
+      onlyif  => "/usr/bin/test -e /boot/vmlinux-${kernelrelease}",
+      command => "/usr/bin/linux-update-symlinks install ${kernelrelease} 
/boot/vmlinux-${kernelrelease}";
+  }
+
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/set_root_password.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/set_root_password.pp
new file mode 100644
index 0000000..9b39c08
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/set_root_password.pp
@@ -0,0 +1,10 @@
+class env::min::set_root_password ($root_pwd = 
'$1$qzZwnZXQ$Ak1xs7Oma6HUHw/xDJ8q91') {
+
+  # Set root password
+  user {
+    'root':
+      ensure   => 'present',
+      password => $root_pwd;
+  }
+
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/min/set_timezone_to_europe_paris.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/set_timezone_to_europe_paris.pp
new file mode 100644
index 0000000..4725285
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/min/set_timezone_to_europe_paris.pp
@@ -0,0 +1,9 @@
+class env::min::set_timezone_to_europe_paris {
+
+  # Set timezone
+  file {
+    '/etc/localtime':
+      ensure => link,
+      target => '/usr/share/zoneinfo/Europe/Paris',
+  }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs.pp
new file mode 100644
index 0000000..b824c72
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs.pp
@@ -0,0 +1,31 @@
+# This file contains the 'nfs' class used to configure a basic environment with nfs support to be executed in grid'5000.
+
+class env::nfs ( $variant = "nfs", $parent_parameters = {} ){
+  $nfs_parameters = {
+    ntp_drift_file => false
+  }
+  $parameters = merge( $nfs_parameters, $parent_parameters )
+  # Include base class
+  class {
+    'env::base':
+      variant => $variant,
+      parent_parameters => $parameters;
+  }
+  # Openiscsi (storage5k)
+  class { 'env::nfs::configure_iscsi': }
+  # ntp (required by nfs)
+  class {
+    'env::nfs::configure_ntp':
+      drift_file => $parameters['ntp_drift_file']
+  }
+  # ldap
+  class { 'env::nfs::configure_ldap': }
+  # nfs
+  class { 'env::nfs::install_nfs_requirements': }
+  # storage5k required
+  class { 'env::nfs::install_storage5k_requirements': }
+  # osirim
+  class { 'env::nfs::install_osirim_requirements': }
+  # module spack
+  class { 'env::nfs::configure_module_path': }
+}
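A minimal sketch of the parameter-merging pattern used across the variants, assuming env::big forwards $parent_parameters down to this class (env::big itself is not part of this diff); with stdlib's merge(), values from the second hash win:

    $nfs_parameters    = { ntp_drift_file => false }
    $parent_parameters = { ntp_drift_file => true }   # e.g. set by env::std
    $parameters        = merge($nfs_parameters, $parent_parameters)
    # => { ntp_drift_file => true }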
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/configure_iscsi.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/configure_iscsi.pp
new file mode 100644
index 0000000..2f8b465
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/configure_iscsi.pp
@@ -0,0 +1,29 @@
+class env::nfs::configure_iscsi (){
+
+  # used by storage5k (bug #4309)
+
+  package {
+    'open-iscsi':
+      ensure  => installed;
+  }
+
+  file {
+    '/etc/udev/rules.d/55-openiscsi.rules':
+      owner   => root,
+      group   => root,
+      mode    => '0644',
+      source  => 'puppet:///modules/env/nfs/openiscsi/55-openiscsi.rules';
+    '/etc/udev/scripts':
+      ensure  => "directory",
+      owner   => root,
+      group   => root,
+      mode    => '0755';
+    '/etc/udev/scripts/iscsidev.sh':
+      ensure  => present,
+      owner   => root,
+      group   => root,
+      mode    => '0755',
+      source  => 'puppet:///modules/env/nfs/openiscsi/iscsidev.sh',
+      require => File['/etc/udev/scripts'];
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/configure_ldap.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/configure_ldap.pp
new file mode 100644
index 0000000..4218a9a
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/configure_ldap.pp
@@ -0,0 +1,89 @@
+class env::nfs::configure_ldap () {
+
+  # Contains configuration to have ldap authentication working (ldap, nss, pam, nscd...)
+
+  $ldap_packages = [ libnss-ldapd, libpam-ldapd, nslcd ]
+
+  package {
+    $ldap_packages:
+      ensure   => installed;
+  }
+
+  file {
+    '/etc/ldap/ldap.conf':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/nfs/ldap/ldap.conf';
+    '/etc/ldap/certificates':
+      ensure   => directory,
+      owner    => root,
+      group    => root,
+      mode     => '0755';
+    '/etc/ldap/certificates/ca2019.grid5000.fr.cert':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/nfs/ldap/ca2019.grid5000.fr.cert',
+      require  => File['/etc/ldap/certificates'];
+    '/etc/nsswitch.conf':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/nfs/ldap/nsswitch.conf';
+    '/etc/libnss-ldap.conf':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/nfs/ldap/libnss-ldap.conf';
+    '/etc/pam_ldap.conf':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/nfs/ldap/libnss-ldap.conf';
+    '/etc/pam.d/common-account':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      content   => template('env/nfs/ldap/common-account.erb');
+    '/etc/pam.d/common-auth':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/nfs/ldap/common-auth';
+    '/etc/pam.d/common-password':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/nfs/ldap/common-password';
+    '/etc/nscd.conf':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/nfs/ldap/nscd.conf',
+      notify   => Service['nscd'];
+    '/etc/nslcd.conf':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/nfs/ldap/nslcd.conf',
+      notify   => Service['nslcd'];
+  }
+
+  service {
+    'nscd':
+      ensure   => running;
+    'nslcd':
+      ensure   => running;
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/configure_module_path.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/configure_module_path.pp
new file mode 100644
index 0000000..e1d0451
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/configure_module_path.pp
@@ -0,0 +1,23 @@
+class env::nfs::configure_module_path () {
+
+  # Configure module path (installed in g5k-metapackage)
+  case "$env::deb_arch" {
+    "amd64": {
+      $modulespath = 
"/grid5000/spack/share/spack/modules/linux-debian9-x86_64\n/grid5000/spack/share/spack/modules/linux-debian10-x86_64\n"
+    }
+    "ppc64el": {
+      $modulespath = 
"/grid5000/spack/share/spack/modules/linux-debian10-ppc64le\n"
+    }
+    default: {
+      $modulespath = ""
+    }
+  }
+
+  file {
+    '/etc/lmod/modulespath':
+      ensure   => file,
+      backup   => '.puppet-bak',
+      content  => $modulespath,
+      require  => Env::Common::G5kpackages['g5k-meta-packages'];
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/configure_ntp.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/configure_ntp.pp
new file mode 100644
index 0000000..602c565
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/configure_ntp.pp
@@ -0,0 +1,40 @@
+class env::nfs::configure_ntp ( $drift_file = false ) {
+
+  $ntp = [ 'ntp', 'ntpdate' ]
+  package {
+    'ntpdate':
+      ensure    => installed;
+    'ntp':
+      ensure    => installed,
+      require   => Package['openntpd'];
+    'openntpd':
+      ensure    => absent;
+  } # Here we force the ntp package to be 'ntp' and not 'openntpd', because ntp is listed as a dependency by g5k-checks and conflicts with openntpd.
+
+  file {
+    '/etc/ntp.conf':
+      ensure    => file,
+      owner     => root,
+      group     => root,
+      mode      => '0644',
+      content   => template("env/nfs/ntp/ntp.conf.erb"),
+      notify    => Service['ntp'];
+  }
+
+  if $drift_file {
+    file {
+      '/var/lib/ntp/ntp.drift':
+        ensure    => file,
+        owner     => ntp,
+        group     => ntp,
+        mode      => '0644',
+        content   => "",
+        require   => Package[$ntp];
+    }
+  }
+
+  service {
+    'ntp':
+      enable    => true;
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/install_nfs_requirements.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/install_nfs_requirements.pp
new file mode 100644
index 0000000..f2c1385
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/install_nfs_requirements.pp
@@ -0,0 +1,18 @@
+class env::nfs::install_nfs_requirements () {
+
+  package {
+    'nfs-common':
+      ensure   => installed;
+    'libcap2-bin':
+      ensure   => installed;
+  }
+
+  if "${::lsbdistcodename}" == "bullseye" {
+    # Force python3 usage instead of python2 (only 'python' is Recommended by the nfs-common package)
+    # see bug #13194
+    package {
+      'python-is-python3':
+        ensure => installed;
+    }
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/install_osirim_requirements.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/install_osirim_requirements.pp
new file mode 100644
index 0000000..8485b44
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/install_osirim_requirements.pp
@@ -0,0 +1,42 @@
+class env::nfs::install_osirim_requirements () {
+
+  package {
+    'autofs':
+      ensure   => installed;
+  }
+
+  service {
+    'autofs':
+      ensure   => running,
+      require  => Package['autofs'];
+  }
+
+  file {
+    '/srv/osirim':
+      ensure   => directory,
+      owner    => root,
+      group    => root,
+      mode     => '0755';
+    '/etc/auto.master.d':
+      ensure   => directory,
+      owner    => root,
+      group    => root,
+      mode     => '0755';
+    '/etc/auto.master.d/osirim.autofs':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      content  => '/srv/osirim /etc/auto.osirim --timeout=60',
+      require  => File['/etc/auto.master.d'],
+      notify   => Service['autofs'];
+    '/etc/auto.osirim':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      content  => '* -fstype=nfs,rw,nfsvers=3,hard,intr,async,noatime,nodev,nosuid,auto,rsize=32768,wsize=32768 osirim.toulouse.grid5000.fr:/ifs/grid5000/data/home/&',
+      require  => File['/srv/osirim'],
+      notify   => Service['autofs'];
+  }
+}
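The auto.osirim map is an indirect wildcard map: whatever key is accessed under /srv/osirim is substituted for the '&' placeholder, so each user's home on the OSIRIM filer is mounted on demand. For one hypothetical user 'jdoe', the automount performed is roughly equivalent to the following static mount (a sketch for illustration only, not part of the module):

    mount { '/srv/osirim/jdoe':
      ensure  => mounted,
      device  => 'osirim.toulouse.grid5000.fr:/ifs/grid5000/data/home/jdoe',
      fstype  => 'nfs',
      options => 'rw,nfsvers=3,hard,intr,async,noatime,nodev,nosuid,rsize=32768,wsize=32768',
    }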
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/install_storage5k_requirements.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/install_storage5k_requirements.pp
new file mode 100644
index 0000000..eb11500
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/nfs/install_storage5k_requirements.pp
@@ -0,0 +1,8 @@
+class env::nfs::install_storage5k_requirements {
+
+  # Package required by storage5k
+  package {
+    "libdbd-pg-perl":
+      ensure  => installed;
+  }
+}
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/std.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std.pp
new file mode 100644
index 0000000..99d3611
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/std.pp
@@ -0,0 +1,58 @@
+# This file contains the 'std' class used to configure the standard environment executed on Grid'5000.
+
+class env::std ( $variant = "big", $parent_parameters = {} ){
+
+  if $env::target_g5k {
+    $root_pwd = lookup("env::std::misc::rootpwd")
+  }
+  else {
+    $root_pwd = '$1$qzZwnZXQ$Ak1xs7Oma6HUHw/xDJ8q91' # grid5000
+  }
+
+  $std_parameters = {
+    ganglia_enable => true,
+    ntp_drift_file => true,
+    misc_keep_tmp  => false,
+    misc_root_pwd  => $root_pwd,
+    mic_enable     => true,
+  }
+
+  $parameters = merge( $std_parameters, $parent_parameters )
+
+  # Include big class
+  class {
+    'env::big':
+      variant => $variant,
+      parent_parameters => $parameters;
+  }
+  # OAR
+  class { 'env::std::configure_oar_client': }
+  # g5kchecks (+ ipmitool)
+  class { 'env::std::install_g5kchecks': }
+  # g5kcode
+  class { 'env::std::add_g5kcode_to_path': }
+  # g5k-subnets
+  class { 'env::std::install_g5ksubnets': }
+  # Log net access
+  class { 'env::std::configure_rsyslog_remote': }
+  # sudo-g5k
+  class { 'env::std::install_sudog5k': }
+  if $env::deb_arch == 'amd64' {
+    # megacli (RAID controller)
+    class { 'env::std::install_megacli': }
+    # g5k systemd generator
+    class { 'env::std::g5k_generator': }
+    # g5k-disk-manager-backend
+    class { 'env::std::configure_g5kdiskmanagerbackend': }
+    # g5k-pmem-manager
+    class { 'env::std::configure_g5kpmemmanager': }
+    # nvidia-reset-mig
+    class { 'env::std::nvidia_reset_mig': }
+  }
+  # disable lvm pvscan (bug 9453)
+  class { 'env::std::disable_lvm_pvscan': }
+  # Install backported libguestfs-tools from g5k packages on non-x86 arch
+  if $env::deb_arch == 'arm64' or $env::deb_arch == 'ppc64el' {
+    class { 'env::std::install_libguestfs_backport': }
+  }
+}
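merge() from puppetlabs-stdlib gives precedence to its right-hand argument, so values passed in $parent_parameters by a child environment override the std defaults before the merged hash is handed down to env::big. A small sketch of that precedence, with illustrative values only:

    $defaults  = { misc_keep_tmp => false, mic_enable => true }
    $overrides = { misc_keep_tmp => true }
    $result    = merge($defaults, $overrides)
    # $result is { misc_keep_tmp => true, mic_enable => true }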
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/add_g5kcode_to_path.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/add_g5kcode_to_path.pp
new file mode 100644
index 0000000..ef7ec33
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/add_g5kcode_to_path.pp
@@ -0,0 +1,16 @@
+class env::std::add_g5kcode_to_path {
+
+  file {
+    '/root/.ssh':
+      ensure  => directory,
+      owner   => root,
+      group   => root,
+      mode    => '0700';
+  }
+
+  # This is a dirty workaround, but Augeas does not manage /etc/profile, which is a bash script rather than a real configuration file (or I'm really bad with Augeas).
+  file_line { 'g5kcode_etc_profile_path':
+    path => '/etc/profile',
+    line => 'export PATH=$PATH:/grid5000/code/bin';
+  }
+}
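file_line (from puppetlabs-stdlib) appends the export line to /etc/profile only if it is not already present, which keeps the edit idempotent without templating the whole file. If the PATH entry ever had to be updated in place rather than just appended, a match parameter could be added; a sketch under that assumption:

    file_line { 'g5kcode_etc_profile_path':
      path  => '/etc/profile',
      line  => 'export PATH=$PATH:/grid5000/code/bin',
      match => '^export PATH=\$PATH:/grid5000/code',
    }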
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_g5kdiskmanagerbackend.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_g5kdiskmanagerbackend.pp
new file mode 100644
index 0000000..9ec247b
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_g5kdiskmanagerbackend.pp
@@ -0,0 +1,33 @@
+class env::std::configure_g5kdiskmanagerbackend {
+
+  require env::std::install_hwraid_apt_source
+  require env::std::configure_g5kmanager
+
+  case $operatingsystem {
+    'Debian': {
+      case "${::lsbdistcodename}" {
+        "stretch", "buster", "bullseye" : {
+          file {
+            '/etc/systemd/system/g5k-disk-manager-backend.service':
+              source => 'puppet:///modules/env/std/g5k-manager/g5k-disk-manager-backend.service',
+              ensure => file;
+            '/usr/local/libexec/g5k-disk-manager-backend':
+              source => 'puppet:///modules/env/std/g5k-manager/g5k-disk-manager-backend',
+              mode => '0755',
+              ensure => file;
+            '/etc/systemd/system/multi-user.target.wants/g5k-disk-manager-backend.service':
+              ensure => link,
+              target => '/etc/systemd/system/g5k-disk-manager-backend.service';
+          }
+        }
+        default : {
+          err "${::lsbdistcodename} not supported."
+        }
+      }
+    }
+    default : {
+      err "${operatingsystem} not supported."
+    }
+  }
+}
+
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_g5kmanager.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_g5kmanager.pp
new file mode 100644
index 0000000..d6740f5
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_g5kmanager.pp
@@ -0,0 +1,33 @@
+class env::std::configure_g5kmanager {
+  case $operatingsystem {
+    'Debian': {
+      case "${::lsbdistcodename}" {
+        "stretch", "buster", "bullseye" : {
+          file {
+            '/usr/local/libexec/':
+              ensure   => directory,
+              mode     => '0755',
+              owner    => 'root',
+              group    => 'root';
+            '/usr/local/lib/g5k/':
+              ensure   => directory,
+              mode     => '0755',
+              owner    => 'root',
+              group    => 'root';
+            '/usr/local/lib/g5k/g5k-manager.rb':
+              source => 'puppet:///modules/env/std/g5k-manager/lib/g5k-manager.rb',
+              mode   => '0755',
+              ensure => file;
+          }
+        }
+        default : {
+          err "${::lsbdistcodename} not supported."
+        }
+      }
+    }
+    default : {
+      err "${operatingsystem} not supported."
+    }
+  }
+}
+
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_g5kpmemmanager.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_g5kpmemmanager.pp
new file mode 100644
index 0000000..bde3f08
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_g5kpmemmanager.pp
@@ -0,0 +1,32 @@
+class env::std::configure_g5kpmemmanager {
+
+  require env::std::configure_g5kmanager
+
+  case $operatingsystem {
+    'Debian': {
+      case "${::lsbdistcodename}" {
+        "buster", "bullseye" : {
+          file {
+            '/etc/systemd/system/g5k-pmem-manager.service':
+              source => 'puppet:///modules/env/std/g5k-manager/g5k-pmem-manager.service',
+              ensure => file;
+            '/usr/local/libexec/g5k-pmem-manager':
+              source => 'puppet:///modules/env/std/g5k-manager/g5k-pmem-manager',
+              mode => '0755',
+              ensure => file;
+            '/etc/systemd/system/multi-user.target.wants/g5k-pmem-manager.service':
+              ensure => link,
+              target => '/etc/systemd/system/g5k-pmem-manager.service';
+          }
+        }
+        default : {
+          err "${::lsbdistcodename} not supported."
+        }
+      }
+    }
+    default : {
+      err "${operatingsystem} not supported."
+    }
+  }
+}
+
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_oar_client.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_oar_client.pp
new file mode 100644
index 0000000..d73964d
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_oar_client.pp
@@ -0,0 +1,247 @@
+class env::std::configure_oar_client {
+
+  $oar_packages = ['oar-common', 'oar-node']
+
+  if "$operatingsystem" == "Debian" {
+    # Can specify oar client version below
+    case "${::lsbdistcodename}" {
+      'stretch' : {
+        $oar_version       = "2.5.8~rc8-1~bpo9+1";
+        $oar_repos         = "2.5/debian/";
+        $oar_repos_release = "stretch-backports_beta"
+      }
+      'buster' : {
+        $oar_version       = "2.5.10~g5k8-1";
+        $oar_repos         = "g5k"
+      }
+      'bullseye' : {
+        $oar_version       = "2.5.10~g5k8-1";
+        $oar_repos         = "g5k"
+      }
+      default : {
+        err "${::lsbdistcodename} not supported."
+      }
+    }
+  }
+
+  if ($oar_repos == "default") {
+    package {
+      'oar-common':
+        ensure   => $oar_version,
+        require  => Package["liboar-perl"];
+      'oar-node':
+        ensure   => $oar_version,
+        require  => Package["liboar-perl"];
+      'liboar-perl':
+        ensure   => $oar_version;
+    }
+  } elsif ($oar_repos == "g5k") {
+    env::common::g5kpackages {
+      "oar/${::lsbdistcodename}":
+        source_filename => 'oar',
+        packages        => ['liboar-perl', 'oar-common', 'oar-node'],
+        ensure          => $oar_version
+    }
+  } else {
+    apt::source {
+      'oar-repo':
+        location => "http://oar-ftp.imag.fr/oar/$oar_repos",
+        release  => "$oar_repos_release",
+        repos    => 'main',
+        notify   => Exec['oar apt update'],
+        require  => Exec["import oar gpg key"],
+    }
+    exec {
+      "import oar gpg key":
+        command => "/usr/bin/wget -q http://oar-ftp.imag.fr/oar/oarmaster.asc 
-O- | /usr/bin/apt-key add -",
+        unless  => "/usr/bin/apt-key list | /bin/grep oar",
+    }
+    exec {
+      "oar apt update":
+        command => "/usr/bin/apt-get update",
+    }
+    package {
+      'oar-common':
+        ensure          => $oar_version,
+        install_options => ['-t', "$oar_repos_release"],
+        require         => [ Package["liboar-perl"], Apt::Source['oar-repo'] ];
+      'oar-node':
+        ensure          => $oar_version,
+        install_options => ['-t', "$oar_repos_release"],
+        require         => [ Package["liboar-perl"], Apt::Source['oar-repo'] ];
+      'liboar-perl':
+        ensure          => $oar_version,
+        install_options => ['-t', "$oar_repos_release"],
+        require         => Apt::Source['oar-repo'];
+    }
+  }
+
+  if ($oar_version != "installed") {
+    apt::pin { 'oar client pin':
+      packages => [ 'oar-common', 'oar-node', 'liboar-perl' ],
+      version  => $oar_version,
+      priority => 1001,
+    }
+  }
+
+  $hiera   = lookup("env::std::oar::ssh")
+  file {
+    '/var/lib/oar/checklogs/':
+      ensure   => directory,
+      owner    => root,
+      group    => root,
+      mode     => '0755',
+      require  => Package[$oar_packages];
+    '/var/lib/oar/.ssh':
+      ensure   => directory,
+      owner    => oar,
+      group    => oar,
+      mode     => '0755',
+      require  => Package[$oar_packages];
+    '/var/lib/oar/.ssh/config':
+      ensure   => present,
+      owner    => oar,
+      group    => oar,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/std/oar/oar_sshclient_config',
+      require  => [ File['/var/lib/oar/.ssh'], Package[$oar_packages] ];
+    '/etc/oar/oar_ssh_host_dsa_key':
+      ensure   => present,
+      owner    => root,
+      group    => root,
+      mode     => '0600',
+      content  => $hiera['oar_ssh_host_dsa_key'],
+      require  => Package[$oar_packages];
+    '/etc/oar/oar_ssh_host_rsa_key':
+      ensure   => present,
+      owner    => root,
+      group    => root,
+      mode     => '0600',
+      content  => $hiera['oar_ssh_host_rsa_key'],
+      require  => Package[$oar_packages];
+    '/etc/oar/oar_ssh_host_dsa_key.pub':
+      ensure   => present,
+      owner    => root,
+      group    => root,
+      mode     => '0600',
+      content  => $hiera['oar_ssh_host_dsa_key_pub'],
+      require  => Package[$oar_packages];
+    '/etc/oar/oar_ssh_host_rsa_key.pub':
+      ensure   => present,
+      owner    => root,
+      group    => root,
+      mode     => '0600',
+      content  => $hiera['oar_ssh_host_rsa_key_pub'],
+      require  => Package[$oar_packages];
+    '/var/lib/oar/.batch_job_bashrc':
+      ensure   => present,
+      owner    => oar,
+      group    => oar,
+      mode     => '0755',
+      source   => 'puppet:///modules/env/std/oar/batch_job_bashrc',
+      require  => Package[$oar_packages];
+    '/etc/security/access.conf':
+      ensure   => present,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/std/oar/etc/security/access.conf',
+      require  => Package[$oar_packages];
+    '/var/lib/oar/access.conf':
+      ensure   => present,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/std/oar/var/lib/oar/access.conf',
+      require  => Package[$oar_packages];
+    '/etc/oar/sshd_config':
+      ensure   => present,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => '/etc/ssh/sshd_config',
+      require  => Package[$oar_packages, 'ssh server'];
+    '/var/lib/oar/.ssh/authorized_keys':
+      ensure   => present,
+      owner    => oar,
+      group    => oar,
+      mode     => '0644',
+      content  => $hiera['oar_authorized_keys'],
+      require  => Package[$oar_packages];
+    '/etc/default/oar-node':
+      ensure   => present,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/std/oar/default_oar-node',
+      require  => Package[$oar_packages];
+  }
+
+  augeas {
+    'sshd_config_oar':
+      incl    => '/etc/oar/sshd_config',
+      lens    => 'Sshd.lns',
+      changes => [
+        'set /files/etc/oar/sshd_config/Port 6667',
+        'set /files/etc/oar/sshd_config/HostKey /etc/oar/oar_ssh_host_rsa_key',
+        'set /files/etc/oar/sshd_config/LoginGraceTime 10m',
+        'set /files/etc/oar/sshd_config/PermitRootLogin no',
+        'set /files/etc/oar/sshd_config/PasswordAuthentication no',
+        'set /files/etc/oar/sshd_config/ChallengeResponseAuthentication no',
+        'set /files/etc/oar/sshd_config/UsePAM yes',
+        'set /files/etc/oar/sshd_config/X11Forwarding yes',
+        'set /files/etc/oar/sshd_config/PrintMotd no',
+        'set /files/etc/oar/sshd_config/PermitUserEnvironment yes',
+        'set /files/etc/oar/sshd_config/MaxStartups 500',
+        'set /files/etc/oar/sshd_config/AcceptEnv/1 LANG',
+        'set /files/etc/oar/sshd_config/AcceptEnv/2 LC_*',
+        'set /files/etc/oar/sshd_config/AcceptEnv/3 OAR_CPUSET',
+        'set /files/etc/oar/sshd_config/AcceptEnv/4 OAR_USER_CPUSET',
+        'set /files/etc/oar/sshd_config/AcceptEnv/5 OAR_USER_GPUDEVICE',
+        'set /files/etc/oar/sshd_config/AcceptEnv/6 OAR_JOB_USER',
+        'set /files/etc/oar/sshd_config/Subsystem/sftp /usr/lib/openssh/sftp-server',
+        'set /files/etc/oar/sshd_config/AllowUsers/1 oar'
+      ],
+      require  => File['/etc/oar/sshd_config'];
+  }
+
+  file_line { 'oar_conf':
+    ensure     => present,
+    match      => "^(#)?COMPUTE_THREAD_SIBLINGS=*",
+    path       => '/etc/oar/oar.conf',
+    line       => 'COMPUTE_THREAD_SIBLINGS="yes"',
+    replace    => true,
+    require    => Package[$oar_packages];
+  }
+
+  if $env::target_g5k {
+    $key_values   = lookup("env::std::oar::ssh")
+
+    file {
+      "/var/lib/oar/.ssh/oarnodesetting_ssh.key":
+        ensure   => file,
+        owner    => oar,
+        group    => oar,
+        mode     => '0600',
+        content  => $key_values['oarnodesetting_ssh_key'];
+      "/var/lib/oar/.ssh/oarnodesetting_ssh.key.pub":
+        ensure   => file,
+        owner    => oar,
+        group    => oar,
+        mode     => '0644',
+        content  => $key_values['oarnodesetting_ssh_key_pub'];
+      "/var/lib/oar/.ssh/id_rsa":
+        ensure   => file,
+        owner    => oar,
+        group    => oar,
+        mode     => '0600',
+        content  => $key_values['id_rsa'];
+      "/var/lib/oar/.ssh/id_rsa.pub":
+        ensure   => file,
+        owner    => oar,
+        group    => oar,
+        mode     => '0644',
+        content  => $key_values['id_rsa_pub'];
+    }
+  }
+}
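The class pulls its key material from hiera under env::std::oar::ssh. Judging from the keys read above, the looked-up hash is expected to have roughly the following shape; the values are placeholders and the layout is an assumption, only the key names come from the manifest:

    $oar_ssh = {
      'oar_ssh_host_dsa_key'     => '<private host key>',
      'oar_ssh_host_rsa_key'     => '<private host key>',
      'oar_ssh_host_dsa_key_pub' => 'ssh-dss AAAA...',
      'oar_ssh_host_rsa_key_pub' => 'ssh-rsa AAAA...',
      'oar_authorized_keys'      => 'ssh-rsa AAAA...',
      # plus the oarnodesetting_ssh_key*, id_rsa and id_rsa_pub entries used above
    }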
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_rsyslog_remote.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_rsyslog_remote.pp
new file mode 100644
index 0000000..57e084d
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/configure_rsyslog_remote.pp
@@ -0,0 +1,39 @@
+class env::std::configure_rsyslog_remote {
+
+  require env::commonpackages::rsyslog
+
+  file {
+    "/etc/rsyslog.conf":
+      mode    => '0600',
+      owner   => root,
+      group   => root,
+      source  => "puppet:///modules/env/std/net_access/rsyslog.conf";
+    "/etc/rsyslog.d/syslog_iptables.conf":
+      mode    => '0655',
+      owner   => root,
+      group   => root,
+      source  => "puppet:///modules/env/std/net_access/syslog_iptables.conf";
+  }
+
+  # Stretch has an old iptables version that does not support hashlimit-rate-match
+  if "${::lsbdistcodename}" == "stretch" {
+    # iptables installed by kameleon.
+    file {
+      "/etc/network/if-pre-up.d/iptables":
+        mode    => '0755',
+        owner   => root,
+        group   => root,
+        source  => "puppet:///modules/env/std/net_access/iptables.stretch"
+    }
+  } else {
+    # iptables installed by kameleon.
+    file {
+      "/etc/network/if-pre-up.d/iptables":
+        mode    => '0755',
+        owner   => root,
+        group   => root,
+        source  => "puppet:///modules/env/std/net_access/iptables"
+    }
+  }
+}
+
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/dell.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/dell.pp
new file mode 100644
index 0000000..e131e3b
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/dell.pp
@@ -0,0 +1,120 @@
+# Recipe from grid5000-puppet
+# Don't forget to update both repositories when modifying something
+#
+
+class { 'apt':
+  update => {
+    timeout => 1000
+  }
+}
+
+class env::std::dell (
+  Array $packages_names = $env::std::dell::params::packages_names,
+) inherits env::std::dell::params {
+
+  include apt
+
+  $_key = '42550ABD1E80D7C1BC0BAD851285491434D8786F'
+
+  case $::lsbdistcodename {
+    'stretch', 'buster': {
+      # No official Debian support since buster
+      $_location = "https://linux.dell.com/repo/community/openmanage/910/stretch"
+      $_release = "stretch"
+      $_repos = "main"
+      $_packages_names = $packages_names
+      $service_status = 'service dataeng status'
+    }
+    'bullseye': {
+      # Ubuntu 20.04 packages
+      $_location = "https://linux.dell.com/repo/community/openmanage/950/focal"
+      $_release = "focal"
+      $_repos = "main"
+      $_packages_names = $packages_names - 'libssl1.0.0'
+      $service_status = 'systemctl status dsm_sa_datamgrd.service dsm_sa_eventmgrd.service'
+    }
+  }
+
+  apt::source {
+    'dell':
+      comment  => 'Dell repository for OpenManage Server Administrator tools',
+      location => $_location,
+      release  => $_release,
+      repos    => $_repos,
+      key      => {
+        'id'      => $_key,
+        'content' => template('env/std/dell/linux.dell.com.key.erb'),
+      },
+      include  => {
+        'deb' => true,
+        'src' => false
+      },
+      notify  => Exec['apt_update'];
+  }
+
+  package {
+    $_packages_names:
+      ensure  => present,
+      require => [
+        Apt::Source['dell'],
+        Exec['apt_update'],
+      ];
+  }
+
+  case $::lsbdistcodename  {
+    # OMSA <= 9.1.0
+    'stretch', 'buster': {
+      service {
+        'dataeng':
+          enable  => true,
+          require => Package[$_packages_names];
+      }
+    }
+    # OMSA >= 9.3.0
+    'bullseye': {
+      service {
+        'dsm_sa_datamgrd':
+          enable  => true,
+          require => Package[$_packages_names];
+      }
+      service {
+        'dsm_sa_eventmgrd.service':
+          enable  => true,
+          require => Package[$_packages_names];
+      }
+    }
+  }
+
+  if ($::lsbdistcodename == 'buster') or ($::lsbdistcodename == 'bullseye') {
+    # Using enable => false doesn't seem to work, maybe because openipmi uses systemd-sysv-generator
+    exec {
+      'disable openipmi service':
+        command => "/lib/systemd/systemd-sysv-install disable openipmi",
+        require => Package[$_packages_names, 'ipmitool'];
+    }
+  }
+
+  if ($::lsbdistcodename == 'bullseye') {
+    # Fix bug 12930
+    exec {
+      'disable NVMe devices support':
+        command => "/bin/sed -i 's/^vil7=dsm_sm_psrvil/; vil7=dsm_sm_psrvil/' 
/opt/dell/srvadmin/etc/srvadmin-storage/stsvc.ini",
+        require => Package[$_packages_names];
+    }
+  }
+
+  if ($::lsbdistcodename == 'buster') {
+    # Fix bug 8048 and 8975
+    file {
+      '/etc/systemd/system/dataeng.service.d':
+        ensure  => 'directory',
+        require => Package[$_packages_names];
+      '/etc/systemd/system/dataeng.service.d/stop.conf':
+        ensure  => 'file',
+        content => "[Service]\nExecStop=\nKillMode=control-group\nKillSignal=9",
+        require => Package[$_packages_names];
+    }
+    File['/etc/systemd/system/dataeng.service.d']
+    ->File['/etc/systemd/system/dataeng.service.d/stop.conf']
+  }
+}
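The package list comes from env::std::dell::params and is exposed as a class parameter, so an environment can override it when declaring the class. A minimal sketch of such an override (the reduced package list is only an example):

    class { 'env::std::dell':
      packages_names => ['srvadmin-base', 'srvadmin-storageservices'],
    }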
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/dell/params.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/dell/params.pp
new file mode 100644
index 0000000..e6d3665
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/dell/params.pp
@@ -0,0 +1,16 @@
+# Recipe from grid5000-puppet, keep synchronized!
+#
+
+class env::std::dell::params {
+
+  $packages_names = [
+    'srvadmin-base',
+    "srvadmin-idracadm7",
+    "srvadmin-idracadm8",
+    'srvadmin-storageservices',
+    'srvadmin-omcommon',
+    'libncurses5',
+    'libxslt1.1',
+    'libssl1.0.0',
+  ]
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/disable_lvm_pvscan.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/disable_lvm_pvscan.pp
new file mode 100644
index 0000000..908d323
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/disable_lvm_pvscan.pp
@@ -0,0 +1,19 @@
+class env::std::disable_lvm_pvscan {
+
+  # Disable LVM2 pvscan on the std environment to avoid issues with disk reservation (see bug 9453)
+  package {'lvm2':
+    ensure => installed;
+  }
+
+  file {
+    "/etc/lvm/lvm.conf":
+      mode    => '0644',
+      owner   => root,
+      group   => root,
+      source  => "puppet:///modules/env/std/lvm/lvm.conf",
+      require => Package['lvm2'],
+      notify  => Exec['generate_initramfs'];
+
+  }
+}
+
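The lvm.conf file resource notifies Exec['generate_initramfs'], which is not defined in this class and is assumed to be declared elsewhere in the env module. For context, such an exec typically looks like the following sketch (refreshonly, so it only runs when notified):

    exec { 'generate_initramfs':
      command     => '/usr/sbin/update-initramfs -u -k all',
      refreshonly => true,
    }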
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/g5k_generator.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/g5k_generator.pp
new file mode 100644
index 0000000..c03fc8b
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/g5k_generator.pp
@@ -0,0 +1,26 @@
+class env::std::g5k_generator {
+
+  case $operatingsystem {
+    'Debian','Ubuntu': {
+
+      # The smbios-utils package provides the smbios-sys-info-lite binary tool
+      package {
+        'smbios-utils':
+          ensure   => 'installed'
+      }
+
+      file {
+        '/lib/systemd/system-generators/g5k-generator':
+          ensure   => present,
+          owner    => root,
+          group    => root,
+          mode     => '0755',
+          source   => 'puppet:///modules/env/std/g5k_generator/g5k_generator',
+          require  => Package['smbios-utils']
+      }
+    }
+    default: {
+      err "${operatingsystem} not supported."
+    }
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_g5kchecks.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_g5kchecks.pp
new file mode 100644
index 0000000..871045f
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_g5kchecks.pp
@@ -0,0 +1,35 @@
+class env::std::install_g5kchecks {
+
+  include 'env::std::ipmitool' # ipmitool is required by g5k-checks
+  if $env::deb_arch == 'amd64' {
+    include 'env::std::dell'     # dell tools are required by g5k-checks
+  }
+
+  case $operatingsystem {
+
+    'Debian','Ubuntu': {
+
+      require env::commonpackages::rake
+      require env::commonpackages::rubyrspec
+
+      env::common::g5kpackages {
+        'g5k-checks':
+          ensure  => $::env::common::software_versions::g5k_checks,
+          release => "${::lsbdistcodename}";
+      }
+
+      file {
+        '/etc/g5k-checks.conf':
+          ensure   => present,
+          owner    => root,
+          group    => root,
+          mode     => '0644',
+          source   => "puppet:///modules/env/std/g5kchecks/g5k-checks.conf",
+          require  => Package["g5k-checks"];
+      }
+    }
+    default: {
+      err "${operatingsystem} not supported."
+    }
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_g5ksubnets.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_g5ksubnets.pp
new file mode 100644
index 0000000..0d894ba
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_g5ksubnets.pp
@@ -0,0 +1,9 @@
+class env::std::install_g5ksubnets {
+
+  include env::common::software_versions
+
+  env::common::g5kpackages {
+    'g5k-subnets':
+      ensure => $::env::common::software_versions::g5k_subnets;
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_hwraid_apt_source.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_hwraid_apt_source.pp
new file mode 100644
index 0000000..1194f73
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_hwraid_apt_source.pp
@@ -0,0 +1,38 @@
+class env::std::install_hwraid_apt_source {
+
+  include apt
+
+  # FIXME: remove when an HWRAID bullseye repository becomes available
+  if "${::lsbdistcodename}" == "bullseye" {
+    apt::source { 'hwraid.le-vert.net':
+      key      => {
+        'id'      => '0073C11919A641464163F7116005210E23B3D3B4',
+        'content' => template('env/std/hwraid/hwraid.le-vert.net.key.erb'),
+      },
+      comment  => 'Repo for megacli package',
+      location => 'http://hwraid.le-vert.net/debian',
+      release  => "buster",
+      repos    => 'main',
+      include  => {
+        'deb' => true,
+        'src' => false
+      }
+    }
+
+  } else {
+    apt::source { 'hwraid.le-vert.net':
+      key      => {
+        'id'      => '0073C11919A641464163F7116005210E23B3D3B4',
+        'content' => template('env/std/hwraid/hwraid.le-vert.net.key.erb'),
+      },
+      comment  => 'Repo for megacli package',
+      location => 'http://hwraid.le-vert.net/debian',
+      release  => "${::lsbdistcodename}",
+      repos    => 'main',
+      include  => {
+        'deb' => true,
+        'src' => false
+      }
+    }
+  }
+}
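The two branches differ only in the release they point at: bullseye has no dedicated hwraid repository yet, so it reuses the buster one. The duplication could be collapsed with a selector; a sketch that should be behaviour-equivalent, offered only as a possible cleanup:

    $hwraid_release = $::lsbdistcodename ? {
      'bullseye' => 'buster',
      default    => $::lsbdistcodename,
    }

    apt::source { 'hwraid.le-vert.net':
      key      => {
        'id'      => '0073C11919A641464163F7116005210E23B3D3B4',
        'content' => template('env/std/hwraid/hwraid.le-vert.net.key.erb'),
      },
      comment  => 'Repo for megacli package',
      location => 'http://hwraid.le-vert.net/debian',
      release  => $hwraid_release,
      repos    => 'main',
      include  => {
        'deb' => true,
        'src' => false,
      },
    }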
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_libguestfs_backport.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_libguestfs_backport.pp
new file mode 100644
index 0000000..719d060
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_libguestfs_backport.pp
@@ -0,0 +1,27 @@
+class env::std::install_libguestfs_backport {
+
+  case $lsbdistcodename {
+    'buster': {
+      if $env::deb_arch == 'arm64' {
+        env::common::g5kpackages {
+          'libguestfs-backport':
+            packages => 'libguestfs-tools',
+            ensure  => $::env::common::software_versions::libguestfs_backport_arm64;
+        }
+      }
+      elsif $env::deb_arch == 'ppc64el' {
+        env::common::g5kpackages {
+          'libguestfs-backport':
+            packages => 'libguestfs-tools',
+            ensure  => $::env::common::software_versions::libguestfs_backport_ppc64el;
+        }
+      }
+      else {
+        err "${env::deb_arch} not supported"
+      }
+    }
+    default: {
+      err "${lsbdistcodename} not supported."
+    }
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_megacli.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_megacli.pp
new file mode 100644
index 0000000..1bd4d87
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_megacli.pp
@@ -0,0 +1,11 @@
+class env::std::install_megacli {
+
+  require env::std::install_hwraid_apt_source
+
+  package {
+    'megacli':
+      ensure => installed,
+      require  => [Apt::Source['hwraid.le-vert.net'], Exec['apt_update']]
+  }
+
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_sudog5k.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_sudog5k.pp
new file mode 100644
index 0000000..fb0e4a6
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/install_sudog5k.pp
@@ -0,0 +1,29 @@
+class env::std::install_sudog5k {
+
+  case $operatingsystem {
+    'Debian': {
+      require env::commonpackages::rake
+      require env::commonpackages::rubyrspec
+      require env::commonpackages::rsyslog
+
+      env::common::g5kpackages {
+        'sudo-g5k':
+          ensure => $::env::common::software_versions::sudo_g5k;
+      }
+
+    }
+    default: {
+      err "${operatingsystem} not suported."
+    }
+  }
+
+  file {
+    '/etc/sudo-g5k/id_rsa_sudo-g5k':
+      ensure  => file,
+      owner   => root,
+      group   => root,
+      mode    => '0600',
+      source  => 'puppet:///modules/env/std/sudo-g5k/id_rsa_sudo-g5k',
+      require => Package['sudo-g5k'];
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/ipmitool.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/ipmitool.pp
new file mode 100644
index 0000000..fed7222
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/ipmitool.pp
@@ -0,0 +1,18 @@
+class env::std::ipmitool {
+
+  package { 'ipmitool':
+    ensure   => 'installed'
+  }
+
+  augeas {
+    'module_ipmi_si':
+      context => "/files/etc/modules",
+      changes => ["ins ipmi_si after #comment[last()]",],
+      onlyif  => "match ipmi_si size == 0 ";
+    'module_ipmi_devintf':
+      context => "/files/etc/modules",
+      changes => ["ins ipmi_devintf after #comment[last()]",],
+      onlyif  => "match ipmi_devintf size == 0 ";
+  }
+
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/std/nvidia_reset_mig.pp
 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/nvidia_reset_mig.pp
new file mode 100644
index 0000000..c9285ae
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/std/nvidia_reset_mig.pp
@@ -0,0 +1,21 @@
+class env::std::nvidia_reset_mig () {
+
+  file{
+    '/usr/local/bin/nvidia-reset-mig':
+      ensure    => present,
+      owner     => root,
+      group     => root,
+      mode      => '0755',
+      source    => 'puppet:///modules/env/std/nvidia_configure/nvidia-reset-mig';
+    '/etc/systemd/system/nvidia-reset-mig.service':
+      ensure    => present,
+      owner     => root,
+      group     => root,
+      mode      => '0644',
+      source    => 'puppet:///modules/env/std/nvidia_configure/nvidia-reset-mig.service';
+    '/etc/systemd/system/multi-user.target.wants/nvidia-reset-mig.service':
+      ensure => link,
+      target => '/etc/systemd/system/nvidia-reset-mig.service';
+
+  }
+}
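The unit is enabled by creating the multi-user.target.wants symlink directly rather than through a service resource, which works inside an image build where systemd is not running. On a live system the more idiomatic equivalent would be roughly:

    service { 'nvidia-reset-mig':
      enable  => true,
      require => File['/etc/systemd/system/nvidia-reset-mig.service'],
    }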
diff --git a/grid5000/steps/data/setup/puppet/modules/env/manifests/xen.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/xen.pp
new file mode 100644
index 0000000..20ca3ca
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/xen.pp
@@ -0,0 +1,13 @@
+# This file contains the 'xen' class used to configure the xen environment executed on Grid'5000.
+
+class env::xen ( $parent_parameters = {} ) {
+
+  $xen_parameters = {}
+  $parameters = merge( $xen_parameters, $parent_parameters )
+
+  # Include base
+  class{ 'env::base': }
+
+  # xen packages
+  class{ 'env::xen::configure_xen': }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/xen/configure_xen.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/xen/configure_xen.pp
new file mode 100644
index 0000000..1ac7cb8
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/xen/configure_xen.pp
@@ -0,0 +1,236 @@
+class env::xen::configure_xen () {
+
+  if "$operatingsystem" == "Debian" {
+    case "${::lsbdistcodename}" {
+      'stretch' : {
+        $hypervisor = "/boot/xen-4.8-${env::deb_arch}.gz"
+        $xen_packages = [ 'xen-utils', 'debootstrap', 'xen-tools', 'sysfsutils', "xen-linux-system-${env::deb_arch}" ]
+        file {
+          '/etc/xen/xend-config.sxp.puppet-bak':
+            ensure   => file,
+            owner    => root,
+            group    => root,
+            mode     => '0644',
+            source   => '/etc/xen/xend-config.sxp',
+            require  => Package['xen-utils'];
+        }
+
+        file_line {
+          '/etc/xen/xend-config.sxp: enable network bridge':
+            path     => '/etc/xen/xend-config.sxp',
+            line     => '(network-script network-bridge)',
+            match    => '^#\ \(network-script\ network-bridge\)',
+            require  => [ Package['xen-utils'], File['/etc/xen/xend-config.sxp.puppet-bak'] ],
+            before   => Exec['create_example_domU'];
+        }
+      }
+
+      'buster' : {
+        $hypervisor = "/boot/xen-4.11-${env::deb_arch}.gz"
+        $xen_packages = [ 'xen-utils', 'debootstrap', 'xen-tools', 'sysfsutils', "xen-system-${env::deb_arch}" ]
+      }
+
+      'bullseye' : {
+        $hypervisor = "/boot/xen-4.14-${env::deb_arch}.gz"
+        $xen_packages = [ 'xen-utils', 'debootstrap', 'xen-tools', 'sysfsutils', "xen-system-${env::deb_arch}" ]
+      }
+    }
+
+    file_line {
+      '/etc/xen-tools/xen-tools.conf: change dir':
+        path     => '/etc/xen-tools/xen-tools.conf',
+        line     => 'dir = /opt/xen',
+        match    => '^ *dir *=',
+        require  => File['/etc/xen-tools/xen-tools.conf.puppet-bak'],
+        before   => Exec['create_example_domU'];
+      '/etc/xen-tools/xen-tools.conf: change size':
+        path     => '/etc/xen-tools/xen-tools.conf',
+        line     => 'size = 600M',
+        match    => '^ *size *=',
+        require  => File['/etc/xen-tools/xen-tools.conf.puppet-bak'],
+        before   => Exec['create_example_domU'];
+      '/etc/xen-tools/xen-tools.conf: change memory':
+        path     => '/etc/xen-tools/xen-tools.conf',
+        line     => 'memory = 128M',
+        match    => '^ *memory *=',
+        require  => File['/etc/xen-tools/xen-tools.conf.puppet-bak'],
+        before   => Exec['create_example_domU'];
+      '/etc/xen-tools/xen-tools.conf: change swap':
+        path     => '/etc/xen-tools/xen-tools.conf',
+        line     => 'swap = 128M',
+        match    => '^ *swap *=',
+        require  => File['/etc/xen-tools/xen-tools.conf.puppet-bak'],
+        before   => Exec['create_example_domU'];
+      '/etc/xen-tools/xen-tools.conf: change distribution':
+        path     => '/etc/xen-tools/xen-tools.conf',
+        line     => "dist = ${::lsbdistcodename}",
+        match    => '^ *dist *=',
+        require  => File['/etc/xen-tools/xen-tools.conf.puppet-bak'],
+        before   => Exec['create_example_domU'];
+      '/etc/xen-tools/xen-tools.conf: change arch':
+        path     => '/etc/xen-tools/xen-tools.conf',
+        line     => "arch = ${env::deb_arch}",
+        match    => '^ *arch *=',
+        require  => File['/etc/xen-tools/xen-tools.conf.puppet-bak'],
+        before   => Exec['create_example_domU'];
+      '/etc/xen-tools/xen-tools.conf: change mirror':
+        path     => '/etc/xen-tools/xen-tools.conf',
+        line     => 'mirror = http://deb.debian.org/debian/',
+        match    => '^ *mirror *=',
+        require  => File['/etc/xen-tools/xen-tools.conf.puppet-bak'],
+        before   => Exec['create_example_domU'];
+      '/etc/xen-tools/xen-tools.conf: change vmlinuz in xen-tools.conf':
+        path     => '/etc/xen-tools/xen-tools.conf',
+        line     => 'kernel = /vmlinuz',
+        match    => '^kernel = /boot/vmlinuz',
+        require  => File['/etc/xen-tools/xen-tools.conf.puppet-bak'],
+        before   => Exec['create_example_domU'];
+      '/etc/xen-tools/xen-tools.conf: change initrd.img path in xen-tools.conf':
+        path     => '/etc/xen-tools/xen-tools.conf',
+        line     => 'initrd = /initrd.img',
+        match    => '^initrd = /boot/initrd.img',
+        require  => File['/etc/xen-tools/xen-tools.conf.puppet-bak'],
+        before   => Exec['create_example_domU'];
+    }
+  }
+
+  package {
+    $xen_packages :
+      ensure   => installed;
+      #notify   => Exec['update-grub'];
+  }
+  file {
+    '/hypervisor':  # Given in the dsc file to kadeploy to configure /boot/grub/grub.cfg correctly.
+      ensure   => link,
+      target   => "$hypervisor";
+    '/root/.ssh/id_rsa':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0600',
+      source   => 'puppet:///modules/env/xen/xen/id_rsa';
+    '/root/.ssh/id_rsa.pub':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0600',
+      source   => 'puppet:///modules/env/xen/xen/id_rsa.pub';
+    '/etc/xen-tools/skel/root':
+      ensure   => directory,
+      owner    => root,
+      group    => root,
+      mode     => '0700',
+      require  => Package['xen-tools'];
+    '/etc/xen-tools/skel/root/.ssh':
+      ensure   => directory,
+      owner    => root,
+      group    => root,
+      mode     => '0700',
+      require  => File['/etc/xen-tools/skel/root'];
+    '/etc/xen-tools/skel/root/.ssh/authorized_keys': # Line content defined below
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0600',
+      require  => File['/etc/xen-tools/skel/root/.ssh'];
+    '/etc/xen-tools/xen-tools.conf.puppet-bak':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => '/etc/xen-tools/xen-tools.conf',
+      require  => Package['xen-tools'];
+    '/usr/local/bin/random_mac':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0755',
+      source   => 'puppet:///modules/env/xen/xen/random_mac';
+    '/usr/sbin/xen-g5k':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0755',
+      source   => 'puppet:///modules/env/xen/xen/xen-g5k';
+    '/etc/systemd/system/xen-g5k.service':
+      ensure   => file,
+      owner    => root,
+      group    => root,
+      mode     => '0644',
+      source   => 'puppet:///modules/env/xen/xen/xen-g5k.service',
+      notify   => Exec['daemon-reload'];
+    '/etc/systemd/system/multi-user.target.wants/xen-g5k.service':
+      ensure   => link,
+      target   => '/etc/systemd/system/xen-g5k.service',
+      require  => File['/etc/systemd/system/xen-g5k.service'],
+      notify   => Exec['daemon-reload'];
+  }
+
+  exec {
+    'daemon-reload':
+      command     => '/bin/systemctl daemon-reload',
+      refreshonly => true;
+  }
+
+  if $env::target_g5k {
+    file {
+      '/etc/xen-tools/skel/etc':
+        ensure   => directory,
+        owner    => root,
+        group    => root,
+        mode     => '0644',
+        require  => Package['xen-tools'];
+      '/etc/xen-tools/skel/etc/apt':
+        ensure   => directory,
+        owner    => root,
+        group    => root,
+        mode     => '0644',
+        require  => File['/etc/xen-tools/skel/etc'];
+      '/etc/xen-tools/skel/etc/apt/apt.conf.d':
+        ensure   => directory,
+        owner    => root,
+        group    => root,
+        mode     => '0644',
+        require  => File['/etc/xen-tools/skel/etc/apt'];
+      '/etc/xen-tools/skel/etc/dhcp':
+        ensure   => directory,
+        owner    => root,
+        group    => root,
+        mode     => '0644',
+        require  => File['/etc/xen-tools/skel/etc'];
+      '/etc/xen-tools/skel/etc/dhcp/dhclient-exit-hooks.d':
+        ensure   => directory,
+        owner    => root,
+        group    => root,
+        mode     => '0644',
+        require  => File['/etc/xen-tools/skel/etc/dhcp'];
+      '/etc/xen-tools/skel/etc/dhcp/dhclient-exit-hooks.d/g5k-update-host-name':
+        ensure   => file,
+        owner    => root,
+        group    => root,
+        mode     => '0644',
+        source   => 'puppet:///modules/env/min/network/g5k-update-host-name',
+        require  => File['/etc/xen-tools/skel/etc/dhcp/dhclient-exit-hooks.d'];
+    }
+  }
+
+  file_line {
+    '/etc/xen-tools/skel/root/.ssh/authorized_keys dom0_key':
+      line     => file('env/xen/xen/id_rsa.pub'),
+      path     => '/etc/xen-tools/skel/root/.ssh/authorized_keys',
+      require  => File['/etc/xen-tools/skel/root/.ssh/authorized_keys'];
+  }
+
+
+  exec {
+    'create_example_domU':
+      command  => '/usr/bin/xen-create-image --hostname=domU --role=udev --genpass=0 --password=grid5000 --dhcp --mac=$(random_mac) --bridge=br0 --size=1G --memory=512M',
+      creates  => '/etc/xen/domU.cfg',
+      timeout  => 1200,
+      require => [
+        Package['xen-tools', 'xen-utils'],
+        File_line['/etc/xen-tools/skel/root/.ssh/authorized_keys dom0_key'],
+        File['/etc/xen-tools/xen-tools.conf.puppet-bak', '/usr/local/bin/random_mac']
+      ];
+  }
+}
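The create_example_domU exec expands $(random_mac) in a shell, so it relies on /usr/local/bin being in the PATH the command runs with. A slightly more explicit variant would pin the provider and search path; a sketch, assuming the same command line as above:

    exec { 'create_example_domU':
      command  => '/usr/bin/xen-create-image --hostname=domU --role=udev --genpass=0 --password=grid5000 --dhcp --mac=$(random_mac) --bridge=br0 --size=1G --memory=512M',
      provider => shell,
      path     => ['/usr/local/bin', '/usr/bin', '/usr/sbin', '/bin', '/sbin'],
      creates  => '/etc/xen/domU.cfg',
      timeout  => 1200,
    }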
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/manifests/xen/install_grub.pp 
b/grid5000/steps/data/setup/puppet/modules/env/manifests/xen/install_grub.pp
new file mode 100644
index 0000000..4abc575
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/manifests/xen/install_grub.pp
@@ -0,0 +1,14 @@
+class env::xen::install_grub () {
+
+  package {
+    'grub2':
+      ensure      => installed;
+  }
+
+  exec {
+    'update-grub':
+      command     => "/usr/sbin/update-grub2",
+      refreshonly => true,
+      require     => Package['grub2'];
+  }
+}
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/templates/base/omnipath/scibian.key.erb
 
b/grid5000/steps/data/setup/puppet/modules/env/templates/base/omnipath/scibian.key.erb
new file mode 100644
index 0000000..e98da61
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/templates/base/omnipath/scibian.key.erb
@@ -0,0 +1,51 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFfs3zwBEAC7PdMDC7crMfyt4MxEcbvR28dNnRNpngQNu1mtDWUXUmslZliE
+2GaLnHNeIwGQZVN1HBVcbHGbzGzxmHk3l9EPpruBbNItvQKXTgDjxFyBlyeR4vnL
+RijAWgoCJ1oFE3NOiGcWtrzUame/VFqqHkj/LD1tqtXTroQ5IxX8U4LJamrn5mdO
+IaEQKzkcxbnK6T96NUTbucFih2I+JXRy0ELhHj2+3iSfad5YgC142aiU7aiMKgpO
+taX7YwJ5a5Wv307VtK2JlUknn8SBC8Xo5qSxcppXhJg4jCJirslct9xYvp1DTG+2
+2n6TeVit7XwsTu6e4/C9B9WPfihEZl8Q8svrv8yBWj8PKRRgITh88JW5x9eLWCRS
+ebyPhj8VE+7a8youyy0i1hZQUaTI8LqR0FiXeNFWNM2cLMyEEcVOQ46eSl7lL4iq
+IWtvyCIBvqUXAnk2sp2yqBhQhrtYnM8BF4qwbqpnximmREOk/g8lVmz5Sf5BFl58
+gxs6HsxEepzFen6Y761bF3Cg9haPBolg/2WNkVGmLKZoC3LFankrfVhQF3nlhbLH
+ul8mF/3bBnRU3TdbCir4ErqXvDbSyfzTPONzd8TAm+Nq/qgo8tgYI8wegIwzMqDm
+O4PhFtLcHZEkeLbybjgHGi9K0hB0X2HMw0HojXI43nIRcTlFqEZ71enYHwARAQAB
+tDNTY2liaWFuIEF1dG9tYXRpYyBBcmNoaXZlIEtleSA8YXJjaGl2ZUBzY2liaWFu
+Lm9yZz6JAjgEEwECACIFAlfs3zwCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheA
+AAoJEMp1sqgOHPjpfUYP/22MviI1HtC1QxzkQ9VTOh2D8KFx+V9aFODUV4jFo+Vm
+rEoClyB0WkjhVf78vfh6H1DyBFwakM9RYheA0KLxhnRqr0rIUJSVdTWXbxT3Oust
+hSYvuwP6ED9XEwBVYHhk/E0udybMpeGLIgN8wrsJhYjRu0OQQUIMVMUuBQfRm/Xa
+H+89v9oWApchmn2ViMOH2dBtYub1VUYbIzthhModlIqO2yZh/NA/7ihqiUyTbp0I
+EzxDmQcDXbDtazdgz9CGBqHMtkHvSruETDZ/29n+Y9Iq4U7OyfPaZ/Cjwqbev7UW
+oy5VcZU1KF8GeqTSxCDLWruiWB6ISUnYG/sR8YVVbfFabA9VMkKpyPupeXBDdvUV
+0ye2SGfqIvHB7XogMYjVpac1ssPaBny7ZGIU2MpodXEKl8RZV8VkAP2etF0JtfuW
++jPJcdWMCHxPoh4yVVeF/7FUbfm9NmjMWxnTepFHf0OAEb/zOfn/PXsXUFPNC9cA
+yb0GrmO8OEqK7itCdmDFoWqQBTgiU7njOMMy8GtX8K5mY57ow04Iu5e5IvnXMCRJ
+VR+O6Sxu4z41PaLo2OZ0PMKQ+8GMCIKQp/3FTsXGNYlq6gRQUrxmZgFeQikKBQmE
+673L+98k1LBBu1KDLgushgRjB57+2jP6EpXExm9Xzb2VvVignNxzLxwxWZqkOYAy
+uQINBFfs3zwBEADNfrDRsoDT8ltl6MFF8SpQZRzwhuXaD/MWOX7yK1MpSDsu3jVa
+IgD0/DoZd5E1gMQZlhiQaG5mhksg2yh9Bm67sXFJYHN80q0mgL20Tudsf4Votfrn
+QDnj3osq6cydwVUtbQK9J36xrkHlZEriJ/hwWJVL6HMTs35j/+U3r8SgLJ0ESr/X
+uxRDxe/hrGCBfLu2JXO2ach+WGAqDPV6nBVY7XsDlly78Ds7b9LU9sq+23LpOE72
+oHr3iUmLmcat5aDVGv+z8PORHlkGXq7UeUd3mXubzv2QZ3/pyxwDhUbXBIwVxhxt
+1cIDqooCyqEwR011CNGWUDsGv81teS9P7EfdhZBuMVzgJ5HY5pqsL0YC6KjFg89O
+zdEo80gIjg0LsKwGigU6n7aJx0cjUXfLzg07kySlRCrGi5dTswnMd8rwtBiDLlyr
+9qqmZB8mYWbg1TBDHnac8kguuMDeKkrVsviMDAgokDbNb0UzUF7qjumHf35ABZxn
+bRGeWkm0YyMmBI8VJdzxaZdBd6P0rq21l2YYf80NpT3K4UeCukITFSyAcGnWN9WX
+AraQ4etXDxsb/WqR6EiDz5lWNmC8rAnRPgXCdjjBomrQWp4pHrkCZCOR3hc8aasz
+Jnh/5iJ2uknCbDJx3xwXEByYeMhZ7qk2Dw1PGDYvoq1PA39dtMX55y+YnQARAQAB
+iQIfBBgBAgAJBQJX7N88AhsMAAoJEMp1sqgOHPjpfIkP/j1rUx3cWH/5x3cYziqd
+X3bO/g5AUfr2yswjFeMIKk3uQVJ3XCXjRsqWRvrPbKiPO5uE1Rwau8ZA9FfO8Ayd
+xT/X93VEcPpiXzD6YFOMrWOYnoQR4+k22tbukJLEVnKKG4WfJKMrilRrNdXqLkPV
+jGTyZ2KsB8lr8wIGc2iZ+bCaHcJnxxeqLZGD1JC2hyRc7+r4VXcvD0adeWVVrkgp
+S1yCLG4FJJbUpd5saOsl73IK+TMf51IxK949rHZBPDUvAwS9yTwNZRK/sbdpk9i8
+5C7yc+4YpZQc6Fe4MYQ+3w4SRmKI003yE+UNlmDWLYyr+EwS9GZ9fEezkMCCyyYD
+52LWg4ey6UQLN0CH9wYEESFrPK5WmYj2VWGvH/pzmPtuhdPueiVQ5SohPbL60RK5
+7eexznLWPdx52OyWAc/iGgi3cV4M+4im/7x+jpYJ4HdQCeMKFeAzsg4zLBpLlCYm
+Av+/ctAasKSIV741GLexE+6zDKj9Y1XcYDRIhN489vIBqwe0CzD9k5+8s3YK7m/d
+o8boK16jhs/1RGx2NsBnuuvrLyiu1JmOdvn7CNNx1UX6pFDbwtCifmGycl2FwXJO
+iakf8FAsJ+eZHc3eshTif/a2Rl6Y6BXdfc01SHztOo3QBdohkiUK96g7SzXjKGxU
+91mtqulGCdHFNf5aXnuTInTN
+=f70J
+-----END PGP PUBLIC KEY BLOCK-----
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/templates/common/apt_pinning.erb 
b/grid5000/steps/data/setup/puppet/modules/env/templates/common/apt_pinning.erb
new file mode 100644
index 0000000..e824873
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/templates/common/apt_pinning.erb
@@ -0,0 +1,6 @@
+# Pinning for <%= @name %>, installed by Puppet
+
+Package: <%= @packages %>
+Pin: version <%= @version %>
+Pin-Priority: <%= @priority %>
+
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/templates/min/motd.erb 
b/grid5000/steps/data/setup/puppet/modules/env/templates/min/motd.erb
new file mode 100644
index 0000000..0e3affd
--- /dev/null
+++ b/grid5000/steps/data/setup/puppet/modules/env/templates/min/motd.erb
@@ -0,0 +1,8 @@
+<%= @userdistribname.capitalize %>-<%= scope.lookupvar('env::g5k_arch') %>-<%= scope.lookupvar('env::variant') %>-<%= scope.lookupvar('env::version') %> (Image based on Debian <%= @lsbdistcodename.capitalize %> for <%= scope.lookupvar('env::deb_arch_long') %>)
+  Maintained by support-staff <support-staff@lists.grid5000.fr>
+
+<% case scope.lookupvar('env::variant')
+      when "xen" -%>
+Note: dom0_mem=4096MB is set on the kernel command line, following the recommendation on
+https://wiki.xenproject.org/wiki/Tuning_Xen_for_Performance#Memory
+<% end -%>
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/templates/nfs/ldap/common-account.erb
 
b/grid5000/steps/data/setup/puppet/modules/env/templates/nfs/ldap/common-account.erb
new file mode 100644
index 0000000..6a44180
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/templates/nfs/ldap/common-account.erb
@@ -0,0 +1,18 @@
+#
+# /etc/pam.d/common-account - authorization settings common to all services
+#
+# This file is included from other service-specific PAM config files,
+# and should contain a list of the authorization modules that define
+# the central access policy for use on the system.  The default is to
+# only deny service to users whose accounts are expired in /etc/shadow.
+#
+
+<% if scope.lookupvar('env::variant') == "std" %>
+account sufficient      pam_access.so accessfile=/etc/security/access.conf
+account required        pam_access.so accessfile=/var/lib/oar/access.conf
+<% else -%>
+account required        pam_access.so accessfile=/etc/security/access.conf
+<% end -%>
+
+account sufficient      pam_ldap.so
+account required        pam_unix.so
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/templates/nfs/ntp/ntp.conf.erb 
b/grid5000/steps/data/setup/puppet/modules/env/templates/nfs/ntp/ntp.conf.erb
new file mode 100644
index 0000000..a625bbc
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/templates/nfs/ntp/ntp.conf.erb
@@ -0,0 +1,63 @@
+# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
+
+<% if scope.lookupvar('env::nfs::configure_ntp::drift_file') == nil %>
+# Used in the standard environment, not in the reference environments
+driftfile /var/lib/ntp/ntp.drift
+<% end %>
+
+# Enable this if you want statistics to be logged.
+#statsdir /var/log/ntpstats/
+
+statistics loopstats peerstats clockstats
+filegen loopstats file loopstats type day enable
+filegen peerstats file peerstats type day enable
+filegen clockstats file clockstats type day enable
+
+
+# You do need to talk to an NTP server or two (or three).
+#server ntp.your-provider.example
+
+# pool.ntp.org maps to about 1000 low-stratum NTP servers.  Your server will
+# pick a different set every time it starts up.  Please consider joining the
+# pool: <http://www.pool.ntp.org/join.html>
+pool 0.debian.pool.ntp.org iburst
+pool 1.debian.pool.ntp.org iburst
+pool 2.debian.pool.ntp.org iburst
+pool 3.debian.pool.ntp.org iburst
+
+
+# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
+# details.  The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
+# might also be helpful.
+#
+# Note that "restrict" applies to both servers and clients, so a configuration
+# that might be intended to block requests from certain clients could also end
+# up blocking replies from your own upstream servers.
+
+# By default, exchange time with everybody, but don't allow configuration.
+restrict -4 default kod notrap nomodify nopeer noquery limited
+restrict -6 default kod notrap nomodify nopeer noquery limited
+
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+
+
+
+# Needed for adding pool entries
+restrict source notrap nomodify noquery
+
+
+# Clients from this (example!) subnet have unlimited access, but only if
+# cryptographically authenticated.
+#restrict 192.168.123.0 mask 255.255.255.0 notrust
+
+
+# If you want to provide time to your local subnet, change the next line.
+# (Again, the address is an example only.)
+#broadcast 192.168.123.255
+
+# If you want to listen to time broadcasts on your local subnet, de-comment the
+# next lines.  Please do this only if you trust everybody on the network!
+#disable auth
+#broadcastclient
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/templates/std/dell/linux.dell.com.key.erb
 
b/grid5000/steps/data/setup/puppet/modules/env/templates/std/dell/linux.dell.com.key.erb
new file mode 100644
index 0000000..2adae76
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/templates/std/dell/linux.dell.com.key.erb
@@ -0,0 +1,68 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBE9RLYYBEADEAmJvn2y182B6ZUr+u9I29f2ue87p6HQreVvPbTjiXG4z2/k0
+l/Ov0DLImXFckaeVSSrqjFnEGUd3DiRr9pPb1FqxOseHRZv5IgjCTKZyj9Jvu6bx
+U9WL8u4+GIsFzrgS5G44g1g5eD4Li4sV46pNBTp8d7QEF4e2zg9xk2mcZKaT+STl
+O0Q2WKI7qN8PAoGd1SfyW4XDsyfaMrJKmIJTgUxe9sHGj+UmTf86ZIKYh4pRzUQC
+WBOxMd4sPgqVfwwykg/y2CQjrorZcnUNdWucZkeXR0+UCR6WbDtmGfvN5H3htTfm
+Nl84Rwzvk4NT/By4bHy0nnX+WojeKuygCZrxfpSqJWOKhQeH+YHKm1oVqg95jvCl
+vBYTtDNkpJDbt4eBAaVhuEPwjCBsfff/bxGCrzocoKlh0+hgWDrr2S9ePdrwv+rv
+2cgYfUcXEHltD5Ryz3u5LpiC5zDzNYGFfV092xbpG/B9YJz5GGj8VKMslRhYpUjA
+IpBDlYhOJ+0uVAAKPeeZGBuFx0A1y/9iutERinPx8B9jYjO9iETzhKSHCWEov/yp
+X6k17T8IHfVj4TSwL6xTIYFGtYXIzhInBXa/aUPIpMjwt5OpMVaJpcgHxLam6xPN
+FYulIjKAD07FJ3U83G2fn9W0lmr11hVsFIMvo9JpQq9aryr9CRoAvRv7OwARAQAB
+tGBEZWxsIEluYy4sIFBHUkUgMjAxMiAoUEcgUmVsZWFzZSBFbmdpbmVlcmluZyBC
+dWlsZCBHcm91cCAyMDEyKSA8UEdfUmVsZWFzZV9FbmdpbmVlcmluZ0BEZWxsLmNv
+bT6IRgQQEQoABgUCT1E0sQAKCRDKd5UdI7ZqnSh9AJ9jXsuabnqEfz5DQwWbmMDg
+aLGXiwCfXA9nDiBc1oyCXVabfbcMs8J0ktqIRgQTEQIABgUCT1FCzwAKCRAhq+73
+kvD8CSnUAJ4j3Q6r+DESBbvISTD4cX3WcpMepwCfX8oc1nHL4bFbVBS6BP9aHFcB
+qJ6IXgQQEQoABgUCT1E0yQAKCRB1a6cLEBnO1iQAAP98ZGIFya5HOUt6RAxL3TpM
+RSP4ihFVg8EUwZi9m9IVnwD/SXskcNW1PsZJO/bRaNVUZIUniDIxbYuj5++8KwBk
+sZiJAhwEEAEIAAYFAk9ROHAACgkQ2XsrqIahDMClCRAAhY59a8BEIQUR9oVeQG8X
+NZjaIAnybq7/IxeFMkYKr0ZsoxFy+BDHXl2bajqlILnd9IYaxsLDh+8lwOTBiHhW
+fNg4b96gDPg5h4XaHgZ+zPmLMuEL/hQoKdYKZDmM1b0YinoV5KisovpC5IZi1AtA
+Fs5EL++NysGeY3RffIpynFRsUomZmBx2Gz99xkiUXgbT9aXAJTKfsQrFLASM6LVi
+b/oA3Sx1MQXGFU3IA65ye/UXA4A53dSbE3m10RYBZoeS6BUQ9yFtmRybZtibW5RN
+OGZCD6/Q3Py65tyWeUUeRiKyksAKl1IGpb2awA3rAbrNd/xe3qAfR+NMlnidtU4n
+JO3GG6B7HTPQfGp8c69+YVaMML3JcyvACCJfVC0aLg+ru6UkCDSfWpuqgdMJrhm1
+2FM16r1X3aFwDA1qwnCQcsWJWManqD8ljHl3S2Vd0nyPcLZsGGuZfTCsK9pvhd3F
+ANC5yncwe5oi1ueiU3KrIWfvI08NzCsj8H2ZCAPKpz51zZfDgblMFXHTmDNZWj4Q
+rHG01LODe+mZnsCFrBWbiP13EwsJ9WAMZ6L+/iwJjjoi9e4IDmTOBJdGUoWKELYM
+fglpF5EPGUcsYaA9FfcSCgm9QR31Ixy+F95bhCTVT26xwTtNMYFdZ2rMRjA/TeTN
+fl5KHLi6YvAgtMaBT8nYKweJAjcEEwEKACEFAk9RLYYCGwMFCwkIBwMFFQoJCAsF
+FgIDAQACHgECF4AACgkQEoVJFDTYeG9eBw//asbM4KRxBfFi9RmzRNitOiFEN1Fq
+TbE5ujjN+9m9OEb+tB3ZFxv0bEPb2kUdpEwtMq6CgC5n8UcLbe5TF82Ho8r2mVYN
+Rh5RltdvAtDK2pQxCOh+i2b9im6GoIZa1HWNkKvKiW0dmiYYBvWlu78iQ8JpIixR
+IHXwEdd1nQIgWxjVix11VDr+hEXPRFRMIyRzMteiq2w/XNTUZAh275BaZTmLdMLo
+YPhHO99AkYgsca9DK9f0z7SYBmxgrKAs9uoNnroo4UxodjCFZHDu+UG2efP7SvJn
+q9v6XaC7ZxqBG8AObEswqGaLv9AN3t4oLjWhrAIoNWwIM1LWpYLmKjFYlLHaf30M
+YhJ8J7GHzgxANnkOP4g0RiXeYNLcNvsZGXZ61/KzuvE6YcsGXSMVKRVaxLWkgS55
+9OSjEcQV1TD65b+bttIeEEYmcS8jLKL+q2T1qTKnmD6VuNCtZwlsxjR5wHnxORju
+mtC5kbkt1lxjb0l2gNvT3ccA6FEWKS/uvtleQDeGFEA6mrKEGoD4prQwljPV0MZw
+yzWqclOlM7g21i/+SUj8ND2Iw0dCs4LvHkf4F1lNdV3QB41ZQGrbQqcCcJFm3qRs
+Yhi4dg8+24j3bNrSHjxosGtcmOLv15jXA1bxyXHkn0HPG6PZ27dogsJnAD1GXEH2
+S8yhJclYuL0JE0C5Ag0ET1Ev4QEQANlcF8dbXMa6vXSmznnESEotJ2ORmvr5R1zE
+gqQJOZ9DyML9RAc0dmt7IwgwUNX+EfY8LhXLKvHWrj2mBXm261A9SU8ijQOPHFAg
+/SYyP16JqfSx2jsvWGBIjEXF4Z3SW/JD0yBNAXlWLWRGn3dx4cHyxmeGjCAc/6t3
+22Tyi5XLtwKGxA/vEHeuGmTuKzNIEnWZbdnqALcrT/xK6PGjDo45VKx8mzLal/mn
+cXmvaNVEyld8MMwQfkYJHvZXwpWYXaWTgAiMMm+yEd0gaBZJRPBSCETYz9bENePW
+EMnrd9I65pRl4X27stDQ91yO2dIdfamVqti436ZvLc0L4EZ7HWtjN53vgXobxMzz
+4/6eH71BRJujG1yYEk2J1DUJKV1WUfV8Ow0TsJVNQRM/L9v8imSMdiR12BjzHism
+ReMvaeAWfUL7Q1tgwvkZEFtt3sl8o0eoB39R8xP4p1ZApJFRj6N3ryCTVQw536QF
+GEb+C51MdJbXFSDTRHFlBFVsrSE6PxB24RaQ+37w3lQZp/yCoGqA57S5VVIAjAll
+4Yl347WmNX9THogjhhzuLkXW+wNGIPX9SnZopVAfuc4hj0TljVa6rbYtiw6HZNmv
+vr1/vSQMuAyl+HkEmqaAhDgVknb3MQqUQmzeO/WtgSqYSLb7pPwDKYy7I1BojNiO
+t+qMj6P5ABEBAAGJAh4EGAEKAAkFAk9RL+ECGwwACgkQEoVJFDTYeG/6mA/4q6DT
+SLwgKDiVYIRpqacUwQLySufOoAxGSEde8vGRpcGEC+kWt1aqIiE4jdlxFH7Cq5Sn
+wojKpcBLIAvIYk6x9wofz5cx10s5XHq1Ja2jKJV2IPT5ZdJqWBc+M8K5LJelemYR
+Zoe50aT0jbN5YFRUkuU0cZZyqv98tZzTYO9hdG4sH4gSZg4OOmUtnP1xwSqLWdDf
+0RpnjDuxMwJM4m6G3UbaQ4w1K8hvUtZo9uC9+lLHq4eP9gcxnvi7Xg6mI3UXAXiL
+YXXWNY09kYXQ/jjrpLxvWIPwk6zb02jsuD08j4THp5kU4nfujj/GklerGJJp1ypI
+OEwV4+xckAeKGUBIHOpyQq1fn5bz8IituSF3xSxdT2qfMGsoXmvfo2l8T9QdmPyd
+b4ZGYhv24GFQZoyMAATLbfPmKvXJAqomSbp0RUjeRCom7dbD1FfLRbtpRD73zHar
+BhYYZNLDMls3IIQTFuRvNeJ7XfGwhkSE4rtY91J93eM77xNr4sXeYG+RQx4y5Hz9
+9Q/gLas2celP6Zp8Y4OECdveX3BA0ytI8L02wkoJ8ixZnpGskMl4A0UYI4w4jZ/z
+dqdpc9wPhkPj9j+eF2UInzWOavuCXNmQz1WkLP/qlR8DchJtUKlgZq9ThshK4gTE
+SNnmxzdpR6pYJGbEDdFyZFe5xHRWSlrC3WTbzg==
+=WBHf
+-----END PGP PUBLIC KEY BLOCK-----
diff --git 
a/grid5000/steps/data/setup/puppet/modules/env/templates/std/hwraid/hwraid.le-vert.net.key.erb
 
b/grid5000/steps/data/setup/puppet/modules/env/templates/std/hwraid/hwraid.le-vert.net.key.erb
new file mode 100644
index 0000000..c4ffa1f
--- /dev/null
+++ 
b/grid5000/steps/data/setup/puppet/modules/env/templates/std/hwraid/hwraid.le-vert.net.key.erb
@@ -0,0 +1,30 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQENBFHwGLoBCADGXHFostxbz4UzGFYtmox4pvyN1gMhq2KCuQ6f+FESa4HTd9L6
+XVhXWPCad3cdxBIls+41+AdZTWxWMu7DUdy8nMU1Ikfw6JeHcSx97G5BdxBVMjK4
+iMGfPdLfDgWf4BQ2h0dnTEWobt31WaqgNiNjNrKktqbymmF94pwYkwL53ydIA4zl
+8ZQRZooFigkS9WdoKjh30Pv/SWakILSLcSQFHK0dvSkeGd1NxT9dMNPAXXqLom4+
+7kCc0s04sS+0DwW16b0Hpb46mtsR9kzOnrE/Smj24uOGzNZen0oCc2Y7bfZlyaN+
+RlTkWEze7lemc4Byup/QWkhT0Er8F8uxexy5ABEBAAG0PEhXUmFpZCAoaHR0cDov
+L2h3cmFpZC5sZS12ZXJ0Lm5ldCkgPHJvb3RAaHdyYWlkLmxlLXZlcnQubmV0PokB
+OAQTAQIAIgUCUfAYugIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQYAUh
+DiOz07Rc4Af+N3dEZZHzLNVTjQ0+fCyeg8/flWOkR8DhP10cyoJhSHFTZRdXVshn
+kP4VmmUycVeURh76DmrIRe/9Oyca6aGXccRMqvq+HMgBPVwD5qNhcJPIuzqEvmlO
+6UIeW2ydil/v1pWu740fGntyFRQcsfqjReVPXw9K588F7MDMyL+31vLm6aorLSzR
+hvLhOmGisTs0wg2Oz9f4muauRy6cpQPw/Zi/P/F4WkQYscbHrSbhszj6OIg/vftR
+UbZ7QB26/+40B0ag4JzLpmj3scFxf/WdUl5LXazqhsbkurk7huV41BNKXi1+BS3c
+x6pFzWEHpiuG1j7U/nScGzEQpsMlUW9D+rkBDQRR8Bi6AQgAuhH1H0VLwcROI/5n
+9yTxSbTIZbyhUan3raAbit3pgo0zLagfUtp3vULVnm5ISqQcYFGLZoE1MUkmjGOL
+38W0lsIiZTaKOKXxBbLlPhhrvlXnNWAG/S1wnq7K+DV179KCTkUzaLRDbHvv999j
+9odBRtAkiTnCfHTMCN4AhydEejNxtlzJo4E5FecH4reimLI5euUdTltgCjixrbsa
+KbQftYpSMdXnLy2+00QZoXu0U/h4WZcMhOSEEiyGP9BY6m5G76n03HIeQ6eALDFu
+ryAgO+SB9rBrm/VN0kR/TZq0iA3uzLHC7zCw2aImipkr+rIuJOku0wH9MyowBbia
+bQtnCQARAQABiQEfBBgBAgAJBQJR8Bi6AhsMAAoJEGAFIQ4js9O0d5YH/3fNQgsC
+LvD0g2wdoksv5bG9CUOi9Bs0JHqI0LhXmPvMsbDojZ+zZle7KWNfK2227mWhmoG1
+WLujJSmTtxhEO1fXIdYjlDfk2uLJKuFi2wQX9n8dFDUmKY3CUJgeVZof1uQ/5C3D
+O06CcuOtf2d/+iijuW112aV1q1hoQqw71ojTET0iIV6lD/0i1eEBSSe1Ohb9yTGR
+VxTVrB78zU9hih4/Oq8wJT/Fv25aO1MDSc26CXAg0JA6IWvKal3BSPNhtz4L4FIg
+lXleArf9oJqxDO3TsV5zcLyxsIuRuxyP0+AKdSQUqv0dFi4Jf79OmvOmgwydhHjY
++f7quLbwiiDmPbU=
+=Yv6D
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/grid5000/steps/disable_checkpoint.yaml 
b/grid5000/steps/disable_checkpoint.yaml
new file mode 100644
index 0000000..cb571da
--- /dev/null
+++ b/grid5000/steps/disable_checkpoint.yaml
@@ -0,0 +1,3 @@
+- disable_checkpoint:
+  - on_checkpoint: redo
+  - exec_local: rm -f $${kameleon_cwd}/checkpoint_enabled
diff --git a/grid5000/steps/enable_checkpoint.yaml 
b/grid5000/steps/enable_checkpoint.yaml
new file mode 100644
index 0000000..8ac4751
--- /dev/null
+++ b/grid5000/steps/enable_checkpoint.yaml
@@ -0,0 +1,5 @@
+- enable_checkpoint:
+  - on_checkpoint: redo
+  - on_bootstrap_init:
+    - exec_local: rm -f $${kameleon_cwd}/checkpoint_enabled
+  - exec_local: touch $${kameleon_cwd}/checkpoint_enabled
diff --git a/grid5000/steps/env/bashrc b/grid5000/steps/env/bashrc
new file mode 100644
index 0000000..6306e37
--- /dev/null
+++ b/grid5000/steps/env/bashrc
@@ -0,0 +1,23 @@
+## aliases
+# If not running interactively, don't do anything
+export USER=${USER:-"root"}
+export HOME=${HOME:-"/root"}
+export PATH=/usr/bin:/usr/sbin:/bin:/sbin:$PATH
+export LC_ALL=${LC_ALL:-"POSIX"}
+
+export DEBIAN_FRONTEND=noninteractive
+
+if [ -t 1 ] ; then
+export TERM=xterm
+# for fast typing
+alias h='history'
+alias g='git status'
+alias l='ls -lah'
+alias ll='ls -lh'
+alias la='ls -Ah'
+
+# for human readable output
+alias ls='ls -h'
+alias df='df -h'
+alias du='du -h'
+fi
diff --git a/grid5000/steps/env/functions.sh b/grid5000/steps/env/functions.sh
new file mode 100644
index 0000000..8e9577b
--- /dev/null
+++ b/grid5000/steps/env/functions.sh
@@ -0,0 +1,203 @@
+## functions
+
+function fail {
+    echo $@ 1>&2
+    false
+}
+
+export -f fail
+
+function __download {
+    local src=$1
+    local dst=$2
+    if [ -n "$DOWNLOAD_SRC_URL" ]; then
+        src="$DOWNLOAD_SRC_URL"
+    fi
+    if [ -z "$src" ]; then
+        fail "No URL to download from"
+    fi
+    # If dst is unset or a directory, infers dst pathname from src
+    if [ -z "$dst" -o "${dst: -1}" == "/" ]; then
+        dst="$dst${src##*/}"
+        dst="${dst%%\?*}"
+    fi
+    local dstdir=${dst%/*}
+    if [ -n "$dstdir" -a "$dstdir" != "$dst" ]; then
+        mkdir -p $dstdir
+    fi
+    echo -n "Downloading: $src..."
+    # Put cURL first because it accepts URIs (like file://...)
+    if which curl >/dev/null; then
+        echo " (cURL)"
+        curl -S --fail -# -L --retry 999 --retry-max-time 0 "$src" -o "$dst" 
2>&1
+    elif which wget >/dev/null; then
+        echo " (wget)"
+        wget --retry-connrefused --progress=bar:force "$src" -O "$dst" 2>&1
+    elif which python >/dev/null; then
+        echo " (python)"
+        python - <<EOF
+import sys
+import time
+if sys.version_info >= (3,):
+    import urllib.request as urllib
+else:
+    import urllib
+
+
+def reporthook(count, block_size, total_size):
+    global start_time
+    if count == 0:
+        start_time = time.time()
+        return
+    duration = time.time() - start_time
+    progress_size = float(count * block_size)
+    if duration != 0:
+        if total_size == -1:
+            total_size = block_size
+            percent = 'Unknown size, '
+        else:
+            percent = '%.0f%%, ' % float(count * block_size * 100 / total_size)
+        speed = int(progress_size / (1024 * duration))
+        sys.stdout.write('\r%s%.2f MB, %d KB/s, %d seconds passed'
+                         % (percent, progress_size / (1024 * 1024), speed, 
duration))
+        sys.stdout.flush()
+
+urllib.urlretrieve('$src', '$dst', reporthook=reporthook)
+print('\n')
+EOF
+        true
+    else
+        fail "No way to download $src"
+    fi
+}
+
+export -f __download
+
+function __download_recipe_build() {
+    set -e
+    local recipe=$1
+    local version=${2:-latest}
+    local do_checksum=${3:-true}
+    local do_checksign=${4:-false}
+    local do_cache=${5:-false}
+    local builds_url=${6:-http://kameleon.imag.fr/builds}
+    local dest_dir="${7:-$recipe}"
+    local dest=""
+    mkdir -p $dest_dir
+    pushd $dest_dir > /dev/null
+    echo "Downloading $recipe ($version):"
+    __download $builds_url/${recipe}_$version.manifest
+    if [ "$do_checksign" == "true" ]; then
+        __download $builds_url/${recipe}_$version.manifest.sign
+        gpg --verify ${recipe}_$version.manifest{.sign,} || fail "Cannot 
verify signature"
+    fi
+    for f in $(< ${recipe}_$version.manifest); do
+        if [[ $f =~ ^$recipe-cache_ ]] && [ "$do_cache" != "true" ]; then
+            continue
+        fi
+        if [[ $f =~ \.sha[[:digit:]]+sum$ ]]; then
+            if [ "$do_checksum" == "true" ]; then
+                __download $builds_url/$f
+                ${f##*.} -c $f || fail "Cannot verify checksum"
+                if [ "$do_checksign" == "true" ]; then
+                    __download $builds_url/$f.sign
+                    gpg --verify $f{.sign,} || fail "Cannot verify signature"
+                fi
+            fi
+        else
+            __download $builds_url/$f
+            echo -n "Link to version-less filename: "
+            dest=${f%_*}.tar.${f#*.tar.}
+            ln -fv $f $dest
+        fi
+    done
+    popd > /dev/null
+    export UPSTREAM_TARBALL="$dest_dir/$dest"
+    set +e
+}
+
+export -f __download_recipe_build
+
+function __download_grid5000_image() {
+    set -e
+    local kaenv_name=$1
+    local kaenv_user=$2
+    local kaenv_version=$3
+    local remote=$4
+    local dest_dir=${5:-$kaenv_name}
+    mkdir -p $dest_dir
+    echo "Retrieve image from Grid'5000 environment '$kaenv_name'"
+    ${remote:+ssh $remote }which kaenv3 > /dev/null || fail "kaenv3 command 
not found (${remote:-localhost})"
+    # retrieve image[file], image[kind] and image[compression] from kaenv3
+    declare -A image
+    __kaenv() { local k=${2%%:*}; image[$k]=${2#*:}; }
+    mapfile -s 1 -t -c1 -C __kaenv < <(${remote:+ssh $remote 
}kaenv3${kaenv_user:+ -u $kaenv_user}${kaenv_version:+ --env-version 
$kaenv_version} -p $kaenv_name | grep -A3 -e '^image:' | sed -e 's/ //g')
+    [ -n "${image[file]}" ] || fail "Failed to retrieve environment 
$kaenv_name"
+    if [ "${image[compression]}" == "gzip" ]; then
+        image[compression]="gz"
+    elif [ "${image[compression]}" == "bzip2" ]; then
+        image[compression]="bz2"
+    elif [ "${image[compression]}" == "zstd" ]; then
+        image[compression]="zst"
+    fi
+    image[protocol]=${image[file]%%:*}
+    image[path]=${image[file]#*://}
+    image[filename]=${image[path]##*/}
+    local 
dest=$dest_dir/${image[filename]%%.*}.${image[kind]}.${image[compression]}
+    if [ "${image[kind]}" == "tar" ]; then
+        if [ "${image[protocol]}" == "http" -o "${image[protocol]}" == "https" 
]; then
+            __download ${image[file]} $dest
+        else
+            if  [ "${image[protocol]}" == "server" ]; then
+                # If server:// => see if available locally (NFS) or fail, same 
as if local:// <=> ""
+                echo "Image is server side, try and fetch it from local file 
${image[path]}"
+            fi
+            [ -r ${image[path]} ] || fail "Cannot retrieve ${image[file]}"
+            cp -v ${image[path]} $dest
+        fi
+    else # dd or whatever
+        fail "Image format${image[kind]:+ ${image[kind]}} is not supported"
+    fi
+    export UPSTREAM_TARBALL=$dest
+    set +e
+}
+
+export -f __download_grid5000_image
+
+function __find_linux_boot_device() {
+    local PDEVICE=`stat -c %04D /boot`
+    for file in $(find /dev -type b 2>/dev/null) ; do
+        local CURRENT_DEVICE=$(stat -c "%02t%02T" $file)
+        if [ $CURRENT_DEVICE = $PDEVICE ]; then
+            ROOTDEVICE="$file"
+            break;
+        fi
+    done
+    echo "$ROOTDEVICE"
+}
+
+export -f __find_linux_boot_device
+
+
+function __find_free_port() {
+  local begin_port=$1
+  local end_port=$2
+
+  local port=$begin_port
+  local ret=$(nc -z 127.0.0.1 $port && echo in use || echo free)
+  while [ $port -le $end_port ] && [ "$ret" == "in use" ]
+  do
+    local port=$[$port+1]
+    local ret=$(nc -z 127.0.0.1 $port && echo in use || echo free)
+  done
+
+  # manage loop exits
+  if [[ $port -gt $end_port ]]
+  then
+    fail "No free port available between $begin_port and $end_port"
+  fi
+
+  echo $port
+}
+
+export -f __find_free_port
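
For illustration, a minimal sketch of how two of the helpers above could be
called from a step or an interactive bash session; the URL, destination
directory and port range below are placeholders, not values taken from the
recipes:

    . grid5000/steps/env/functions.sh

    # pick an unused local TCP port in a placeholder range
    port=$(__find_free_port 9000 9100)
    echo "temporary HTTP server could listen on port $port"

    # download a file; with a trailing slash the filename is inferred from the URL
    __download "http://example.org/some-archive.tar.gz" "/tmp/downloads/"
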
diff --git a/grid5000/steps/export/debian/clean_dhcp_leases.yaml 
b/grid5000/steps/export/debian/clean_dhcp_leases.yaml
new file mode 100644
index 0000000..85ee860
--- /dev/null
+++ b/grid5000/steps/export/debian/clean_dhcp_leases.yaml
@@ -0,0 +1,2 @@
+- clean_dhcp_leases:
+  - exec_local: virt-customize -a $${image_disk}.$${image_format} 
--run-command "rm -rf /var/lib/dhcp/*"
diff --git a/grid5000/steps/export/do_qcow2_finish_works.yaml 
b/grid5000/steps/export/do_qcow2_finish_works.yaml
new file mode 100644
index 0000000..be80e25
--- /dev/null
+++ b/grid5000/steps/export/do_qcow2_finish_works.yaml
@@ -0,0 +1,44 @@
+# Install cloud-init in the qcow2 output file (the tarball must be exported beforehand, so that it does not include cloud-init)
+- install_cloud_init:
+  - exec_local: |
+      if [[ "$${appliance_formats}" =~ "qcow2" ]]; then
+        echo "Install cloud_init in qcow2"
+        # First unset any proxy variable (set to http://127.0.0.1:8000 if kameleon's cache is enabled) so that virt-customize works correctly
+        (for e in $(env | grep -i _proxy); do unset ${e%%=*}; done; 
virt-customize -a $${output}.qcow2 --install cloud-init)
+        echo "Configure datasource and timeout for cloud_init"
+        virt-customize -a $${output}.qcow2 --run-command 'printf 
"datasource_list: [  NoCloud, Ec2, None ]\n" > 
/etc/cloud/cloud.cfg.d/91-set-datasources.cfg'
+        virt-customize -a $${output}.qcow2 --run-command 'printf 
"datasource:\n  Ec2:\n    timeout: 3\n    max_wait: -1\n" > 
/etc/cloud/cloud.cfg.d/92-set-ec2-timeout.cfg'
+        # Remove DHCP hook to let cloud-init handle hostname
+        virt-customize -a $${output}.qcow2 --run-command 'rm -f 
/etc/dhcp/dhclient-exit-hooks.d/g5k-update-host-name'
+      else
+        echo "No qcow2 export, nothing to do."
+      fi
+
+- fix_interface_name:
+  - exec_local: |
+      if [[ "$${appliance_formats}" =~ "qcow2" && "$${distrib}" == "debian" 
]]; then
+        virt-customize -a $${output}.qcow2 --run-command 'sed -i 
s/ens3/enp0s2/ /etc/network/interfaces'
+      else
+        echo "Nothing to do."
+      fi
+
+- setup_uefi_boot:
+   - exec_local: |
+      if [[ "$${qemu_uefi}" == "true" ]] && [[ "$${arch}" == "aarch64" ]] && 
[[ "$${appliance_formats}" =~ "qcow2" ]]; then
+        echo "Setting up ARM64 UEFI boot for qcow2 image"
+        virt-customize \
+          -a $${output}.qcow2 \
+          --run-command 'if ! [ -e /boot/efi/EFI/BOOT/BOOTAA64.EFI ]; then 
mkdir -p /boot/efi/EFI/BOOT ; cp /boot/efi/EFI/$${distrib}/grubaa64.efi 
/boot/efi/EFI/BOOT/BOOTAA64.EFI; fi'
+      else
+        echo "Nothing to do."
+      fi
+
+- sparsify_qcow2_image:
+  - exec_local: |
+      if [[ "$${appliance_formats}" =~ "qcow2" ]]; then
+        echo "Compress and reduce qcow2 size"
+        virt-sparsify --compress $${output}.qcow2 $${output}.qcow2.sparsed
+        mv -f $${output}.qcow2.sparsed $${output}.qcow2
+      else
+        echo "No qcow2 export, nothing to do."
+      fi
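
One way to sanity-check the qcow2 produced by these steps is to boot it under
QEMU with a NoCloud seed, which matches the datasource_list configured above.
This is only a sketch: the image name, credentials and memory size are
placeholders, and cloud-localds is assumed to be available (it ships with the
cloud-image-utils package):

    # build a NoCloud seed with throwaway credentials (placeholder values)
    printf '#cloud-config\npassword: test\nchpasswd: { expire: false }\nssh_pwauth: true\n' > user-data
    printf 'instance-id: test-001\nlocal-hostname: taler-test\n' > meta-data
    cloud-localds seed.img user-data meta-data

    # boot the exported image with the seed attached
    qemu-system-x86_64 -m 2048 -nographic \
      -drive file=output.qcow2,format=qcow2 \
      -drive file=seed.img,format=raw
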
diff --git a/grid5000/steps/export/export_g5k.yaml 
b/grid5000/steps/export/export_g5k.yaml
new file mode 100644
index 0000000..04b1358
--- /dev/null
+++ b/grid5000/steps/export/export_g5k.yaml
@@ -0,0 +1,84 @@
+# Generate a dsc file as used on grid'5000 by kaenv
+
+- dashes: "---" # kameleon eats my dash if I don't use this dirty hack :-(
+- g5k_version: "unknown"
+- g5k_kernel_path: "/vmlinuz"
+- g5k_initrd_path: "/initrd.img"
+- g5k_filesystem: "ext4"
+- g5k_author: "support-staff@lists.grid5000.fr"
+- g5k_visibility: "public"
+- g5k_destructive: "false"
+- g5k_tar_compression: "gzip"
+- g5k_postinst_compression: "gzip"
+
+# - save_as_tgz:
+#   - check_cmd_local: guestfish
+#   - check_cmd_local: gzip
+#   - exec_local: echo "Exporting appliance to $${output}.tgz"
+#   - exec_local: mkdir -p $${kameleon_cwd}/.mnt
+#   - exec_local: LIBGUESTFS_CACHEDIR=$${kameleon_cwd} guestmount --ro -i -a 
$${input} $${kameleon_cwd}/.mnt
+#   - exec_local: LIBGUESTFS_CACHEDIR=$${kameleon_cwd} tar -cf $${output}.tgz 
--gzip --numeric-owner --selinux --acls --xattrs -C $${kameleon_cwd}/.mnt .
+#   - exec_local: LIBGUESTFS_CACHEDIR=$${kameleon_cwd} guestunmount 
$${kameleon_cwd}/.mnt
+#   - exec_local: rmdir $${kameleon_cwd}/.mnt
+
+- generate_dsc:
+  - exec_local: echo "Creating description file for kaenv in $${output}.dsc"
+  - exec_local: |
+      if [[ "x$${g5k_variant}" != "xxen" ]]; then
+      cat << EOF > $${output}.dsc
+      $${dashes}
+      name: $${kameleon_recipe_name}
+      version: $${g5k_version}
+      description: $${distrib} $${release_number} ($${release}) for 
$${g5k_image_arch} - $${g5k_variant}
+      author: $${g5k_author}
+      visibility: $${g5k_visibility}
+      destructive: $${g5k_destructive}
+      os: linux
+      image:
+        file: $${g5k_tar_path}
+        kind: tar
+        compression: $${g5k_tar_compression}
+      postinstalls:
+      - archive: $${g5k_postinst_path}
+        compression: $${g5k_postinst_compression}
+        script: $${g5k_postinst_script}
+      boot:
+        kernel_params: "$${g5k_kernel_params}"
+        kernel: $${g5k_kernel_path}
+        initrd: $${g5k_initrd_path}
+      filesystem: $${g5k_filesystem}
+      partition_type: 131
+      multipart: false
+      EOF
+      else
+      cat << EOF > $${output}.dsc
+      $${dashes}
+      name: $${kameleon_recipe_name}
+      version: $${g5k_version}
+      description: $${distrib} $${release_number} ($${release}) for 
$${g5k_image_arch} - $${g5k_variant}
+      author: $${g5k_author}
+      visibility: $${g5k_visibility}
+      destructive: $${g5k_destructive}
+      os: xen
+      image:
+        file: $${g5k_tar_path}
+        kind: tar
+        compression: $${g5k_tar_compression}
+      postinstalls:
+      - archive: $${g5k_postinst_path}
+        compression: $${g5k_postinst_compression}
+        script: $${g5k_postinst_script}
+      boot:
+        kernel_params: "$${g5k_kernel_params}"
+        kernel: $${g5k_kernel_path}
+        initrd: $${g5k_initrd_path}
+        hypervisor: /hypervisor
+        hypervisor_params: "dom0_mem=4096M no-bootscrub"
+      filesystem: $${g5k_filesystem}
+      partition_type: 131
+      multipart: false
+      EOF
+      fi
+
+- generate_md5:
+  - exec_local: md5sum $${kameleon_recipe_name}.* > 
$${kameleon_recipe_name}.md5
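
Once the tarball and the .dsc description are generated, they are meant to be
consumed by kadeploy on Grid'5000. A rough usage sketch, assuming the files
have been copied to a frontend and $NODE holds a reserved node (file and node
names are placeholders):

    # deploy the image as an anonymous environment described by the .dsc file
    kadeploy3 -a debian11-x64-taler.dsc -m $NODE

    # or register it with kaenv so it can later be referenced by name
    kaenv3 -a debian11-x64-taler.dsc
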
diff --git a/grid5000/steps/export/export_vagrant_box.yaml 
b/grid5000/steps/export/export_vagrant_box.yaml
new file mode 100644
index 0000000..6c048ef
--- /dev/null
+++ b/grid5000/steps/export/export_vagrant_box.yaml
@@ -0,0 +1,42 @@
+- virtualbox_vmid: $${kameleon_recipe_name}_$${kameleon_short_uuid}
+- virtualbox_disk_filename: $${appliance_filename}.$${appliance_formats}
+- virtualbox_os_type: "Debian_64"
+- vagrant_box_filename: $${kameleon_cwd}/$${kameleon_recipe_name}.box
+
+- create_vbox_machine:
+  - on_export_clean:
+    - exec_local: |
+        if VBoxManage list vms | grep -q $${virtualbox_vmid}; then
+          echo "Deleting VBox machine $${virtualbox_vmid}"
+          VBoxManage unregistervm $${virtualbox_vmid} --delete
+        fi
+    - exec_local: |
+        if [ -e $${virtualbox_disk_filename} ]; then
+        echo "Deleting disk file $${virtualbox_disk_filename}"
+          rm $${virtualbox_disk_filename}
+        fi
+  - exec_local: echo "Creating VBox machine $${virtualbox_vmid}"
+  - exec_local: VBoxManage createvm --name $${virtualbox_vmid} --register
+  - exec_local: VBoxManage modifyvm $${virtualbox_vmid} --ostype 
$${virtualbox_os_type}
+  - exec_local: VBoxManage modifyvm $${virtualbox_vmid} --boot1 disk
+  - exec_local: VBoxManage modifyvm $${virtualbox_vmid} --memory 256
+  - exec_local: VBoxManage modifyvm $${virtualbox_vmid} --acpi on
+  - exec_local: VBoxManage modifyvm $${virtualbox_vmid} --nictype1 82540EM
+  - exec_local: VBoxManage modifyvm $${virtualbox_vmid} --nictype2 82540EM
+  - exec_local: VBoxManage modifyvm $${virtualbox_vmid} --nictype3 82540EM
+  - exec_local: VBoxManage modifyvm $${virtualbox_vmid} --nictype4 82540EM
+  - exec_local: VBoxManage modifyvm $${virtualbox_vmid} --nic1 nat 
--cableconnected1 on
+  - exec_local: VBoxManage storagectl $${virtualbox_vmid} --name "SATA 
Controller" --add sata --controller IntelAHCI --hostiocache on
+  - exec_local: |
+      VBoxManage storageattach $${virtualbox_vmid} \
+        --storagectl "SATA Controller" \
+        --port 0 \
+        --device 0 \
+        --type hdd \
+        --medium $${virtualbox_disk_filename}
+
+- save_box:
+  - check_cmd_local: vagrant
+  - exec_local: echo "Create vagrant box $${vagrant_box_filename}..."
+  - exec_local: rm -f $${vagrant_box_filename}
+  - exec_local: vagrant package --base $${virtualbox_vmid} --output 
$${vagrant_box_filename}
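
The resulting .box file can then be used with Vagrant as usual; a minimal
sketch with placeholder names:

    vagrant box add --name taler-grid5k ./taler-grid5k.box
    vagrant init taler-grid5k
    vagrant up --provider virtualbox
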
diff --git a/grid5000/steps/export/save_appliance_VM.yaml 
b/grid5000/steps/export/save_appliance_VM.yaml
new file mode 100644
index 0000000..b064d02
--- /dev/null
+++ b/grid5000/steps/export/save_appliance_VM.yaml
@@ -0,0 +1,23 @@
+#
+# Save Appliance from virtual machine
+#
+- export_appliance_script: $${kameleon_data_dir}/helpers/export_appliance.py
+
+# Zero free unallocated blocks from ext2/3 file-systems before export to
+# reduce image size
+- zerofree: true
+
+- save_appliance:
+  - check_cmd_local: python2
+  - exec_local: |
+      if [ "$${zerofree}" = "true" ]; then
+        EXPORT_OPTS="--zerofree"
+      else
+        EXPORT_OPTS=""
+      fi
+  - exec_local: |
+      python2 $${export_appliance_script} $${image_disk}.$${image_format} \
+        -o $${appliance_filename} \
+        --formats $${appliance_formats} \
+        --tar-compression-level $${appliance_tar_compression_level} \
+        --tar-excludes $${appliance_tar_excludes} $EXPORT_OPTS
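
For debugging, the export helper can also be invoked by hand with the same
flags the step passes; a sketch with placeholder values (the set of supported
formats is defined by export_appliance.py itself):

    python2 grid5000/steps/data/helpers/export_appliance.py base.qcow2 \
      -o debian11-x64-taler \
      --formats tar.gz \
      --tar-compression-level 9 \
      --zerofree
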
diff --git a/grid5000/steps/setup/create_user.yaml 
b/grid5000/steps/setup/create_user.yaml
new file mode 100644
index 0000000..d7c75cf
--- /dev/null
+++ b/grid5000/steps/setup/create_user.yaml
@@ -0,0 +1,11 @@
+# Create User
+
+- shell: /bin/bash
+
+- add_user:
+  - exec_in: useradd -m $${name} -s $${shell}
+  - exec_in: echo -n '$${name}:$${password}' | chpasswd
+
+- add_to_groups:
+  - exec_in: |
+      usermod -G "$(echo $${groups} | tr ' ' ',')" $${name}
diff --git a/grid5000/steps/setup/debian/clean_system.yaml 
b/grid5000/steps/setup/debian/clean_system.yaml
new file mode 100644
index 0000000..399c339
--- /dev/null
+++ b/grid5000/steps/setup/debian/clean_system.yaml
@@ -0,0 +1,34 @@
+- enable_lighten: false
+
+- clean_user:
+  - on_setup_clean:
+    - exec_in: |
+        if id kameleon > /dev/null 2>&1; then
+          echo "Removing the kameleon user"
+          userdel -r kameleon 2> >(grep -v "userdel: kameleon mail spool 
(/var/mail/kameleon) not found" )
+        fi
+
+- clean_apt:
+  - on_setup_clean:
+    - apt-get_in: autoremove
+    - apt-get_in: autoclean
+    - apt-get_in: purge
+    - apt-get_in: clean
+    - exec_in: |
+        if [ $${enable_lighten} = true ]; then
+          rm -rf /var/lib/apt/lists/*
+          rm -rf /usr/share/locale/*
+          rm -rf /usr/share/man/*
+          rm -rf /usr/share/doc/*
+        fi
+
+- clean_network:
+  - on_setup_clean:
+    - exec_in: rm -rf /var/lib/dhcp/*
+
+- clean_udev:
+  - on_setup_clean:
+    - exec_in: rm -rf /etc/udev/rules.d/70-persistent-net.rules
+    - exec_in: rm -rf /dev/.udev/
+    - exec_in: touch /etc/udev/rules.d/70-persistent-net.rules
+    - exec_in: rm -rf /lib/udev/rules.d/75-persistent-net-generator.rules
\ No newline at end of file
diff --git a/grid5000/steps/setup/debian/clean_unnecessary_packages.yaml 
b/grid5000/steps/setup/debian/clean_unnecessary_packages.yaml
new file mode 100644
index 0000000..f9cfa37
--- /dev/null
+++ b/grid5000/steps/setup/debian/clean_unnecessary_packages.yaml
@@ -0,0 +1,9 @@
+- default_packages_no_clean: gnupg linux-image-$${deb_arch} console-setup 
rsync locales firmware-bnx2 firmware-bnx2x firmware-qlogic
+- arch_packages_no_clean: grub-pc grub-efi-amd64-bin
+- other_packages_no_clean:
+
+- clean_unnecessary_packages:
+    - on_setup_clean:
+        - exec_in: apt-get update && apt-get install -y debfoster
+        - exec_in: yes | debfoster --quiet --force -o MaxPriority=standard 
-oUseRecommends=yes $${default_packages_no_clean} $${arch_packages_no_clean} 
$${other_packages_no_clean} || true
+        - apt-get_in: clean
diff --git a/grid5000/steps/setup/debian/configure_apt_sources.yaml 
b/grid5000/steps/setup/debian/configure_apt_sources.yaml
new file mode 100644
index 0000000..e399db1
--- /dev/null
+++ b/grid5000/steps/setup/debian/configure_apt_sources.yaml
@@ -0,0 +1,53 @@
+# Software Install
+- deb_components: "main contrib non-free"
+- deb_backports: false
+
+- configure_source_list:
+  - write_in:
+    - /etc/apt/sources.list
+    - |
+      deb $${deb_mirror_uri} $${release} $${deb_components}
+      deb-src $${deb_mirror_uri} $${release} $${deb_components}
+  - test:
+    - exec_in: test "$${release}" != "sid"
+    - group:
+      - append_in:
+        - /etc/apt/sources.list
+        - |
+          deb $${deb_mirror_uri}  $${release}-updates $${deb_components}
+          deb-src $${deb_mirror_uri} $${release}-updates $${deb_components}
+      - test:
+        # cf. 
https://lists.debian.org/debian-devel-announce/2019/07/msg00004.html
+        - exec_in: test "$${release}" != "testing" -a "$${release}" != 
"bullseye"
+        - append_in:
+          - /etc/apt/sources.list
+          - |
+            deb http://security.debian.org/ $${release}/updates 
$${deb_components}
+            deb-src http://security.debian.org/ $${release}/updates 
$${deb_components}
+        - append_in:
+
+          - /etc/apt/sources.list
+          - |
+            deb http://security.debian.org/debian-security 
$${release}-security $${deb_components}
+            deb-src http://security.debian.org/debian-security 
$${release}-security $${deb_components}
+
+- add_backports:
+  - test:
+    - exec_in: test "$${deb_backports}" == "true" -a "$${release}" != 
"testing" -a "$${release}" != "sid"
+    - group:
+      - append_in:
+        - /etc/apt/sources.list
+        - |
+          deb $${deb_mirror_uri} $${release}-backports $${deb_components}
+          deb-src $${deb_mirror_uri} $${release}-backports $${deb_components}
+      - test:
+        # cf: https://www.lucas-nussbaum.net/blog/?p=947
+        - exec_in: test "$${release}" != "jessie"
+        - append_in:
+          - /etc/apt/apt.conf.d/99no-check-valid-until
+          - |
+            Acquire::Check-Valid-Until "false";
+
+- update_repositories:
+  # Deactivate the check so that the cache system keeps working after a while...
+  - apt-get_in: -o Acquire::Check-Valid-Until=false update
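
As a concrete illustration, for release=bullseye with the default components
above, this step ends up writing a sources.list roughly like the following
(the mirror URI is shown here as deb.debian.org purely as an assumption; the
recipes take it from deb_mirror_uri):

    deb http://deb.debian.org/debian bullseye main contrib non-free
    deb-src http://deb.debian.org/debian bullseye main contrib non-free
    deb http://deb.debian.org/debian bullseye-updates main contrib non-free
    deb-src http://deb.debian.org/debian bullseye-updates main contrib non-free
    deb http://security.debian.org/debian-security bullseye-security main contrib non-free
    deb-src http://security.debian.org/debian-security bullseye-security main contrib non-free
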
diff --git a/grid5000/steps/setup/debian/configure_system.yaml 
b/grid5000/steps/setup/debian/configure_system.yaml
new file mode 100644
index 0000000..252a310
--- /dev/null
+++ b/grid5000/steps/setup/debian/configure_system.yaml
@@ -0,0 +1,28 @@
+# System Config
+- grub_cmdline_linux: ""
+
+- configure_locales:
+  # set locales programmatically, based on http://linux.livejournal.com/1880366.html
+  - exec_in: |
+      test ! -f /etc/locale.gen || \
+        (echo $${locales} | tr ' ' '\n' | xargs -I {} sed -i 's/^# {}/{}/' 
/etc/locale.gen)
+  - exec_in: locale-gen
+  - exec_in: update-locale LANG=$${lang}
+
+- set_timezone:
+  - exec_in: echo "$${timezone}" > /etc/timezone
+  - exec_in: ln -sf /usr/share/zoneinfo/$${timezone} /etc/localtime
+  - exec_in: "dpkg-reconfigure -f noninteractive tzdata 2>&1"
+
+- set_root_password:
+  - exec_in: echo -n 'root:$${root_password}' | chpasswd
+
+- configure_initramfs:
+  - write_in:
+    - /etc/initramfs-tools/conf.d/resume
+    - RESUME=none
+
+- configure_grub:
+  - exec_in: sed -i 's|^\(GRUB_CMDLINE_LINUX=\).*|\1"$${grub_cmdline_linux}"|' 
/etc/default/grub
+  - exec_in: update-grub
+
diff --git a/grid5000/steps/setup/debian/install_packages.yaml 
b/grid5000/steps/setup/debian/install_packages.yaml
new file mode 100644
index 0000000..a4b4c9a
--- /dev/null
+++ b/grid5000/steps/setup/debian/install_packages.yaml
@@ -0,0 +1,7 @@
+- apt_install_recommends: true
+
+- install_packages:
+  - test:
+    - exec_in: test "$${apt_install_recommends}" == "true"
+    - apt-get_in: install $${packages}
+    - apt-get_in: install --no-install-recommends $${packages}
diff --git a/grid5000/steps/setup/debian/minimal_install.yaml 
b/grid5000/steps/setup/debian/minimal_install.yaml
new file mode 100644
index 0000000..d1cdc69
--- /dev/null
+++ b/grid5000/steps/setup/debian/minimal_install.yaml
@@ -0,0 +1,6 @@
+
+- set_root_password:
+  - exec_in: echo -n 'root:$${root_password}' | chpasswd
+
+- upgrade_system:
+  - apt-get_in: dist-upgrade
diff --git a/grid5000/steps/setup/debian/run_orchestrator.yaml 
b/grid5000/steps/setup/debian/run_orchestrator.yaml
new file mode 100644
index 0000000..76074cd
--- /dev/null
+++ b/grid5000/steps/setup/debian/run_orchestrator.yaml
@@ -0,0 +1,43 @@
+# Provision a VM by launching a puppet agent (standalone)
+- puppet_build_path: "/tmp/puppet_recipes"
+- hiera_path: "/tmp/hiera"
+- puppet_local_path: $${kameleon_data_dir}/setup/puppet
+- hiera_local_path: $${kameleon_data_dir}/setup/hiera
+- version_file: modules/env/files/version
+- release_file: modules/env/files/min/image_versioning/release
+- kameleon_repo_name : "default"
+
+
+
+- import_puppet_recipes:
+  - exec_in: mkdir -p $${puppet_build_path}
+  - exec_local: rsync -e "ssh -F $${ssh_config_file}" -r 
$${puppet_local_path}/ $${kameleon_recipe_name}:$${puppet_build_path}
+- import_hiera:
+  - exec_in: mkdir -p $${hiera_path}
+  - exec_local: rsync -e "ssh -F $${ssh_config_file}" -r $${hiera_local_path}/ 
$${kameleon_recipe_name}:$${hiera_path}
+  - exec_in: puppet config set hiera_config $${hiera_path}/hiera.yaml
+
+# Store G5K environment release information
+- set_release:
+  - exec_in: echo 
"$${distrib}$${release_number}-$${g5k_image_arch}-$${g5k_variant}-$${g5k_version}"
 >> $${puppet_build_path}/$${release_file}
+  # this extracts last git commit hash from local repo
+  - pipe:
+    - exec_local: |
+        git rev-parse HEAD 2>/dev/null || echo "Error: Kameleon could not find a git repository locally or in $HOME/.kameleon.d/repos/$${kameleon_repo_name}/" ;
+    - exec_in: cat - >> $${puppet_build_path}/$${release_file}
+# Also store version
+- set_version:
+  - exec_in: echo "$${g5k_version}" > $${puppet_build_path}/$${version_file}
+
+- run_puppet:
+  - exec_in: |
+      set +e
+      if [ -z "$${g5k_variant}" ]; then
+        VARIANT=std
+      else
+        VARIANT=$${g5k_variant}
+      fi
+      puppet apply --detailed-exitcodes -d 
--modulepath=$${puppet_build_path}/modules:/etc/puppet/code/modules 
$${puppet_build_path}/manifests/$VARIANT.pp | tee /tmp/puppet_exec.log
+      ret=$?
+      echo $ret
+      if [ $ret -eq 2 -o $ret -eq 0 ] ; then true ; else false ; fi # Set exit 
code to 0
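
The exit-code handling at the end is needed because with --detailed-exitcodes
puppet reports success as either 0 (no changes) or 2 (changes applied), while
4 and 6 indicate resource failures. A standalone sketch of the same check:

    puppet apply --detailed-exitcodes /tmp/puppet_recipes/manifests/std.pp
    rc=$?
    # 0 = no changes, 2 = changes applied; anything else is treated as failure
    if [ $rc -eq 0 ] || [ $rc -eq 2 ]; then
        echo "puppet run OK (rc=$rc)"
    else
        echo "puppet run failed (rc=$rc)" >&2
        exit 1
    fi
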
diff --git a/grid5000/steps/setup/debian/setup_orchestrator.yaml 
b/grid5000/steps/setup/debian/setup_orchestrator.yaml
new file mode 100644
index 0000000..e6b67a5
--- /dev/null
+++ b/grid5000/steps/setup/debian/setup_orchestrator.yaml
@@ -0,0 +1,24 @@
+# Install and configure (if required) puppet
+# This is not done by the standard package installation mechanism
+# because we want to install a specific version
+
+
+- script_name: puppet_install.sh
+- script_path: /tmp
+
+- get_standalone_puppet_script:
+  - exec_in: apt-get install -y wget lsb-release puppet gnupg 
apt-transport-https
+  # We also install stdlib module that contains some useful functions
+  # We force the version of puppetlabs-stdlib and puppetlabs-apt so that we 
use a version that works on our old version of puppet
+  - exec_in: apt-get install -y ca-certificates ; puppet module install 
puppetlabs-stdlib --version 5.2.0
+  - exec_in: puppet module install puppetlabs-apt --version 
$${puppetlabs_apt_version}
+  - on_setup_clean:
+    # module apt must be uninstalled BEFORE stdlib for dependency reasons
+    - exec_in: puppet module uninstall puppetlabs-apt
+    - exec_in: puppet module uninstall puppetlabs-stdlib
+    # We mark these packages as "automatically installed" so they get auto-removed at the end of the orchestration step
+    - exec_in: apt-mark auto puppet lsb-release
+    - exec_in: apt-get --yes autoremove --purge | tee /tmp/temp_purge
+    # This is cleanup that SHOULD NOT BE NEEDED, but puppet is messy and leaves this behind, so we clean it up here
+    - exec_in: grep -q "Removing puppet" /tmp/temp_purge && (rm -rf 
/etc/puppet && rc=$? || rc=$?)
+    - exec_in: apt-get autoremove -y
diff --git a/grid5000/steps/setup/debian/setup_vagrant_box.yaml 
b/grid5000/steps/setup/debian/setup_vagrant_box.yaml
new file mode 100644
index 0000000..fb1f827
--- /dev/null
+++ b/grid5000/steps/setup/debian/setup_vagrant_box.yaml
@@ -0,0 +1,77 @@
+- puppet_deb_source: "distrib" #or "puppetlabs"
+- puppet_deb_url: "http://apt.puppetlabs.com/puppet-release-$${release}.deb"
+- virtualbox_deb_source: "distrib" #or "backports"
+
+- install_requirements:
+  - apt-get_in: install rsync curl linux-headers-amd64
+
+- install_virtualbox:
+  - test:
+    - exec_in: test "$${virtualbox_deb_source}" = "backports"
+    - group:
+      - write_in:
+        - /etc/apt/sources.list.d/virtualbox.list 
+        - deb $${deb_mirror_uri} $${release}-backports $${deb_components}
+      - apt-get_in: update
+      - apt-get_in: install virtualbox-guest-utils 
+      - exec_in: rm -f /etc/apt/sources.list.d/virtualbox.list 
+      - apt-get_in: update
+    - apt-get_in: install virtualbox-guest-utils 
+
+- enable_passwordless_sudo:
+  - exec_in: |
+      sed -i.bkp -e \
+      's/%sudo\s\+ALL=(ALL\(:ALL\)\?)\s\+ALL/%sudo ALL=NOPASSWD:ALL/g' \
+      /etc/sudoers
+
+- install_puppet:
+  - test:
+    - exec_in: test "$${puppet_deb_source}" = "puppetlabs"
+    - group:
+      - download_file_in:
+        - $${puppet_deb_url}
+        - $KAMELEON_WORKDIR/puppet.deb
+      - exec_in: dpkg -i $KAMELEON_WORKDIR/puppet.deb
+      - apt-get_in: update
+  - apt-get_in: install puppet
+  - exec_in: rm -f $KAMELEON_WORKDIR/puppet.deb
+
+- copy_insecure_sshkey:
+  - exec_in: mkdir -pm 700 /home/$${user_name}/.ssh/
+  - download_file_in:
+    - "https://raw.github.com/mitchellh/vagrant/master/keys/vagrant";
+    - /home/$${user_name}/.ssh/id_rsa
+  - download_file_in:
+    - "https://raw.github.com/mitchellh/vagrant/master/keys/vagrant.pub";
+    - /home/$${user_name}/.ssh/id_rsa.pub
+  - exec_in: cp /home/$${user_name}/.ssh/id_rsa.pub 
/home/$${user_name}/.ssh/authorized_keys
+  - exec_in: chmod 0600 /home/$${user_name}/.ssh/*
+
+- config_ssh:
+  - exec_in: echo "UseDNS no" >> /etc/ssh/sshd_config
+  - write_in:
+    - /home/$${user_name}/.ssh/config
+    - |
+      Host *
+      ForwardX11 no
+      StrictHostKeyChecking no
+      PasswordAuthentication no
+      AddressFamily inet
+  - exec_in: chmod 0600 /home/$${user_name}/.ssh/config
+  - exec_in: rsync -ah /home/$${user_name}/.ssh/ /root/.ssh
+  - exec_in: |
+      if [ -e /root/.ssh/.kameleon_authorized_keys ]; then
+        cat /root/.ssh/.kameleon_authorized_keys >> /root/.ssh/authorized_keys
+      fi
+  - exec_in: chown "$${user_name}:$${user_name}" -R /home/$${user_name}
+
+- customize_motd:
+  - exec_in: echo 'Welcome to your Vagrant-built virtual machine.' > /etc/motd
+
+- fix_network_interface_for_vbox:
+  - exec_in: sed -i -e 's/ens3/enp0s3/g' /etc/network/interfaces
+
+- cleanup:
+  - exec_in: |
+      echo "Adding a 2 sec delay to the interface up, to make the dhclient 
happy"
+      echo "pre-up sleep 2" >> /etc/network/interfaces
diff --git a/notes.txt b/notes.txt
new file mode 100644
index 0000000..48966ed
--- /dev/null
+++ b/notes.txt
@@ -0,0 +1,3 @@
+* option "other_packages_no_clean" in global for yaml image builder
+  not properly documented, forced us to hunt down why explictily
+  installed packages were removed again after the setup step
diff --git a/steps/setup/#taler_install.yaml# b/steps/setup/#taler_install.yaml#
new file mode 100644
index 0000000..b28de9d
--- /dev/null
+++ b/steps/setup/#taler_install.yaml#
@@ -0,0 +1,7 @@
+- install_taler:
+    - exec_in: |
+         echo "deb https://deb.taler.net/apt/debian bullseye main" > /etc/apt/sources.list.d/taler.list
+         wget -O - https://taler.net/taler-systems.gpg.key | apt-key add -
+         apt-get update
+         apt-get upgrade -y
+         apt-get install -y nginx postgresql-13 taler-exchange taler-auditor taler-merchant taler-exchange-offline taler-wallet-cli
diff --git a/steps/setup/.#taler_install.yaml b/steps/setup/.#taler_install.yaml
new file mode 120000
index 0000000..2a29569
--- /dev/null
+++ b/steps/setup/.#taler_install.yaml
@@ -0,0 +1 @@
+grothoff@lifeline.7254:1630226585
\ No newline at end of file
diff --git a/steps/setup/taler_install.yaml b/steps/setup/taler_install.yaml
new file mode 100644
index 0000000..b28de9d
--- /dev/null
+++ b/steps/setup/taler_install.yaml
@@ -0,0 +1,7 @@
+- install_taler:
+    - exec_in: |
+         echo "deb https://deb.taler.net/apt/debian bullseye main" > /etc/apt/sources.list.d/taler.list
+         wget -O - https://taler.net/taler-systems.gpg.key | apt-key add -
+         apt-get update
+         apt-get upgrade -y
+         apt-get install -y nginx postgresql-13 taler-exchange taler-auditor taler-merchant taler-exchange-offline taler-wallet-cli
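
Note that apt-key is deprecated in Debian bullseye. A hedged sketch of a
keyring-based equivalent of the step above, assuming the published key is
ASCII-armored (the keyring filename is a placeholder):

    wget -qO- https://taler.net/taler-systems.gpg.key \
      | gpg --dearmor > /usr/share/keyrings/taler-archive-keyring.gpg
    echo "deb [signed-by=/usr/share/keyrings/taler-archive-keyring.gpg] https://deb.taler.net/apt/debian bullseye main" \
      > /etc/apt/sources.list.d/taler.list
    apt-get update
    apt-get install -y nginx postgresql-13 taler-exchange taler-auditor taler-merchant taler-exchange-offline taler-wallet-cli
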
diff --git a/steps/setup/taler_install.yaml~ b/steps/setup/taler_install.yaml~
new file mode 100644
index 0000000..75f58bd
--- /dev/null
+++ b/steps/setup/taler_install.yaml~
@@ -0,0 +1,2 @@
+- install_ffmpeg:
+    - exec_in : apt-get update && apt-get install -y libsodium-dev
\ No newline at end of file

-- 
To stop receiving notification emails like this one, please contact
gnunet@gnunet.org.


