diff --git a/public/404.html b/public/404.html index 506ee230d75e936db3e3bcd8d7ddd5d15c776c42..4ee6587e3982179aa036d0945f42ad2e210411df 100644 --- a/public/404.html +++ b/public/404.html @@ -266,6 +266,34 @@ + + + + + + <li class="md-nav__item"> + <a href="/ch3-discover-ansible-tp/" class="md-nav__link"> + TP part 03 - Ansible + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="/ch4-extras-tp/" class="md-nav__link"> + TP Extras + </a> + </li> + + + + </ul> </nav> </li> @@ -328,6 +356,20 @@ + + + + + + <li class="md-nav__item"> + <a href="/ch3-discover-ansible-td/" class="md-nav__link"> + TD part 03 - Ansible + </a> + </li> + + + + </ul> </nav> </li> diff --git a/public/ch1-discover-docker-td/index.html b/public/ch1-discover-docker-td/index.html index 8b1bd393781ceb133983932b3250b5cb471159d4..f6ae7393f183efd1cf710d072911f3742f86d86c 100644 --- a/public/ch1-discover-docker-td/index.html +++ b/public/ch1-discover-docker-td/index.html @@ -273,6 +273,34 @@ + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-tp/" class="md-nav__link"> + TP part 03 - Ansible + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch4-extras-tp/" class="md-nav__link"> + TP Extras + </a> + </li> + + + + </ul> </nav> </li> @@ -509,6 +537,20 @@ + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-td/" class="md-nav__link"> + TD part 03 - Ansible + </a> + </li> + + + + </ul> </nav> </li> @@ -1133,7 +1175,7 @@ Head over to <a href="http://localhost:8888" target="_blank">http://localhost:88 <nav class="md-footer__inner md-grid" aria-label="Footer"> - <a href="../ch2-discover-github-actions-tp/" class="md-footer__link md-footer__link--prev" aria-label="Previous: TP part 02 - Github Actions" rel="prev"> + <a href="../ch4-extras-tp/" class="md-footer__link md-footer__link--prev" aria-label="Previous: TP Extras" rel="prev"> <div class="md-footer__button md-icon"> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12Z"/></svg> </div> @@ -1142,7 +1184,7 @@ Head over to <a href="http://localhost:8888" target="_blank">http://localhost:88 <span class="md-footer__direction"> Previous </span> - TP part 02 - Github Actions + TP Extras </div> </div> </a> diff --git a/public/ch1-discover-docker-tp/index.html b/public/ch1-discover-docker-tp/index.html index 7dbc1d97339050226f8193287a7c3d75ccb2145f..19ba80dc4770fbf36c749e1dc16ad90c53f829b7 100644 --- a/public/ch1-discover-docker-tp/index.html +++ b/public/ch1-discover-docker-tp/index.html @@ -492,6 +492,34 @@ + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-tp/" class="md-nav__link"> + TP part 03 - Ansible + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch4-extras-tp/" class="md-nav__link"> + TP Extras + </a> + </li> + + + + </ul> </nav> </li> @@ -554,6 +582,20 @@ + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-td/" class="md-nav__link"> + TD part 03 - Ansible + </a> + </li> + + + + </ul> </nav> </li> diff --git a/public/ch2-discover-github-actions-td/index.html b/public/ch2-discover-github-actions-td/index.html index 5700869bb2d5f47a6c20295caca9368a78f806a4..0c0ddf8aef83474436af10033b7dbcc1ee468176 100644 --- a/public/ch2-discover-github-actions-td/index.html +++ b/public/ch2-discover-github-actions-td/index.html @@ -273,6 +273,34 @@ + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-tp/" class="md-nav__link"> + TP part 03 - Ansible 
+ </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch4-extras-tp/" class="md-nav__link"> + TP Extras + </a> + </li> + + + + </ul> </nav> </li> @@ -433,6 +461,20 @@ + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-td/" class="md-nav__link"> + TD part 03 - Ansible + </a> + </li> + + + + </ul> </nav> </li> @@ -651,6 +693,21 @@ Now you are able to clone and publish work on your Github repository without ent </a> + + <a href="../ch3-discover-ansible-td/" class="md-footer__link md-footer__link--next" aria-label="Next: TD part 03 - Ansible" rel="next"> + <div class="md-footer__title"> + <div class="md-ellipsis"> + <span class="md-footer__direction"> + Next + </span> + TD part 03 - Ansible + </div> + </div> + <div class="md-footer__button md-icon"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M4 11v2h12l-5.5 5.5 1.42 1.42L19.84 12l-7.92-7.92L10.5 5.5 16 11H4Z"/></svg> + </div> + </a> + </nav> <div class="md-footer-meta md-typeset"> diff --git a/public/ch2-discover-github-actions-tp/index.html b/public/ch2-discover-github-actions-tp/index.html index 19e439aa40ba7a4ed6307b0d4ea0562d4a7be307..9a9cac7d56c7007c5837816d5745fb9b7fa1778c 100644 --- a/public/ch2-discover-github-actions-tp/index.html +++ b/public/ch2-discover-github-actions-tp/index.html @@ -404,6 +404,34 @@ + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-tp/" class="md-nav__link"> + TP part 03 - Ansible + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch4-extras-tp/" class="md-nav__link"> + TP Extras + </a> + </li> + + + + </ul> </nav> </li> @@ -466,6 +494,20 @@ + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-td/" class="md-nav__link"> + TD part 03 - Ansible + </a> + </li> + + + + </ul> </nav> </li> @@ -862,13 +904,13 @@ - <a href="../ch1-discover-docker-td/" class="md-footer__link md-footer__link--next" aria-label="Next: TD part 01 - Docker" rel="next"> + <a href="../ch3-discover-ansible-tp/" class="md-footer__link md-footer__link--next" aria-label="Next: TP part 03 - Ansible" rel="next"> <div class="md-footer__title"> <div class="md-ellipsis"> <span class="md-footer__direction"> Next </span> - TD part 01 - Docker + TP part 03 - Ansible </div> </div> <div class="md-footer__button md-icon"> diff --git a/public/ch3-discover-ansible-td/index.html b/public/ch3-discover-ansible-td/index.html new file mode 100644 index 0000000000000000000000000000000000000000..96fe2855051cb24923ea3b5eeee1860cfd943885 --- /dev/null +++ b/public/ch3-discover-ansible-td/index.html @@ -0,0 +1,653 @@ + +<!doctype html> +<html lang="en" class="no-js"> + <head> + + <meta charset="utf-8"> + <meta name="viewport" content="width=device-width,initial-scale=1"> + + + + <link rel="icon" href="../assets/images/favicon.png"> + <meta name="generator" content="mkdocs-1.3.0, mkdocs-material-8.2.15"> + + + + <title>TD part 03 - Ansible - Devops</title> + + + + <link rel="stylesheet" href="../assets/stylesheets/main.c382b1dc.min.css"> + + + <link rel="stylesheet" href="../assets/stylesheets/palette.cc9b2e1e.min.css"> + + + + <meta name="theme-color" content="#e92063"> + + + + + + + + + <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin> + <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,300i,400,400i,700,700i%7CRoboto+Mono:400,400i,700,700i&display=fallback"> + <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style> + + + + <script>__md_scope=new 
URL("..",location),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script> + + + + + + </head> + + + + + + + + <body dir="ltr" data-md-color-scheme="" data-md-color-primary="pink" data-md-color-accent=""> + + + + <input class="md-toggle" data-md-toggle="drawer" type="checkbox" id="__drawer" autocomplete="off"> + <input class="md-toggle" data-md-toggle="search" type="checkbox" id="__search" autocomplete="off"> + <label class="md-overlay" for="__drawer"></label> + <div data-md-component="skip"> + + + <a href="#discover-ansible" class="md-skip"> + Skip to content + </a> + + </div> + <div data-md-component="announce"> + + </div> + + + + +<header class="md-header" data-md-component="header"> + <nav class="md-header__inner md-grid" aria-label="Header"> + <a href=".." title="Devops" class="md-header__button md-logo" aria-label="Devops" data-md-component="logo"> + + <img src="../assets/logo.png" alt="logo"> + + </a> + <label class="md-header__button md-icon" for="__drawer"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M3 6h18v2H3V6m0 5h18v2H3v-2m0 5h18v2H3v-2Z"/></svg> + </label> + <div class="md-header__title" data-md-component="header-title"> + <div class="md-header__ellipsis"> + <div class="md-header__topic"> + <span class="md-ellipsis"> + Devops + </span> + </div> + <div class="md-header__topic" data-md-component="header-topic"> + <span class="md-ellipsis"> + + TD part 03 - Ansible + + </span> + </div> + </div> + </div> + + + + <label class="md-header__button md-icon" for="__search"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5Z"/></svg> + </label> + <div class="md-search" data-md-component="search" role="dialog"> + <label class="md-search__overlay" for="__search"></label> + <div class="md-search__inner" role="search"> + <form class="md-search__form" name="search"> + <input type="text" class="md-search__input" name="query" aria-label="Search" placeholder="Search" autocapitalize="off" autocorrect="off" autocomplete="off" spellcheck="false" data-md-component="search-query" required> + <label class="md-search__icon md-icon" for="__search"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5Z"/></svg> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12Z"/></svg> + </label> + <nav class="md-search__options" aria-label="Search"> + + <button type="reset" class="md-search__icon md-icon" aria-label="Clear" tabindex="-1"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M19 6.41 17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12 19 6.41Z"/></svg> + </button> + </nav> + + </form> + <div class="md-search__output"> + <div class="md-search__scrollwrap" data-md-scrollfix> + <div class="md-search-result" data-md-component="search-result"> + <div class="md-search-result__meta"> + Initializing search + </div> + <ol 
class="md-search-result__list"></ol> + </div> + </div> + </div> + </div> +</div> + + + </nav> + +</header> + + <div class="md-container" data-md-component="container"> + + + + + +<nav class="md-tabs" aria-label="Tabs" data-md-component="tabs"> + <div class="md-tabs__inner md-grid"> + <ul class="md-tabs__list"> + + + + + + + + + + <li class="md-tabs__item"> + <a href="../ch1-discover-docker-tp/" class="md-tabs__link"> + TP + </a> + </li> + + + + + + + + + + + + + + <li class="md-tabs__item"> + <a href="../ch1-discover-docker-td/" class="md-tabs__link md-tabs__link--active"> + TD + </a> + </li> + + + + </ul> + </div> +</nav> + + + + <main class="md-main" data-md-component="main"> + <div class="md-main__inner md-grid"> + + + + <div class="md-sidebar md-sidebar--primary" data-md-component="sidebar" data-md-type="navigation" > + <div class="md-sidebar__scrollwrap"> + <div class="md-sidebar__inner"> + + + + + +<nav class="md-nav md-nav--primary md-nav--lifted" aria-label="Navigation" data-md-level="0"> + <label class="md-nav__title" for="__drawer"> + <a href=".." title="Devops" class="md-nav__button md-logo" aria-label="Devops" data-md-component="logo"> + + <img src="../assets/logo.png" alt="logo"> + + </a> + Devops + </label> + + <ul class="md-nav__list" data-md-scrollfix> + + + + + + + + + + <li class="md-nav__item md-nav__item--nested"> + + + <input class="md-nav__toggle md-toggle" data-md-toggle="__nav_1" type="checkbox" id="__nav_1" > + + + + + <label class="md-nav__link" for="__nav_1"> + TP + <span class="md-nav__icon md-icon"></span> + </label> + + <nav class="md-nav" aria-label="TP" data-md-level="1"> + <label class="md-nav__title" for="__nav_1"> + <span class="md-nav__icon md-icon"></span> + TP + </label> + <ul class="md-nav__list" data-md-scrollfix> + + + + + + + <li class="md-nav__item"> + <a href="../ch1-discover-docker-tp/" class="md-nav__link"> + TP part 01 - Docker + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch2-discover-github-actions-tp/" class="md-nav__link"> + TP part 02 - Github Actions + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-tp/" class="md-nav__link"> + TP part 03 - Ansible + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch4-extras-tp/" class="md-nav__link"> + TP Extras + </a> + </li> + + + + + </ul> + </nav> + </li> + + + + + + + + + + + + + + <li class="md-nav__item md-nav__item--active md-nav__item--nested"> + + + <input class="md-nav__toggle md-toggle" data-md-toggle="__nav_2" type="checkbox" id="__nav_2" checked> + + + + + <label class="md-nav__link" for="__nav_2"> + TD + <span class="md-nav__icon md-icon"></span> + </label> + + <nav class="md-nav" aria-label="TD" data-md-level="1"> + <label class="md-nav__title" for="__nav_2"> + <span class="md-nav__icon md-icon"></span> + TD + </label> + <ul class="md-nav__list" data-md-scrollfix> + + + + + + + <li class="md-nav__item"> + <a href="../ch1-discover-docker-td/" class="md-nav__link"> + TD part 01 - Docker + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch2-discover-github-actions-td/" class="md-nav__link"> + TD part 02 - Github Actions + </a> + </li> + + + + + + + + + + + + <li class="md-nav__item md-nav__item--active"> + + <input class="md-nav__toggle md-toggle" data-md-toggle="toc" type="checkbox" id="__toc"> + + + + + + <label class="md-nav__link md-nav__link--active" for="__toc"> + TD part 03 - Ansible + <span class="md-nav__icon md-icon"></span> + </label> + + <a href="./" 
class="md-nav__link md-nav__link--active"> + TD part 03 - Ansible + </a> + + + +<nav class="md-nav md-nav--secondary" aria-label="Table of contents"> + + + + + + + <label class="md-nav__title" for="__toc"> + <span class="md-nav__icon md-icon"></span> + Table of contents + </label> + <ul class="md-nav__list" data-md-component="toc" data-md-scrollfix> + + <li class="md-nav__item"> + <a href="#prerequisites" class="md-nav__link"> + Prerequisites + </a> + +</li> + + <li class="md-nav__item"> + <a href="#ssh-remote-connection" class="md-nav__link"> + SSH remote connection + </a> + +</li> + + <li class="md-nav__item"> + <a href="#say-hello-from-ansible" class="md-nav__link"> + Say Hello from Ansible + </a> + +</li> + + <li class="md-nav__item"> + <a href="#setup-an-apache-server" class="md-nav__link"> + Setup an Apache Server + </a> + +</li> + + </ul> + +</nav> + + </li> + + + + + </ul> + </nav> + </li> + + + + </ul> +</nav> + </div> + </div> + </div> + + + + <div class="md-sidebar md-sidebar--secondary" data-md-component="sidebar" data-md-type="toc" > + <div class="md-sidebar__scrollwrap"> + <div class="md-sidebar__inner"> + + +<nav class="md-nav md-nav--secondary" aria-label="Table of contents"> + + + + + + + <label class="md-nav__title" for="__toc"> + <span class="md-nav__icon md-icon"></span> + Table of contents + </label> + <ul class="md-nav__list" data-md-component="toc" data-md-scrollfix> + + <li class="md-nav__item"> + <a href="#prerequisites" class="md-nav__link"> + Prerequisites + </a> + +</li> + + <li class="md-nav__item"> + <a href="#ssh-remote-connection" class="md-nav__link"> + SSH remote connection + </a> + +</li> + + <li class="md-nav__item"> + <a href="#say-hello-from-ansible" class="md-nav__link"> + Say Hello from Ansible + </a> + +</li> + + <li class="md-nav__item"> + <a href="#setup-an-apache-server" class="md-nav__link"> + Setup an Apache Server + </a> + +</li> + + </ul> + +</nav> + </div> + </div> + </div> + + + <div class="md-content" data-md-component="content"> + <article class="md-content__inner md-typeset"> + + + + +<h1 id="discover-ansible">Discover Ansible</h1> +<div class="admonition note"> +<p class="admonition-title">Note</p> +<p>Checkpoint: call us to check your results (don’t stay blocked on a checkpoint if we are busy, we can check ⅔ checkpoints at the same time) </p> +</div> +<div class="admonition question"> +<p class="admonition-title">Question</p> +<p>Point to document/report </p> +</div> +<div class="admonition tip"> +<p class="admonition-title">Tip</p> +<p>Interesting information</p> +</div> +<h3 id="prerequisites">Prerequisites</h3> +<p>Till now we have only been preparing our applications to be deployed. However we did not deploy anything. </p> +<p>That’s where Ansible takes place. Ansible is basically a tool to manage your servers, provision them and deploy your applications on them. </p> +<p>This is not the only solution on the market, you’ll hear also about Chef, Puppet, Terraform during your developper life. </p> +<p>All of them have their advantages and disadvantages, it is up to you to play with and make your own decisions.</p> +<p>This introduction will be pretty fast, it is just here to make you manipulate the tool a little bit with some simple ad hoc commands. 
You will go deeper into the practical part.</p> +<p>Do you Ansible?</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-0-1" name="__codelineno-0-1" href="#__codelineno-0-1"></a>$<span class="w"> </span>ansible<span class="w"> </span>--version +</code></pre></div> +<p>Check your installed version, config file location, associated Python version and more. If you do not have Ansible installed, head to the <a href="http://docs.ansible.com" target="_blank">Ansible doc</a>.</p> +<p>Unfortunately Ansible is not available on Windows, so if you’re using Windows you have two options:</p> +<ul> +<li>Use a virtual machine with Ansible already installed: ask for a .ova </li> +<li>Install the Windows Subsystem for Linux (WSL) by following this documentation and install Ansible there </li> +</ul> +<h3 id="ssh-remote-connection">SSH remote connection</h3> +<p>Each of you has normally received a server domain name that should be yourname-yourlastname-formation.takima.io and a private key to SSH to it. +This server is yours, and you will be the only one to manipulate it. In order to play with it, you can simply SSH to it.</p> +<p>SSH stands for Secure Shell. It is both a piece of software and a communication protocol that uses TCP port 22 to communicate with your machines. It is called Secure because the communication is encrypted using your SSH key pair.</p> +<p>Before trying any command, you should know that your private key requires restricted permissions to be used. Change the rights of your key:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-1-1" name="__codelineno-1-1" href="#__codelineno-1-1"></a>chmod<span class="w"> </span><span class="m">400</span><span class="w"> </span><path_to_your_key><span class="w"> </span> +</code></pre></div> +<p>Now your key can be used to SSH to your server. Go on and hit:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-2-1" name="__codelineno-2-1" href="#__codelineno-2-1"></a>ssh<span class="w"> </span>-i<span class="w"> </span><path_to_your_key><span class="w"> </span>admin@<your_server_domain_name> +</code></pre></div> +<p>Why do we have to add this “admin@”? Your machines run under a Debian distribution, and the default user is admin, which is why we specify which user we want to use.</p> +<p>Now you are connected to your instance; nothing important to see here. You can exit whenever you want using the command:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-3-1" name="__codelineno-3-1" href="#__codelineno-3-1"></a><span class="nb">exit</span> +</code></pre></div> +<p>Why do we show you how to make a remote SSH connection? Because this is essentially what Ansible does to communicate with your server. As you can already guess, Ansible will require some configuration to be able to access your machine.</p> +<h3 id="say-hello-from-ansible">Say Hello from Ansible</h3> +<p>We will simply use a ping command from Ansible to say hello to our server. Actually, the Ansible ping module does a bit more than the usual bash ping command. If Ansible responds with a “pong”, it means that your server is available, that the provided user exists and that Ansible was able to authenticate to your server. 
</p> +<p>In summary, it tells you that your Ansible configuration works.</p> +<p>First, we need to add our server name to our Ansible hosts list:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-4-1" name="__codelineno-4-1" href="#__codelineno-4-1"></a>$<span class="w"> </span>vim<span class="w"> </span>/etc/ansible/hosts +</code></pre></div> +<p>Add your server domain name into the file and save it. Now Ansible knows your remote host. Let’s hit:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-5-1" name="__codelineno-5-1" href="#__codelineno-5-1"></a>$<span class="w"> </span>ansible<span class="w"> </span>all<span class="w"> </span>-m<span class="w"> </span>ping +</code></pre></div> +<p>And it… doesn’t work. Why? Because Ansible was denied access: it provided neither a user nor a private SSH key. Now try again with this one:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-6-1" name="__codelineno-6-1" href="#__codelineno-6-1"></a>$<span class="w"> </span>ansible<span class="w"> </span>all<span class="w"> </span>-m<span class="w"> </span>ping<span class="w"> </span>--private-key<span class="o">=</span><path_to_your_ssh_key><span class="w"> </span>-u<span class="w"> </span>admin +</code></pre></div> +<p>And now it should respond “pong”, which means your configuration is correct. +<img alt="ansible-ping" src="../assets/ansible-ping.png" /></p> +<div class="admonition check"> +<p class="admonition-title">Check</p> +<p>It replies "pong"</p> +</div> +<h3 id="setup-an-apache-server">Setup an Apache Server</h3> +<p>Now that we are able to access our instance with Ansible, let’s see how powerful Ansible can be for provisioning your web server. +We are going to ask Ansible to install Apache on your instance to make it a web server.</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-7-1" name="__codelineno-7-1" href="#__codelineno-7-1"></a>$<span class="w"> </span>ansible<span class="w"> </span>all<span class="w"> </span>-m<span class="w"> </span>apt<span class="w"> </span>-a<span class="w"> </span><span class="s2">"name=apache2 state=present"</span><span class="w"> </span>--private-key<span class="o">=</span><path_to_your_ssh_key><span class="w"> </span>-u<span class="w"> </span>admin +</code></pre></div> +<p>And it… doesn’t work! As on any system, you need to be root in order to install software. Fortunately, Ansible can take care of this. Try again with the following command:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-8-1" name="__codelineno-8-1" href="#__codelineno-8-1"></a>$<span class="w"> </span>ansible<span class="w"> </span>all<span class="w"> </span>-m<span class="w"> </span>apt<span class="w"> </span>-a<span class="w"> </span><span class="s2">"name=apache2 state=present"</span><span class="w"> </span>--private-key<span class="o">=</span><path_to_your_ssh_key><span class="w"> </span>-u<span class="w"> </span>admin<span class="w"> </span>--become +</code></pre></div> +<p>The --become flag tells Ansible to perform the command as a superuser. Keep in mind that the admin user is part of the wheel group, which is the Debian superusers group. It would not be possible using a normal user.</p> +<p>Now you have successfully installed Apache on your server. 
We will go on and create an html page for our website:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-9-1" name="__codelineno-9-1" href="#__codelineno-9-1"></a>$<span class="w"> </span>ansible<span class="w"> </span>all<span class="w"> </span>-m<span class="w"> </span>shell<span class="w"> </span>-a<span class="w"> </span><span class="s1">'echo "<html><h1>Hello World</h1></html>" > /var/www/html/index.html'</span><span class="w"> </span>--private-key<span class="o">=</span><path_to_your_ssh_key><span class="w"> </span>-u<span class="w"> </span>admin<span class="w"> </span>--become +</code></pre></div> +<p>Now start your Apache service:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-10-1" name="__codelineno-10-1" href="#__codelineno-10-1"></a>$<span class="w"> </span>ansible<span class="w"> </span>all<span class="w"> </span>-m<span class="w"> </span>service<span class="w"> </span>-a<span class="w"> </span><span class="s2">"name=apache2 state=started"</span><span class="w"> </span>--private-key<span class="o">=</span><path_to_your_ssh_key><span class="w"> </span>-u<span class="w"> </span>admin<span class="w"> </span>--become +</code></pre></div> +<p>Connect to your server from your browser and… it works ! Well done you’ve set up your very first server with Ansible. Now move on to the practical part.</p> +<p align="center">© Takima 2025</p> + + + </article> + </div> + </div> + + </main> + + <footer class="md-footer"> + + <nav class="md-footer__inner md-grid" aria-label="Footer"> + + + <a href="../ch2-discover-github-actions-td/" class="md-footer__link md-footer__link--prev" aria-label="Previous: TD part 02 - Github Actions" rel="prev"> + <div class="md-footer__button md-icon"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12Z"/></svg> + </div> + <div class="md-footer__title"> + <div class="md-ellipsis"> + <span class="md-footer__direction"> + Previous + </span> + TD part 02 - Github Actions + </div> + </div> + </a> + + + </nav> + + <div class="md-footer-meta md-typeset"> + <div class="md-footer-meta__inner md-grid"> + <div class="md-copyright"> + + + Made with + <a href="https://squidfunk.github.io/mkdocs-material/" target="_blank" rel="noopener"> + Material for MkDocs + </a> + +</div> + + </div> + </div> +</footer> + + </div> + <div class="md-dialog" data-md-component="dialog"> + <div class="md-dialog__inner md-typeset"></div> + </div> + <script id="__config" type="application/json">{"base": "..", "features": ["navigation.tabs", "navigation.instant"], "search": "../assets/javascripts/workers/search.2a1c317c.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.config.lang": "en", "search.config.pipeline": "trimmer, stopWordFilter", "search.config.separator": "[\\s\\-]+", "search.placeholder": "Search", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version.title": "Select version"}}</script> + + + <script src="../assets/javascripts/bundle.a6c66575.min.js"></script> + + + </body> +</html> \ No newline at end of file diff --git a/public/ch3-discover-ansible-tp/index.html 
b/public/ch3-discover-ansible-tp/index.html new file mode 100644 index 0000000000000000000000000000000000000000..27ac0b032016166570d4f550d57347fc4d27671a --- /dev/null +++ b/public/ch3-discover-ansible-tp/index.html @@ -0,0 +1,930 @@ + +<!doctype html> +<html lang="en" class="no-js"> + <head> + + <meta charset="utf-8"> + <meta name="viewport" content="width=device-width,initial-scale=1"> + + + + <link rel="icon" href="../assets/images/favicon.png"> + <meta name="generator" content="mkdocs-1.3.0, mkdocs-material-8.2.15"> + + + + <title>TP part 03 - Ansible - Devops</title> + + + + <link rel="stylesheet" href="../assets/stylesheets/main.c382b1dc.min.css"> + + + <link rel="stylesheet" href="../assets/stylesheets/palette.cc9b2e1e.min.css"> + + + + <meta name="theme-color" content="#e92063"> + + + + + + + + + <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin> + <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,300i,400,400i,700,700i%7CRoboto+Mono:400,400i,700,700i&display=fallback"> + <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style> + + + + <script>__md_scope=new URL("..",location),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script> + + + + + + </head> + + + + + + + + <body dir="ltr" data-md-color-scheme="" data-md-color-primary="pink" data-md-color-accent=""> + + + + <input class="md-toggle" data-md-toggle="drawer" type="checkbox" id="__drawer" autocomplete="off"> + <input class="md-toggle" data-md-toggle="search" type="checkbox" id="__search" autocomplete="off"> + <label class="md-overlay" for="__drawer"></label> + <div data-md-component="skip"> + + + <a href="#discover-ansible" class="md-skip"> + Skip to content + </a> + + </div> + <div data-md-component="announce"> + + </div> + + + + +<header class="md-header" data-md-component="header"> + <nav class="md-header__inner md-grid" aria-label="Header"> + <a href=".." 
title="Devops" class="md-header__button md-logo" aria-label="Devops" data-md-component="logo"> + + <img src="../assets/logo.png" alt="logo"> + + </a> + <label class="md-header__button md-icon" for="__drawer"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M3 6h18v2H3V6m0 5h18v2H3v-2m0 5h18v2H3v-2Z"/></svg> + </label> + <div class="md-header__title" data-md-component="header-title"> + <div class="md-header__ellipsis"> + <div class="md-header__topic"> + <span class="md-ellipsis"> + Devops + </span> + </div> + <div class="md-header__topic" data-md-component="header-topic"> + <span class="md-ellipsis"> + + TP part 03 - Ansible + + </span> + </div> + </div> + </div> + + + + <label class="md-header__button md-icon" for="__search"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5Z"/></svg> + </label> + <div class="md-search" data-md-component="search" role="dialog"> + <label class="md-search__overlay" for="__search"></label> + <div class="md-search__inner" role="search"> + <form class="md-search__form" name="search"> + <input type="text" class="md-search__input" name="query" aria-label="Search" placeholder="Search" autocapitalize="off" autocorrect="off" autocomplete="off" spellcheck="false" data-md-component="search-query" required> + <label class="md-search__icon md-icon" for="__search"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5Z"/></svg> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12Z"/></svg> + </label> + <nav class="md-search__options" aria-label="Search"> + + <button type="reset" class="md-search__icon md-icon" aria-label="Clear" tabindex="-1"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M19 6.41 17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12 19 6.41Z"/></svg> + </button> + </nav> + + </form> + <div class="md-search__output"> + <div class="md-search__scrollwrap" data-md-scrollfix> + <div class="md-search-result" data-md-component="search-result"> + <div class="md-search-result__meta"> + Initializing search + </div> + <ol class="md-search-result__list"></ol> + </div> + </div> + </div> + </div> +</div> + + + </nav> + +</header> + + <div class="md-container" data-md-component="container"> + + + + + +<nav class="md-tabs" aria-label="Tabs" data-md-component="tabs"> + <div class="md-tabs__inner md-grid"> + <ul class="md-tabs__list"> + + + + + + + + + + + + <li class="md-tabs__item"> + <a href="../ch1-discover-docker-tp/" class="md-tabs__link md-tabs__link--active"> + TP + </a> + </li> + + + + + + + + + + + + <li class="md-tabs__item"> + <a href="../ch1-discover-docker-td/" class="md-tabs__link"> + TD + </a> + </li> + + + + </ul> + </div> +</nav> + + + + <main class="md-main" data-md-component="main"> + <div class="md-main__inner md-grid"> + + + + <div class="md-sidebar md-sidebar--primary" data-md-component="sidebar" data-md-type="navigation" > + <div class="md-sidebar__scrollwrap"> + <div class="md-sidebar__inner"> + + + + + +<nav class="md-nav 
md-nav--primary md-nav--lifted" aria-label="Navigation" data-md-level="0"> + <label class="md-nav__title" for="__drawer"> + <a href=".." title="Devops" class="md-nav__button md-logo" aria-label="Devops" data-md-component="logo"> + + <img src="../assets/logo.png" alt="logo"> + + </a> + Devops + </label> + + <ul class="md-nav__list" data-md-scrollfix> + + + + + + + + + + + + <li class="md-nav__item md-nav__item--active md-nav__item--nested"> + + + <input class="md-nav__toggle md-toggle" data-md-toggle="__nav_1" type="checkbox" id="__nav_1" checked> + + + + + <label class="md-nav__link" for="__nav_1"> + TP + <span class="md-nav__icon md-icon"></span> + </label> + + <nav class="md-nav" aria-label="TP" data-md-level="1"> + <label class="md-nav__title" for="__nav_1"> + <span class="md-nav__icon md-icon"></span> + TP + </label> + <ul class="md-nav__list" data-md-scrollfix> + + + + + + + <li class="md-nav__item"> + <a href="../ch1-discover-docker-tp/" class="md-nav__link"> + TP part 01 - Docker + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch2-discover-github-actions-tp/" class="md-nav__link"> + TP part 02 - Github Actions + </a> + </li> + + + + + + + + + + + + <li class="md-nav__item md-nav__item--active"> + + <input class="md-nav__toggle md-toggle" data-md-toggle="toc" type="checkbox" id="__toc"> + + + + + + <label class="md-nav__link md-nav__link--active" for="__toc"> + TP part 03 - Ansible + <span class="md-nav__icon md-icon"></span> + </label> + + <a href="./" class="md-nav__link md-nav__link--active"> + TP part 03 - Ansible + </a> + + + +<nav class="md-nav md-nav--secondary" aria-label="Table of contents"> + + + + + + + <label class="md-nav__title" for="__toc"> + <span class="md-nav__icon md-icon"></span> + Table of contents + </label> + <ul class="md-nav__list" data-md-component="toc" data-md-scrollfix> + + <li class="md-nav__item"> + <a href="#goals" class="md-nav__link"> + Goals + </a> + +</li> + + <li class="md-nav__item"> + <a href="#introduction" class="md-nav__link"> + Introduction + </a> + + <nav class="md-nav" aria-label="Introduction"> + <ul class="md-nav__list"> + + <li class="md-nav__item"> + <a href="#inventories" class="md-nav__link"> + Inventories + </a> + +</li> + + <li class="md-nav__item"> + <a href="#facts" class="md-nav__link"> + Facts + </a> + +</li> + + </ul> + </nav> + +</li> + + <li class="md-nav__item"> + <a href="#playbooks" class="md-nav__link"> + Playbooks + </a> + + <nav class="md-nav" aria-label="Playbooks"> + <ul class="md-nav__list"> + + <li class="md-nav__item"> + <a href="#first-playbook" class="md-nav__link"> + First playbook + </a> + +</li> + + <li class="md-nav__item"> + <a href="#advanced-playbook" class="md-nav__link"> + Advanced Playbook + </a> + +</li> + + <li class="md-nav__item"> + <a href="#using-roles" class="md-nav__link"> + Using roles + </a> + +</li> + + </ul> + </nav> + +</li> + + <li class="md-nav__item"> + <a href="#deploy-your-app" class="md-nav__link"> + Deploy your App + </a> + +</li> + + <li class="md-nav__item"> + <a href="#front" class="md-nav__link"> + Front + </a> + +</li> + + <li class="md-nav__item"> + <a href="#continuous-deployment" class="md-nav__link"> + Continuous Deployment + </a> + +</li> + + </ul> + +</nav> + + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch4-extras-tp/" class="md-nav__link"> + TP Extras + </a> + </li> + + + + + </ul> + </nav> + </li> + + + + + + + + + + + + <li class="md-nav__item md-nav__item--nested"> + + + <input class="md-nav__toggle md-toggle" 
data-md-toggle="__nav_2" type="checkbox" id="__nav_2" > + + + + + <label class="md-nav__link" for="__nav_2"> + TD + <span class="md-nav__icon md-icon"></span> + </label> + + <nav class="md-nav" aria-label="TD" data-md-level="1"> + <label class="md-nav__title" for="__nav_2"> + <span class="md-nav__icon md-icon"></span> + TD + </label> + <ul class="md-nav__list" data-md-scrollfix> + + + + + + + <li class="md-nav__item"> + <a href="../ch1-discover-docker-td/" class="md-nav__link"> + TD part 01 - Docker + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch2-discover-github-actions-td/" class="md-nav__link"> + TD part 02 - Github Actions + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-td/" class="md-nav__link"> + TD part 03 - Ansible + </a> + </li> + + + + + </ul> + </nav> + </li> + + + + </ul> +</nav> + </div> + </div> + </div> + + + + <div class="md-sidebar md-sidebar--secondary" data-md-component="sidebar" data-md-type="toc" > + <div class="md-sidebar__scrollwrap"> + <div class="md-sidebar__inner"> + + +<nav class="md-nav md-nav--secondary" aria-label="Table of contents"> + + + + + + + <label class="md-nav__title" for="__toc"> + <span class="md-nav__icon md-icon"></span> + Table of contents + </label> + <ul class="md-nav__list" data-md-component="toc" data-md-scrollfix> + + <li class="md-nav__item"> + <a href="#goals" class="md-nav__link"> + Goals + </a> + +</li> + + <li class="md-nav__item"> + <a href="#introduction" class="md-nav__link"> + Introduction + </a> + + <nav class="md-nav" aria-label="Introduction"> + <ul class="md-nav__list"> + + <li class="md-nav__item"> + <a href="#inventories" class="md-nav__link"> + Inventories + </a> + +</li> + + <li class="md-nav__item"> + <a href="#facts" class="md-nav__link"> + Facts + </a> + +</li> + + </ul> + </nav> + +</li> + + <li class="md-nav__item"> + <a href="#playbooks" class="md-nav__link"> + Playbooks + </a> + + <nav class="md-nav" aria-label="Playbooks"> + <ul class="md-nav__list"> + + <li class="md-nav__item"> + <a href="#first-playbook" class="md-nav__link"> + First playbook + </a> + +</li> + + <li class="md-nav__item"> + <a href="#advanced-playbook" class="md-nav__link"> + Advanced Playbook + </a> + +</li> + + <li class="md-nav__item"> + <a href="#using-roles" class="md-nav__link"> + Using roles + </a> + +</li> + + </ul> + </nav> + +</li> + + <li class="md-nav__item"> + <a href="#deploy-your-app" class="md-nav__link"> + Deploy your App + </a> + +</li> + + <li class="md-nav__item"> + <a href="#front" class="md-nav__link"> + Front + </a> + +</li> + + <li class="md-nav__item"> + <a href="#continuous-deployment" class="md-nav__link"> + Continuous Deployment + </a> + +</li> + + </ul> + +</nav> + </div> + </div> + </div> + + + <div class="md-content" data-md-component="content"> + <article class="md-content__inner md-typeset"> + + + + +<h1 id="discover-ansible">Discover Ansible</h1> +<div class="admonition check"> +<p class="admonition-title">Check</p> +<p>Checkpoint: call us to check your results (don’t stay blocked on a checkpoint if we are busy, we can check ⅔ checkpoints at the same time) </p> +</div> +<div class="admonition question"> +<p class="admonition-title">Question</p> +<p>Point to document/report </p> +</div> +<div class="admonition tip"> +<p class="admonition-title">Tip</p> +<p>Interesting information</p> +</div> +<h2 id="goals">Goals</h2> +<p>Install and deploy your application automatically with ansible.</p> +<h2 id="introduction">Introduction</h2> +<h3 
id="inventories">Inventories</h3> +<p>By default, Ansible's inventory is saved in the location <code>/etc/ansible/hosts</code> where you already defined your server. </p> +<p>The headings between brackets (eg: [webservers]) are used to group sets of hosts together, they are called, surprisingly, groups. You could regroup them by roles like database servers, front-ends, reverse proxies, build servers…</p> +<p>Let’s create a project specific inventory, in your project create an ansible directory, then create a new directory called inventories and in this folder a new file (<code>my-project/ansible/inventories/setup.yml</code>):</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-0-1" name="__codelineno-0-1" href="#__codelineno-0-1"></a><span class="nt">all</span><span class="p">:</span> +<a id="__codelineno-0-2" name="__codelineno-0-2" href="#__codelineno-0-2"></a><span class="w"> </span><span class="nt">vars</span><span class="p">:</span> +<a id="__codelineno-0-3" name="__codelineno-0-3" href="#__codelineno-0-3"></a><span class="w"> </span><span class="nt">ansible_user</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">admin</span> +<a id="__codelineno-0-4" name="__codelineno-0-4" href="#__codelineno-0-4"></a><span class="w"> </span><span class="nt">ansible_ssh_private_key_file</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">/path/to/private/key</span> +<a id="__codelineno-0-5" name="__codelineno-0-5" href="#__codelineno-0-5"></a><span class="w"> </span><span class="nt">children</span><span class="p">:</span> +<a id="__codelineno-0-6" name="__codelineno-0-6" href="#__codelineno-0-6"></a><span class="w"> </span><span class="nt">prod</span><span class="p">:</span> +<a id="__codelineno-0-7" name="__codelineno-0-7" href="#__codelineno-0-7"></a><span class="w"> </span><span class="nt">hosts</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">hostname or IP</span> +</code></pre></div> +<p>Test your inventory with the ping command:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-1-1" name="__codelineno-1-1" href="#__codelineno-1-1"></a>ansible<span class="w"> </span>all<span class="w"> </span>-i<span class="w"> </span>inventories/setup.yml<span class="w"> </span>-m<span class="w"> </span>ping +</code></pre></div> +<h3 id="facts">Facts</h3> +<p>Let’s get information about hosts: these kinds of variables, not set by the user but discovered are called <strong>facts</strong>. 
</p> +<p><strong>Facts</strong> are prefixed by ansible_ and represent information derived from speaking with your remote systems.</p> +<p>You will request your server to get your OS distribution, thanks to the setup module.</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-2-1" name="__codelineno-2-1" href="#__codelineno-2-1"></a>ansible<span class="w"> </span>all<span class="w"> </span>-i<span class="w"> </span>inventories/setup.yml<span class="w"> </span>-m<span class="w"> </span>setup<span class="w"> </span>-a<span class="w"> </span><span class="s2">"filter=ansible_distribution*"</span> +</code></pre></div> +<p>Earlier you installed Apache2 server on your machine, let’s remove it:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-3-1" name="__codelineno-3-1" href="#__codelineno-3-1"></a>ansible<span class="w"> </span>all<span class="w"> </span>-i<span class="w"> </span>inventories/setup.yml<span class="w"> </span>-m<span class="w"> </span>apt<span class="w"> </span>-a<span class="w"> </span><span class="s2">"name=apache2 state=absent"</span><span class="w"> </span>--become +</code></pre></div> +<p>With ansible, you just describe the state of your server and let ansible automatically update it for you. </p> +<p>If you run this command another time you won’t have the same output as apache2 would have been removed.</p> +<div class="admonition question"> +<p class="admonition-title">Question</p> +<p>3-1 Document your inventory and base commands</p> +</div> +<h2 id="playbooks">Playbooks</h2> +<h3 id="first-playbook">First playbook</h3> +<p>Let’s create a first very simple playbook in <code>my-project/ansible/playbook.yml</code>:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-4-1" name="__codelineno-4-1" href="#__codelineno-4-1"></a><span class="p p-Indicator">-</span><span class="w"> </span><span class="nt">hosts</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">all</span> +<a id="__codelineno-4-2" name="__codelineno-4-2" href="#__codelineno-4-2"></a><span class="w"> </span><span class="nt">gather_facts</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">false</span> +<a id="__codelineno-4-3" name="__codelineno-4-3" href="#__codelineno-4-3"></a><span class="w"> </span><span class="nt">become</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">true</span> +<a id="__codelineno-4-4" name="__codelineno-4-4" href="#__codelineno-4-4"></a> +<a id="__codelineno-4-5" name="__codelineno-4-5" href="#__codelineno-4-5"></a><span class="w"> </span><span class="nt">tasks</span><span class="p">:</span> +<a id="__codelineno-4-6" name="__codelineno-4-6" href="#__codelineno-4-6"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="nt">name</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">Test connection</span> +<a id="__codelineno-4-7" name="__codelineno-4-7" href="#__codelineno-4-7"></a><span class="w"> </span><span class="nt">ping</span><span class="p">:</span> +</code></pre></div> +<p>Just execute your playbook:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-5-1" name="__codelineno-5-1" href="#__codelineno-5-1"></a>ansible-playbook<span class="w"> </span>-i<span class="w"> </span>inventories/setup.yml<span class="w"> </span>playbook.yml +</code></pre></div> +<p>You can check your playbooks before 
playing them using the option: <code>--syntax-check</code></p> +<h3 id="advanced-playbook">Advanced Playbook</h3> +<p>Let’s create a playbook to install docker on your server, follow the documentation and create the corresponding tasks: <a href="https://docs.docker.com/engine/install/debian/#install-using-the-repository" target="_blank">https://docs.docker.com/engine/install/debian/#install-using-the-repository</a>.</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-6-1" name="__codelineno-6-1" href="#__codelineno-6-1"></a><span class="p p-Indicator">-</span><span class="w"> </span><span class="nt">hosts</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">all</span> +<a id="__codelineno-6-2" name="__codelineno-6-2" href="#__codelineno-6-2"></a><span class="w w-Error"> </span><span class="nt">gather_facts</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">true</span> +<a id="__codelineno-6-3" name="__codelineno-6-3" href="#__codelineno-6-3"></a><span class="w"> </span><span class="nt">become</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">true</span> +<a id="__codelineno-6-4" name="__codelineno-6-4" href="#__codelineno-6-4"></a> +<a id="__codelineno-6-5" name="__codelineno-6-5" href="#__codelineno-6-5"></a> +<a id="__codelineno-6-6" name="__codelineno-6-6" href="#__codelineno-6-6"></a><span class="w"> </span><span class="nt">tasks</span><span class="p">:</span> +<a id="__codelineno-6-7" name="__codelineno-6-7" href="#__codelineno-6-7"></a><span class="w"> </span><span class="c1"># Install prerequisites for Docker</span> +<a id="__codelineno-6-8" name="__codelineno-6-8" href="#__codelineno-6-8"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="nt">name</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">Install required packages</span> +<a id="__codelineno-6-9" name="__codelineno-6-9" href="#__codelineno-6-9"></a><span class="w"> </span><span class="nt">apt</span><span class="p">:</span> +<a id="__codelineno-6-10" name="__codelineno-6-10" href="#__codelineno-6-10"></a><span class="w"> </span><span class="nt">name</span><span class="p">:</span> +<a id="__codelineno-6-11" name="__codelineno-6-11" href="#__codelineno-6-11"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">apt-transport-https</span> +<a id="__codelineno-6-12" name="__codelineno-6-12" href="#__codelineno-6-12"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">ca-certificates</span> +<a id="__codelineno-6-13" name="__codelineno-6-13" href="#__codelineno-6-13"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">curl</span> +<a id="__codelineno-6-14" name="__codelineno-6-14" href="#__codelineno-6-14"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">gnupg</span> +<a id="__codelineno-6-15" name="__codelineno-6-15" href="#__codelineno-6-15"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">lsb-release</span> +<a id="__codelineno-6-16" name="__codelineno-6-16" href="#__codelineno-6-16"></a><span class="w"> 
</span><span class="p p-Indicator">-</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">python3-venv</span> +<a id="__codelineno-6-17" name="__codelineno-6-17" href="#__codelineno-6-17"></a><span class="w"> </span><span class="nt">state</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">latest</span> +<a id="__codelineno-6-18" name="__codelineno-6-18" href="#__codelineno-6-18"></a><span class="w"> </span><span class="nt">update_cache</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">yes</span> +<a id="__codelineno-6-19" name="__codelineno-6-19" href="#__codelineno-6-19"></a> +<a id="__codelineno-6-20" name="__codelineno-6-20" href="#__codelineno-6-20"></a> +<a id="__codelineno-6-21" name="__codelineno-6-21" href="#__codelineno-6-21"></a><span class="w"> </span><span class="c1"># Add Docker’s official GPG key</span> +<a id="__codelineno-6-22" name="__codelineno-6-22" href="#__codelineno-6-22"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="nt">name</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">Add Docker GPG key</span> +<a id="__codelineno-6-23" name="__codelineno-6-23" href="#__codelineno-6-23"></a><span class="w"> </span><span class="nt">apt_key</span><span class="p">:</span> +<a id="__codelineno-6-24" name="__codelineno-6-24" href="#__codelineno-6-24"></a><span class="w"> </span><span class="nt">url</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">https://download.docker.com/linux/debian/gpg</span> +<a id="__codelineno-6-25" name="__codelineno-6-25" href="#__codelineno-6-25"></a><span class="w"> </span><span class="nt">state</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">present</span> +<a id="__codelineno-6-26" name="__codelineno-6-26" href="#__codelineno-6-26"></a> +<a id="__codelineno-6-27" name="__codelineno-6-27" href="#__codelineno-6-27"></a> +<a id="__codelineno-6-28" name="__codelineno-6-28" href="#__codelineno-6-28"></a><span class="w"> </span><span class="c1"># Set up the Docker stable repository</span> +<a id="__codelineno-6-29" name="__codelineno-6-29" href="#__codelineno-6-29"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="nt">name</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">Add Docker APT repository</span> +<a id="__codelineno-6-30" name="__codelineno-6-30" href="#__codelineno-6-30"></a><span class="w"> </span><span class="nt">apt_repository</span><span class="p">:</span> +<a id="__codelineno-6-31" name="__codelineno-6-31" href="#__codelineno-6-31"></a><span class="w"> </span><span class="nt">repo</span><span class="p">:</span><span class="w"> </span><span class="s">"deb</span><span class="nv"> </span><span class="s">[arch=amd64]</span><span class="nv"> </span><span class="s">https://download.docker.com/linux/debian</span><span class="nv"> </span><span class="s">{{</span><span class="nv"> </span><span class="s">ansible_facts['distribution_release']</span><span class="nv"> </span><span class="s">}}</span><span class="nv"> </span><span class="s">stable"</span> +<a id="__codelineno-6-32" name="__codelineno-6-32" href="#__codelineno-6-32"></a><span class="w"> </span><span class="nt">state</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar 
l-Scalar-Plain">present</span> +<a id="__codelineno-6-33" name="__codelineno-6-33" href="#__codelineno-6-33"></a><span class="w"> </span><span class="nt">update_cache</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">yes</span> +<a id="__codelineno-6-34" name="__codelineno-6-34" href="#__codelineno-6-34"></a> +<a id="__codelineno-6-35" name="__codelineno-6-35" href="#__codelineno-6-35"></a> +<a id="__codelineno-6-36" name="__codelineno-6-36" href="#__codelineno-6-36"></a><span class="w"> </span><span class="c1"># Install Docker</span> +<a id="__codelineno-6-37" name="__codelineno-6-37" href="#__codelineno-6-37"></a><span class="w"> </span><span class="w w-Error"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="nt">name</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">Install Docker</span> +<a id="__codelineno-6-38" name="__codelineno-6-38" href="#__codelineno-6-38"></a><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">apt</span><span class="p p-Indicator">:</span> +<a id="__codelineno-6-39" name="__codelineno-6-39" href="#__codelineno-6-39"></a><span class="w"> </span><span class="nt">name</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">docker-ce</span> +<a id="__codelineno-6-40" name="__codelineno-6-40" href="#__codelineno-6-40"></a><span class="w"> </span><span class="nt">state</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">present</span> +<a id="__codelineno-6-41" name="__codelineno-6-41" href="#__codelineno-6-41"></a> +<a id="__codelineno-6-42" name="__codelineno-6-42" href="#__codelineno-6-42"></a> +<a id="__codelineno-6-43" name="__codelineno-6-43" href="#__codelineno-6-43"></a><span class="w"> </span><span class="c1"># Install Python3 and pip3</span> +<a id="__codelineno-6-44" name="__codelineno-6-44" href="#__codelineno-6-44"></a><span class="w"> </span><span class="w w-Error"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="nt">name</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">Install Python3 and pip3</span> +<a id="__codelineno-6-45" name="__codelineno-6-45" href="#__codelineno-6-45"></a><span class="w"> </span><span class="nt">apt</span><span class="p">:</span> +<a id="__codelineno-6-46" name="__codelineno-6-46" href="#__codelineno-6-46"></a><span class="w"> </span><span class="nt">name</span><span class="p">:</span> +<a id="__codelineno-6-47" name="__codelineno-6-47" href="#__codelineno-6-47"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">python3</span> +<a id="__codelineno-6-48" name="__codelineno-6-48" href="#__codelineno-6-48"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">python3-pip</span> +<a id="__codelineno-6-49" name="__codelineno-6-49" href="#__codelineno-6-49"></a><span class="w"> </span><span class="nt">state</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">present</span> +<a id="__codelineno-6-50" name="__codelineno-6-50" href="#__codelineno-6-50"></a> +<a id="__codelineno-6-51" name="__codelineno-6-51" href="#__codelineno-6-51"></a> +<a id="__codelineno-6-52" name="__codelineno-6-52" href="#__codelineno-6-52"></a><span class="w"> </span><span class="c1"># 
Create a virtual environment for Python packages</span> +<a id="__codelineno-6-53" name="__codelineno-6-53" href="#__codelineno-6-53"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="nt">name</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">Create a virtual environment for Docker SDK</span> +<a id="__codelineno-6-54" name="__codelineno-6-54" href="#__codelineno-6-54"></a><span class="w"> </span><span class="nt">command</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">python3 -m venv /opt/docker_venv</span> +<a id="__codelineno-6-55" name="__codelineno-6-55" href="#__codelineno-6-55"></a><span class="w"> </span><span class="nt">args</span><span class="p">:</span> +<a id="__codelineno-6-56" name="__codelineno-6-56" href="#__codelineno-6-56"></a><span class="w"> </span><span class="nt">creates</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">/opt/docker_venv</span><span class="w"> </span><span class="c1"># Only runs if this directory doesn’t exist</span> +<a id="__codelineno-6-57" name="__codelineno-6-57" href="#__codelineno-6-57"></a> +<a id="__codelineno-6-58" name="__codelineno-6-58" href="#__codelineno-6-58"></a> +<a id="__codelineno-6-59" name="__codelineno-6-59" href="#__codelineno-6-59"></a><span class="w"> </span><span class="c1"># Install Docker SDK for Python in the virtual environment</span> +<a id="__codelineno-6-60" name="__codelineno-6-60" href="#__codelineno-6-60"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="nt">name</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">Install Docker SDK for Python in virtual environment</span> +<a id="__codelineno-6-61" name="__codelineno-6-61" href="#__codelineno-6-61"></a><span class="w"> </span><span class="nt">command</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">/opt/docker_venv/bin/pip install docker</span> +<a id="__codelineno-6-62" name="__codelineno-6-62" href="#__codelineno-6-62"></a> +<a id="__codelineno-6-63" name="__codelineno-6-63" href="#__codelineno-6-63"></a> +<a id="__codelineno-6-64" name="__codelineno-6-64" href="#__codelineno-6-64"></a><span class="w"> </span><span class="c1"># Ensure Docker is running</span> +<a id="__codelineno-6-65" name="__codelineno-6-65" href="#__codelineno-6-65"></a><span class="w"> </span><span class="p p-Indicator">-</span><span class="w"> </span><span class="nt">name</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">Make sure Docker is running</span> +<a id="__codelineno-6-66" name="__codelineno-6-66" href="#__codelineno-6-66"></a><span class="w"> </span><span class="nt">service</span><span class="p">:</span> +<a id="__codelineno-6-67" name="__codelineno-6-67" href="#__codelineno-6-67"></a><span class="w"> </span><span class="nt">name</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">docker</span> +<a id="__codelineno-6-68" name="__codelineno-6-68" href="#__codelineno-6-68"></a><span class="w"> </span><span class="nt">state</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">started</span> +<a id="__codelineno-6-69" name="__codelineno-6-69" href="#__codelineno-6-69"></a><span class="w"> </span><span class="nt">tags</span><span 
class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">docker</span> +</code></pre></div> +<p>Good news: we now have Docker installed on our server. The last task makes sure Docker is running; you can check this with an ad-hoc command, or by connecting to the server until you really trust Ansible.</p> +<div class="admonition check"> +<p class="admonition-title">Check</p> +<p>Docker installed on remote server</p> +</div> +<h3 id="using-roles">Using roles</h3> +<p>Our Docker install playbook works, but it will be cleaner to keep those tasks in a dedicated place, for example in a role. Create a docker role and move the installation tasks there:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-7-1" name="__codelineno-7-1" href="#__codelineno-7-1"></a>ansible-galaxy<span class="w"> </span>init<span class="w"> </span>roles/docker +</code></pre></div> +<p>Call the docker role from your playbook to check your refactoring and your installation.</p> +<p>The initialized role contains several directories; keep only the ones you will need:</p> +<ul> +<li>tasks - contains the main list of tasks to be executed by the role.</li> +<li>handlers - contains handlers, which may be used by this role or outside of it.</li> +</ul> +<div class="admonition question"> +<p class="admonition-title">Question</p> +<p>3-2 Document your playbook</p> +</div> +<h2 id="deploy-your-app">Deploy your App</h2> +<p>The time has come to deploy your application to your Ansible-managed server.</p> +<p>Create a specific role for each part of your application and use the Ansible docker_container module to start your dockerized application. Here is what a docker_container task should look like:</p> +<div class="highlight"><pre><span></span><code><a id="__codelineno-8-1" name="__codelineno-8-1" href="#__codelineno-8-1"></a><span class="p p-Indicator">-</span><span class="w"> </span><span class="nt">name</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">Run HTTPD</span> +<a id="__codelineno-8-2" name="__codelineno-8-2" href="#__codelineno-8-2"></a><span class="w"> </span><span class="nt">docker_container</span><span class="p">:</span> +<a id="__codelineno-8-3" name="__codelineno-8-3" href="#__codelineno-8-3"></a><span class="w"> </span><span class="nt">name</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">httpd</span> +<a id="__codelineno-8-4" name="__codelineno-8-4" href="#__codelineno-8-4"></a><span class="w"> </span><span class="nt">image</span><span class="p">:</span><span class="w"> </span><span class="l l-Scalar l-Scalar-Plain">your image name from DockerHub</span> +</code></pre></div> +<p>You must have at least these roles:</p> +<ul> +<li>install docker</li> +<li>create network</li> +<li>launch database</li> +<li>launch app</li> +<li>launch proxy</li> +</ul> +<div class="admonition note"> +<p class="admonition-title">Note</p> +<ul> +<li>You will need to add environment variables to the app and database tasks. 
Ansible can set them either in the .env file for the database or in the application.yml for the app.</li> +<li>Don’t forget to use existing modules, for example to create the network</li> +<li>Don't forget to use the right Python interpreter when creating the Docker network (refer to the <code>ansible_python_interpreter</code> variable)</li> +</ul> +</div> +<div class="admonition link"> +<p class="admonition-title">Link</p> +<ul> +<li><a href="https://docs.ansible.com/ansible/latest/collections/community/docker/docker_container_module.html#ansible-collections-community-docker-docker-container-module" target="_blank">docker_container module documentation</a></li> +<li><a href="https://docs.ansible.com/ansible/latest/collections/community/docker/docker_network_module.html#ansible-collections-community-docker-docker-network-module" target="_blank">docker_network module documentation</a></li> +</ul> +</div> +<div class="admonition check"> +<p class="admonition-title">Check</p> +<p>You should be able to access your API on your server.</p> +</div> +<div class="admonition question"> +<p class="admonition-title">Question</p> +<p>3-3 Document your docker_container tasks configuration.</p> +</div> +<h2 id="front">Front</h2> +<p>If you have reached the end of each TP, you can access your API through your server.</p> +<p>Your database, API and httpd containers must be up on your server and deployed with your Github Actions.</p> +<p>Everything runs under the hood of docker-compose.</p> +<p>Usually, when we have an API we also have a front end to display our information.</p> +<p>That's your bonus part: you can find the code of the <a href="https://github.com/takima-training/devops-front" target="_blank">front ready here</a>.</p> +<p>You have to customize your httpd server so that requests are correctly routed between the API and the front. The httpd server acts as a proxy within your system.</p> +<div class="admonition check"> +<p class="admonition-title">Check</p> +<p>Front working</p> +</div> +<h2 id="continuous-deployment">Continuous Deployment</h2> +<div class="admonition note"> +<p class="admonition-title">Note</p> +<p>Do this part in a separate workflow.</p> +</div> +<p>Configure Github Actions to automatically deploy your application when you release it on the production branch of your github repository.</p> +<ul> +<li>It is a little bit overkill to launch an Ansible job to deploy on a single server. 
Therefore, you can simply ssh to your machine with your encrypted private key and only relaunch your HTTP API backend application.</li> +<li>If you like challenges and overkill solutions, run your Ansible script through a Docker image (one that provides Ansible, of course) and use a vault to encrypt your private data.</li> +</ul> +<p><img alt="architecture_image" src="../assets/architecture.png" /></p> +<div class="admonition check"> +<p class="admonition-title">Check</p> +<p>Full CI/CD pipeline in action.</p> +</div> +<p align="center">© Takima 2025</p> + + + </article> + </div> + </div> + + </main> + + <footer class="md-footer"> + + <nav class="md-footer__inner md-grid" aria-label="Footer"> + + + <a href="../ch2-discover-github-actions-tp/" class="md-footer__link md-footer__link--prev" aria-label="Previous: TP part 02 - Github Actions" rel="prev"> + <div class="md-footer__button md-icon"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12Z"/></svg> + </div> + <div class="md-footer__title"> + <div class="md-ellipsis"> + <span class="md-footer__direction"> + Previous + </span> + TP part 02 - Github Actions + </div> + </div> + </a> + + + + <a href="../ch4-extras-tp/" class="md-footer__link md-footer__link--next" aria-label="Next: TP Extras" rel="next"> + <div class="md-footer__title"> + <div class="md-ellipsis"> + <span class="md-footer__direction"> + Next + </span> + TP Extras + </div> + </div> + <div class="md-footer__button md-icon"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M4 11v2h12l-5.5 5.5 1.42 1.42L19.84 12l-7.92-7.92L10.5 5.5 16 11H4Z"/></svg> + </div> + </a> + + </nav> + + <div class="md-footer-meta md-typeset"> + <div class="md-footer-meta__inner md-grid"> + <div class="md-copyright"> + + + Made with + <a href="https://squidfunk.github.io/mkdocs-material/" target="_blank" rel="noopener"> + Material for MkDocs + </a> + +</div> + + </div> + </div> +</footer> + + </div> + <div class="md-dialog" data-md-component="dialog"> + <div class="md-dialog__inner md-typeset"></div> + </div> + <script id="__config" type="application/json">{"base": "..", "features": ["navigation.tabs", "navigation.instant"], "search": "../assets/javascripts/workers/search.2a1c317c.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.config.lang": "en", "search.config.pipeline": "trimmer, stopWordFilter", "search.config.separator": "[\\s\\-]+", "search.placeholder": "Search", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version.title": "Select version"}}</script> + + + <script src="../assets/javascripts/bundle.a6c66575.min.js"></script> + + + </body> +</html> \ No newline at end of file diff --git a/public/ch4-extras-tp/index.html b/public/ch4-extras-tp/index.html new file mode 100644 index 0000000000000000000000000000000000000000..9bb6dcde5d9cdf37d3f34cda18d049d905125d42 --- /dev/null +++ b/public/ch4-extras-tp/index.html @@ -0,0 +1,658 @@ + +<!doctype html> +<html lang="en" class="no-js"> + <head> + + <meta charset="utf-8"> + <meta name="viewport" content="width=device-width,initial-scale=1"> + + + + <link rel="icon" href="../assets/images/favicon.png"> 
+ <meta name="generator" content="mkdocs-1.3.0, mkdocs-material-8.2.15"> + + + + <title>TP Extras - Devops</title> + + + + <link rel="stylesheet" href="../assets/stylesheets/main.c382b1dc.min.css"> + + + <link rel="stylesheet" href="../assets/stylesheets/palette.cc9b2e1e.min.css"> + + + + <meta name="theme-color" content="#e92063"> + + + + + + + + + <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin> + <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,300i,400,400i,700,700i%7CRoboto+Mono:400,400i,700,700i&display=fallback"> + <style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style> + + + + <script>__md_scope=new URL("..",location),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script> + + + + + + </head> + + + + + + + + <body dir="ltr" data-md-color-scheme="" data-md-color-primary="pink" data-md-color-accent=""> + + + + <input class="md-toggle" data-md-toggle="drawer" type="checkbox" id="__drawer" autocomplete="off"> + <input class="md-toggle" data-md-toggle="search" type="checkbox" id="__search" autocomplete="off"> + <label class="md-overlay" for="__drawer"></label> + <div data-md-component="skip"> + + + <a href="#go-further" class="md-skip"> + Skip to content + </a> + + </div> + <div data-md-component="announce"> + + </div> + + + + +<header class="md-header" data-md-component="header"> + <nav class="md-header__inner md-grid" aria-label="Header"> + <a href=".." title="Devops" class="md-header__button md-logo" aria-label="Devops" data-md-component="logo"> + + <img src="../assets/logo.png" alt="logo"> + + </a> + <label class="md-header__button md-icon" for="__drawer"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M3 6h18v2H3V6m0 5h18v2H3v-2m0 5h18v2H3v-2Z"/></svg> + </label> + <div class="md-header__title" data-md-component="header-title"> + <div class="md-header__ellipsis"> + <div class="md-header__topic"> + <span class="md-ellipsis"> + Devops + </span> + </div> + <div class="md-header__topic" data-md-component="header-topic"> + <span class="md-ellipsis"> + + TP Extras + + </span> + </div> + </div> + </div> + + + + <label class="md-header__button md-icon" for="__search"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5Z"/></svg> + </label> + <div class="md-search" data-md-component="search" role="dialog"> + <label class="md-search__overlay" for="__search"></label> + <div class="md-search__inner" role="search"> + <form class="md-search__form" name="search"> + <input type="text" class="md-search__input" name="query" aria-label="Search" placeholder="Search" autocapitalize="off" autocorrect="off" autocomplete="off" spellcheck="false" data-md-component="search-query" required> + <label class="md-search__icon md-icon" for="__search"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5Z"/></svg> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 
12l7.92-7.92L13.5 5.5 8 11h12Z"/></svg> + </label> + <nav class="md-search__options" aria-label="Search"> + + <button type="reset" class="md-search__icon md-icon" aria-label="Clear" tabindex="-1"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M19 6.41 17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12 19 6.41Z"/></svg> + </button> + </nav> + + </form> + <div class="md-search__output"> + <div class="md-search__scrollwrap" data-md-scrollfix> + <div class="md-search-result" data-md-component="search-result"> + <div class="md-search-result__meta"> + Initializing search + </div> + <ol class="md-search-result__list"></ol> + </div> + </div> + </div> + </div> +</div> + + + </nav> + +</header> + + <div class="md-container" data-md-component="container"> + + + + + +<nav class="md-tabs" aria-label="Tabs" data-md-component="tabs"> + <div class="md-tabs__inner md-grid"> + <ul class="md-tabs__list"> + + + + + + + + + + + + <li class="md-tabs__item"> + <a href="../ch1-discover-docker-tp/" class="md-tabs__link md-tabs__link--active"> + TP + </a> + </li> + + + + + + + + + + + + <li class="md-tabs__item"> + <a href="../ch1-discover-docker-td/" class="md-tabs__link"> + TD + </a> + </li> + + + + </ul> + </div> +</nav> + + + + <main class="md-main" data-md-component="main"> + <div class="md-main__inner md-grid"> + + + + <div class="md-sidebar md-sidebar--primary" data-md-component="sidebar" data-md-type="navigation" > + <div class="md-sidebar__scrollwrap"> + <div class="md-sidebar__inner"> + + + + + +<nav class="md-nav md-nav--primary md-nav--lifted" aria-label="Navigation" data-md-level="0"> + <label class="md-nav__title" for="__drawer"> + <a href=".." title="Devops" class="md-nav__button md-logo" aria-label="Devops" data-md-component="logo"> + + <img src="../assets/logo.png" alt="logo"> + + </a> + Devops + </label> + + <ul class="md-nav__list" data-md-scrollfix> + + + + + + + + + + + + <li class="md-nav__item md-nav__item--active md-nav__item--nested"> + + + <input class="md-nav__toggle md-toggle" data-md-toggle="__nav_1" type="checkbox" id="__nav_1" checked> + + + + + <label class="md-nav__link" for="__nav_1"> + TP + <span class="md-nav__icon md-icon"></span> + </label> + + <nav class="md-nav" aria-label="TP" data-md-level="1"> + <label class="md-nav__title" for="__nav_1"> + <span class="md-nav__icon md-icon"></span> + TP + </label> + <ul class="md-nav__list" data-md-scrollfix> + + + + + + + <li class="md-nav__item"> + <a href="../ch1-discover-docker-tp/" class="md-nav__link"> + TP part 01 - Docker + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch2-discover-github-actions-tp/" class="md-nav__link"> + TP part 02 - Github Actions + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-tp/" class="md-nav__link"> + TP part 03 - Ansible + </a> + </li> + + + + + + + + + + + + <li class="md-nav__item md-nav__item--active"> + + <input class="md-nav__toggle md-toggle" data-md-toggle="toc" type="checkbox" id="__toc"> + + + + + + <label class="md-nav__link md-nav__link--active" for="__toc"> + TP Extras + <span class="md-nav__icon md-icon"></span> + </label> + + <a href="./" class="md-nav__link md-nav__link--active"> + TP Extras + </a> + + + +<nav class="md-nav md-nav--secondary" aria-label="Table of contents"> + + + + + + + <label class="md-nav__title" for="__toc"> + <span class="md-nav__icon md-icon"></span> + Table of contents + </label> + <ul class="md-nav__list" 
data-md-component="toc" data-md-scrollfix> + + <li class="md-nav__item"> + <a href="#goals" class="md-nav__link"> + Goals + </a> + +</li> + + <li class="md-nav__item"> + <a href="#load-balancing" class="md-nav__link"> + Load balancing + </a> + + <nav class="md-nav" aria-label="Load balancing"> + <ul class="md-nav__list"> + + <li class="md-nav__item"> + <a href="#redundancy" class="md-nav__link"> + Redundancy + </a> + +</li> + + <li class="md-nav__item"> + <a href="#actual-load-balancing" class="md-nav__link"> + Actual load balancing + </a> + +</li> + + <li class="md-nav__item"> + <a href="#grafana" class="md-nav__link"> + Grafana + </a> + +</li> + + </ul> + </nav> + +</li> + + </ul> + +</nav> + + </li> + + + + + </ul> + </nav> + </li> + + + + + + + + + + + + <li class="md-nav__item md-nav__item--nested"> + + + <input class="md-nav__toggle md-toggle" data-md-toggle="__nav_2" type="checkbox" id="__nav_2" > + + + + + <label class="md-nav__link" for="__nav_2"> + TD + <span class="md-nav__icon md-icon"></span> + </label> + + <nav class="md-nav" aria-label="TD" data-md-level="1"> + <label class="md-nav__title" for="__nav_2"> + <span class="md-nav__icon md-icon"></span> + TD + </label> + <ul class="md-nav__list" data-md-scrollfix> + + + + + + + <li class="md-nav__item"> + <a href="../ch1-discover-docker-td/" class="md-nav__link"> + TD part 01 - Docker + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch2-discover-github-actions-td/" class="md-nav__link"> + TD part 02 - Github Actions + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-td/" class="md-nav__link"> + TD part 03 - Ansible + </a> + </li> + + + + + </ul> + </nav> + </li> + + + + </ul> +</nav> + </div> + </div> + </div> + + + + <div class="md-sidebar md-sidebar--secondary" data-md-component="sidebar" data-md-type="toc" > + <div class="md-sidebar__scrollwrap"> + <div class="md-sidebar__inner"> + + +<nav class="md-nav md-nav--secondary" aria-label="Table of contents"> + + + + + + + <label class="md-nav__title" for="__toc"> + <span class="md-nav__icon md-icon"></span> + Table of contents + </label> + <ul class="md-nav__list" data-md-component="toc" data-md-scrollfix> + + <li class="md-nav__item"> + <a href="#goals" class="md-nav__link"> + Goals + </a> + +</li> + + <li class="md-nav__item"> + <a href="#load-balancing" class="md-nav__link"> + Load balancing + </a> + + <nav class="md-nav" aria-label="Load balancing"> + <ul class="md-nav__list"> + + <li class="md-nav__item"> + <a href="#redundancy" class="md-nav__link"> + Redundancy + </a> + +</li> + + <li class="md-nav__item"> + <a href="#actual-load-balancing" class="md-nav__link"> + Actual load balancing + </a> + +</li> + + <li class="md-nav__item"> + <a href="#grafana" class="md-nav__link"> + Grafana + </a> + +</li> + + </ul> + </nav> + +</li> + + </ul> + +</nav> + </div> + </div> + </div> + + + <div class="md-content" data-md-component="content"> + <article class="md-content__inner md-typeset"> + + + + +<h1 id="go-further">Go further</h1> +<div class="admonition check"> +<p class="admonition-title">Check</p> +<p>Checkpoint: call us to check your results (don’t stay blocked on a checkpoint if we are busy, we can check ⅔ checkpoints at the same time) </p> +</div> +<div class="admonition question"> +<p class="admonition-title">Question</p> +<p>Point to document/report </p> +</div> +<div class="admonition tip"> +<p class="admonition-title">Tip</p> +<p>Interesting information</p> +</div> +<h2 id="goals">Goals</h2> +<p>Setup 
the extra infrastructure tools and become the devops master.</p> +<h2 id="load-balancing">Load balancing</h2> +<h3 id="redundancy">Redundancy</h3> +<p>In this section our goal is to have a redundant backend. Instead of having a single backend application, let’s have 2 (why not more? Do not forget that your server does not have unlimited resources). </p> +<p>Set up 2 backend instances: backend-1/backend-2 or backend-blue/backend-green (my personal favorite). </p> +<h3 id="actual-load-balancing">Actual load balancing</h3> +<p>Set up your reverse proxy to do some actual load balancing between your 2 backends using <a href="https://httpd.apache.org/docs/2.4/en/mod/mod_proxy_balancer.html" target="_blank">mod_proxy_balancer</a>. </p> +<p>Start by setting up your load balancing on your dev environment; once you are satisfied with the behavior, deploy it to production. </p> +<div class="admonition tip"> +<p class="admonition-title">Tip</p> +<p>Ask yourself: why can we load balance between our backends so easily? Have you heard of sticky sessions or stateless apps? </p> +</div> +<div class="admonition check"> +<p class="admonition-title">Check</p> +<p>Checkpoint: do you load balance? </p> +</div> +<h3 id="grafana">Grafana</h3> +<p>Set up Grafana with Ansible to monitor your instance: <a href="https://galaxy.ansible.com/cloudalchemy/grafana" target="_blank">Ansible Grafana</a> </p> +<p>Useful links:</p> +<ul> +<li><a href="https://grafana.com/docs/grafana/latest/installation/configuration" target="_blank">Grafana installation</a> </li> +<li><a href="https://github.com/grafana/grafana" target="_blank">Grafana repository</a></li> +</ul> +<div class="admonition check"> +<p class="admonition-title">Check</p> +<p>Checkpoint: do you Grafana? </p> +</div> +<p align="center">© Takima 2025</p> + + + </article> + </div> + </div> + + </main> + + <footer class="md-footer"> + + <nav class="md-footer__inner md-grid" aria-label="Footer"> + + + <a href="../ch3-discover-ansible-tp/" class="md-footer__link md-footer__link--prev" aria-label="Previous: TP part 03 - Ansible" rel="prev"> + <div class="md-footer__button md-icon"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12Z"/></svg> + </div> + <div class="md-footer__title"> + <div class="md-ellipsis"> + <span class="md-footer__direction"> + Previous + </span> + TP part 03 - Ansible + </div> + </div> + </a> + + + + <a href="../ch1-discover-docker-td/" class="md-footer__link md-footer__link--next" aria-label="Next: TD part 01 - Docker" rel="next"> + <div class="md-footer__title"> + <div class="md-ellipsis"> + <span class="md-footer__direction"> + Next + </span> + TD part 01 - Docker + </div> + </div> + <div class="md-footer__button md-icon"> + <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M4 11v2h12l-5.5 5.5 1.42 1.42L19.84 12l-7.92-7.92L10.5 5.5 16 11H4Z"/></svg> + </div> + </a> + + </nav> + + <div class="md-footer-meta md-typeset"> + <div class="md-footer-meta__inner md-grid"> + <div class="md-copyright"> + + + Made with + <a href="https://squidfunk.github.io/mkdocs-material/" target="_blank" rel="noopener"> + Material for MkDocs + </a> + +</div> + + </div> + </div> +</footer> + + </div> + <div class="md-dialog" data-md-component="dialog"> + <div class="md-dialog__inner md-typeset"></div> + </div> + <script id="__config" type="application/json">{"base": "..", "features": ["navigation.tabs", "navigation.instant"], "search": 
"../assets/javascripts/workers/search.2a1c317c.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.config.lang": "en", "search.config.pipeline": "trimmer, stopWordFilter", "search.config.separator": "[\\s\\-]+", "search.placeholder": "Search", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version.title": "Select version"}}</script> + + + <script src="../assets/javascripts/bundle.a6c66575.min.js"></script> + + + </body> +</html> \ No newline at end of file diff --git a/public/cheatsheet/index.html b/public/cheatsheet/index.html index 20c92ce0aa4d25a2dcceb69cde83b68fc2081a40..6961a13ef05bf2b347f8c44626d9c10c1dc79798 100644 --- a/public/cheatsheet/index.html +++ b/public/cheatsheet/index.html @@ -271,6 +271,34 @@ + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-tp/" class="md-nav__link"> + TP part 03 - Ansible + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="../ch4-extras-tp/" class="md-nav__link"> + TP Extras + </a> + </li> + + + + </ul> </nav> </li> @@ -333,6 +361,20 @@ + + + + + + <li class="md-nav__item"> + <a href="../ch3-discover-ansible-td/" class="md-nav__link"> + TD part 03 - Ansible + </a> + </li> + + + + </ul> </nav> </li> diff --git a/public/index.html b/public/index.html index 1528771d86f5b4e8dc5b3a3aceb339138b919d13..8bad91a11061d3d3854559c2fb453e7c7dc024c7 100644 --- a/public/index.html +++ b/public/index.html @@ -271,6 +271,34 @@ + + + + + + <li class="md-nav__item"> + <a href="ch3-discover-ansible-tp/" class="md-nav__link"> + TP part 03 - Ansible + </a> + </li> + + + + + + + + + + <li class="md-nav__item"> + <a href="ch4-extras-tp/" class="md-nav__link"> + TP Extras + </a> + </li> + + + + </ul> </nav> </li> @@ -333,6 +361,20 @@ + + + + + + <li class="md-nav__item"> + <a href="ch3-discover-ansible-td/" class="md-nav__link"> + TD part 03 - Ansible + </a> + </li> + + + + </ul> </nav> </li> @@ -374,7 +416,7 @@ <h1 id="devops-in-action-guide">Devops in Action - Guide</h1> <p>For each step you have a TD to discover the subject and a TP to put it into practice.<br /> The TPs follow each other and the goal is to make you start from a local application and get to an application delivered in production and accessible to all.<br /> -For that we will give you each a server and a Java application.</p> +For that we will give you each a server and a Java application. 
</p> <p><strong>Part 1 - Docker session</strong></p> <p><a href="ch1-discover-docker-td/">Docker TDs are available here</a> <br /> <a href="ch1-discover-docker-tp/">Docker TPs are available here</a> <br /> @@ -382,7 +424,11 @@ For that we will give you each a server and a Java application.</p> <p><strong>Part 2 - Github Action session</strong></p> <p><a href="ch2-discover-github-actions-td/">Github Actions TDs are available here</a> <br /> <a href="ch2-discover-github-actions-tp/">Github Actions TPs are available here</a> <br /> -<a href="https://docs.google.com/presentation/d/15nEzGSqN1z0Uvy6sVu7svJGzD1I8Odb2kU8c6fGlUzw/edit?usp=sharing">Github Actions slides are availabe here</a> </p> +<a href="https://docs.google.com/presentation/d/15nEzGSqN1z0Uvy6sVu7svJGzD1I8Odb2kU8c6fGlUzw/edit?usp=sharing">Github Actions slides are available here</a> </p> +<p><strong>Part 3 - Ansible session</strong></p> +<p><a href="ch3-discover-ansible-td/">Ansible TDs are available here</a> <br /> +<a href="ch3-discover-ansible-tp/">Ansible TPs are available here</a> <br /> +<a href="https://docs.google.com/presentation/d/1_-5gmD1SGuOQiiXfFYR00uPDMDql0a_a2X6ZCLDMQ5k/edit?usp=sharing">Ansible slides are available here</a> </p> <p><strong>Please read the indications carefully, most of the time what you need is in front of your eyes!</strong></p> <p align="center">© Takima 2025</p> diff --git a/public/search/search_index.json b/public/search/search_index.json index 4fe54ef763a435768c5624933e43da9bab3e5188..1cd5012461a0de34f86f0a891c7c2040ba001330 100644 --- a/public/search/search_index.json +++ b/public/search/search_index.json @@ -1 +1 @@ -{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Devops in Action - Guide For each step you have a TD to discover the subject and a TP to put it into practice. The TPs follow each other and the goal is to make you start from a local application and get to an application delivered in production and accessible to all. For that we will give you each a server and a Java application. Part 1 - Docker session Docker TDs are available here Docker TPs are available here Docker slides are available here Part 2 - Github Action session Github Actions TDs are available here Github Actions TPs are available here Github Actions slides are availabe here Please read the indications carefully, most of the time what you need is in front of your eyes! 
\u00a9 Takima 2025","title":"Devops in Action - Guide"},{"location":"ch1-discover-docker-td/","text":"Discover Docker Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information Setup Prerequisites There are no specific skills needed for this tutorial beyond a basic comfort with the command line and using a text editor. Prior experience in developing web applications will be helpful but is not required. As you proceed further along the tutorial, we'll make use of https://cloud.docker.com/. Setting up your computer Getting all the tooling setup on your computer can be a daunting task, but getting Docker up and running on your favorite OS has become very easy. The getting started guide on Docker has detailed instructions for setting up Docker on Mac , Linux and Windows If you're using Docker for Windows make sure you have shared your drive. Important note If you're using an older version of Windows or MacOS you may need to use Docker Machine instead. All commands work in either bash or Powershell on Windows Once you are done installing Docker, test your Docker installation by running the following: docker run hello-world Unable to find image 'hello-world:latest' locally latest: Pulling from library/hello-world 03f4658f8b78: Pull complete a3ed95caeb02: Pull complete Digest: sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 Status: Downloaded newer image for hello-world:latest Hello from Docker. ... This message shows that your installation appears to be working correctly. Running your first container Now that you have everything setup, it's time to get our hands dirty. In this section, you are going to run an Alpine Linux container (a lightweight linux distribution) on your system and get a taste of the docker run command. To get started, let's run the following in our terminal: docker pull alpine Note Depending on how you've installed docker on your system, you might see a permission denied error after running the above command. Try the commands from the Getting Started tutorial to verify your installation . If you're on Linux, you may need to prefix your docker commands with sudo . Alternatively you can create a docker group to get rid of this issue. The pull command fetches the alpine image from the Docker registry and saves it in our system. You can use the docker images command to see a list of all images on your system. docker images REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE alpine latest c51f86c28340 4 weeks ago 1 .109 MB hello-world latest 690ed74de00f 5 months ago 960 B 1.1 Docker Run Great! Let's now run a Docker container based on this image. To do that you are going to use the docker run command. docker run alpine ls -l total 48 drwxr-xr-x 2 root root 4096 Mar 2 16:20 bin drwxr-xr-x 5 root root 360 Mar 18 09:47 dev drwxr-xr-x 13 root root 4096 Mar 18 09:47 etc drwxr-xr-x 2 root root 4096 Mar 2 16:20 home drwxr-xr-x 5 root root 4096 Mar 2 16:20 lib ...... ...... What happened? Behind the scenes, a lot of stuff happened. When you call run : 1. The Docker client contacts the Docker daemon. The Docker daemon checks local store if the image (alpine in this case) is available locally, and if not, downloads it from Docker Store. (Since we have issued docker pull alpine before, the download step is not necessary) The Docker daemon creates the container and then runs a command in that container. 
The Docker daemon streams the output of the command to the Docker client When you run docker run alpine , you provided a command ( ls -l ), so Docker started the command specified and you saw the listing. Let's try something more exciting. docker run alpine echo \"hello from alpine\" hello from alpine OK, that's some actual output. In this case, the Docker client dutifully ran the echo command in our alpine container and then exited it. If you've noticed, all of that happened pretty quickly. Imagine booting up a virtual machine, running a command and then killing it. Now you know why they say containers are fast! Try another command. docker run alpine /bin/sh Wait, nothing happened! Is that a bug? Well, no. These interactive shells will exit after running any scripted commands, unless they are run in an interactive terminal - so for this example to not exit, you need to docker run -it alpine /bin/sh . You are now inside the container shell and you can try out a few commands like ls -l , uname -a and others. Exit out of the container by giving the exit command. Ok, now it's time to see the docker ps command. The docker ps command shows you all containers that are currently running. docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES Since no containers are running, you see a blank line. Let's try a more useful variant: docker ps -a docker ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 36171a5da744 alpine \"/bin/sh\" 5 minutes ago Exited ( 0 ) 2 minutes ago fervent_newton a6a9d46d0b2f alpine \"echo 'hello from alp\" 6 minutes ago Exited ( 0 ) 6 minutes ago lonely_kilby ff0a5c3750b9 alpine \"ls -l\" 8 minutes ago Exited ( 0 ) 8 minutes ago elated_ramanujan c317d0a9e3d2 hello-world \"/hello\" 34 seconds ago Exited ( 0 ) 12 minutes ago stupefied_mcclintock What you see above is a list of all containers that you ran. Notice that the STATUS column shows that these containers exited a few minutes ago. You're probably wondering if there is a way to run more than just one command in a container. Let's try that now: docker run -it alpine /bin/sh / # ls bin dev etc home lib linuxrc media mnt proc root run sbin sys tmp usr var / # uname -a Linux 97916e8cb5dc 4.4.27-moby #1 SMP Wed Oct 26 14:01:48 UTC 2016 x86_64 Linux Running the run command with the -it flags attaches us to an interactive tty in the container. Now you can run as many commands in the container as you want. Take some time to run your favorite commands. Tip run -it is a very useful command to debug at the lowest level a container. That concludes a whirlwind tour of the docker run command which would most likely be the command you'll use most often. It makes sense to spend some time getting comfortable with it. To find out more about run , use docker run --help to see a list of all flags it supports. As you proceed further, we'll see a few more variants of docker run. 1.2 Terminology In the last section, you saw a lot of Docker-specific jargon which might be confusing to some. So before you go further, let's clarify some terminology that is used frequently in the Docker ecosystem. Images - The file system and configuration of our application which are used to create containers. To find out more about a Docker image, run docker inspect alpine . In the demo above, you used the docker pull command to download the alpine image. When you executed the command docker run hello-world , it also did a docker pull behind the scenes to download the hello-world image. 
Containers - Running instances of Docker images \u2014 containers run the actual applications. A container includes an application and all of its dependencies. It shares the kernel with other containers, and runs as an isolated process in user space on the host OS. You created a container using docker run which you did using the alpine image that you downloaded. A list of running containers can be seen using the docker ps command. Docker daemon - The background service running on the host that manages building, running and distributing Docker containers. Docker client - The command line tool that allows the user to interact with the Docker daemon. Docker Store - A registry of Docker images, where you can find trusted and enterprise ready containers, plugins, and Docker editions. You'll be using this later in this tutorial. 2.0 Webapps with Docker Great! So you have now looked at docker run , played with a Docker container and also got the hang of some terminology. Armed with all this knowledge, you are now ready to get to the real stuff \u2014 deploying web applications with Docker. 2.1 Run a static website in a container Note Code for this section is in this repo in the static-site directory Let's start by taking baby-steps. First, we'll use Docker to run a static website in a container. The website is based on an existing image. We'll pull a Docker image from Docker Store, run the container, and see how easy it is to set up a web server. The image that you are going to use is a single-page website that was already created for this demo and is available on the Docker Store as dockersamples/static-site . You can download and run the image directly in one go using docker run as follows. docker run -d dockersamples/static-site Note The current version of this image doesn't run without the -d flag. The -d flag enables detached mode, which detaches the running container from the terminal/shell and returns your prompt after the container starts. We are debugging the problem with this image but for now, use -d even for this first example. Tip -d is a very useful option. So, what happens when you run this command? Since the image doesn't exist on your Docker host, the Docker daemon first fetches it from the registry and then runs it as a container. Now that the server is running, do you see the website? What port is it running on? And more importantly, how do you access the container directly from our host machine? Actually, you probably won't be able to answer any of these questions yet! \u263a In this case, the client didn't tell the Docker Engine to publish any of the ports, so you need to re-run the docker run command to add this instruction. Let's re-run the command with some new flags to publish ports and pass your name to the container to customize the message displayed. We'll use the -d option again to run the container in detached mode. First, stop the container that you have just launched. In order to do this, we need the container ID. Since we ran the container in detached mode, we don't have to launch another terminal to do this. Run docker ps to view the running containers. docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES a7a0e504ca3e dockersamples/static-site \"/bin/sh -c 'cd /usr/\" 28 seconds ago Up 26 seconds 80 /tcp, 443 /tcp stupefied_mahavira Check out the CONTAINER ID column. You will need to use this CONTAINER ID value, a long sequence of characters, to identify the container you want to stop, and then to remove it. 
The example below provides the CONTAINER ID on our system; you should use the value that you see in your terminal. docker stop a7a0e504ca3e docker rm a7a0e504ca3e Note A cool feature is that you do not need to specify the entire CONTAINER ID . You can just specify a few starting characters and if it is unique among all the containers that you have launched, the Docker client will intelligently pick it up. Now, let's launch a container in detached mode as shown below: docker run --name static-site -e AUTHOR = \"Enter Your Name Here\" -d -P dockersamples/static-site e61d12292d69556eabe2a44c16cbd54486b2527e2ce4f95438e504afb7b02810 In the above command: -d will create a container with the process detached from our terminal -P will publish all the exposed container ports to random ports on the Docker host -e is how you pass environment variables to the container. --name allows you to specify a container name AUTHOR is the environment variable name and Your Name is the value that you can pass. Now you can see the ports by running the docker port command. docker port static-site 443 /tcp -> 0 .0.0.0:32772 80 /tcp -> 0 .0.0.0:32773 You can open your freshly created website on http://localhost:[YOUR_PORT_FOR 80/tcp] . For our example this is http://localhost:32773 . You can now open http://localhost:[YOUR_PORT_FOR 80/tcp] to see your site live! For our example, this is: http://192.168.99.100:32773 . You can also run a second webserver at the same time, specifying a custom host port mapping to the container's webserver. docker run --name static-site-2 -e AUTHOR = \"Enter Your Name Here\" -d -p 8888 :80 dockersamples/static-site To deploy this on a real server you would just need to install Docker, and run the above docker command (as in this case you can see the AUTHOR is Docker which we passed as an environment variable). Now that you've seen how to run a webserver inside a Docker container, how do you create your own Docker image? This is the question we'll explore in the next section. But first, let's stop and remove the containers since you won't be using them anymore. docker stop static-site docker rm static-site Let's use a shortcut to remove the second site: docker rm -f static-site-2 Tip rm -f is a very useful option Run docker ps to make sure the containers are gone. docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 2.2 Docker Images In this section, let's dive deeper into what Docker images are. You will build your own image, use that image to run an application locally, and finally, push some of your own images to Docker Cloud. Docker images are the basis of containers. In the previous example, you pulled the dockersamples/static-site image from the. registry and asked the Docker client to run a container based on that image. To see the list of images that are available locally on your system, run the docker images command. 
docker images REPOSITORY TAG IMAGE ID CREATED SIZE dockersamples/static-site latest 92a386b6e686 2 hours ago 190 .5 MB nginx latest af4b3d7d5401 3 hours ago 190 .5 MB python 2 .7 1c32174fd534 14 hours ago 676 .8 MB postgres 9 .4 88d845ac7a88 14 hours ago 263 .6 MB containous/traefik latest 27b4e0c6b2fd 4 days ago 20 .75 MB node 0 .10 42426a5cba5f 6 days ago 633 .7 MB redis latest 4f5f397d4b7c 7 days ago 177 .5 MB mongo latest 467eb21035a8 7 days ago 309 .7 MB alpine 3 .3 70c557e50ed6 8 days ago 4 .794 MB java 7 21f6ce84e43c 8 days ago 587 .7 MB Above is a list of images that I've pulled from the registry and those I've created myself (we'll shortly see how). You will have a different list of images on your machine. The TAG refers to a particular snapshot of the image and the ID is the corresponding unique identifier for that image. For simplicity, you can think of an image akin to a git repository - images can be committed with changes and have multiple. versions. When you do not provide a specific version number, the client defaults to latest. For example you could pull a specific version of ubuntu image as follows: docker pull ubuntu:12.04 If you do not specify the version number of the image then, as mentioned, the Docker client will default to a version named latest . So for example, the docker pull command given below will pull an image named ubuntu:latest : docker pull ubuntu To get a new Docker image you can either get it from a registry (such as the Docker Store) or create your own. There are hundreds of thousands of images available on Docker Store . You can also search for images directly from the command line using docker search . An important distinction with regard to images is between base images and child images . Base images are images that have no parent images, usually images with an OS like ubuntu, alpine or debian. Child images are images that build on base images and add additional functionality. Another key concept is the idea of official images and user images. (Both of which can be base images or child images.) Official images are Docker sanctioned images. Docker, Inc. sponsors a dedicated team that is responsible for reviewing and publishing all Official Repositories content. This team works in collaboration with upstream software maintainers, security experts, and the broader Docker community. These are not prefixed by an organization or user name. In the list of images above, the python , node , alpine and nginx images are official (base) images. To find out more about them, check out the Official Images Documentation . User images are images created and shared by users like you. They build on base images and add additional functionality. Typically these are formatted as user/image-name . The user value in the image name is your Docker Store user or organization name. 2.3 Create your first image Now that you have a better understanding of images, it's time to create your own. Our main objective here is to create an image that sandboxes a small Flask application. The goal of this exercise is to create a Docker image which will run a Flask app. We'll do this by first pulling together the components for a random cat picture generator built with Python Flask, then dockerizing it by writing a Dockerfile . Finally, we'll build the image, and then run it. 2.3.1 Create a Python Flask app that displays random cat pix. 
For the purposes of this workshop, we've created a fun little Python Flask app that displays a random cat .gif every time it is loaded - because, you know, who doesn't like cats? Start by creating a directory called flask-app where we'll create the following files: app.py requirements.txt templates/index.html Dockerfile Make sure to cd flask-app before you start creating the files, because you don't want to start adding a whole bunch of other random files to your image. app.py Create the app.py with the following content: from flask import Flask , render_template import random app = Flask ( __name__ ) # list of cat images images = [ \"https://c.tenor.com/GTcT7HODLRgAAAAM/smiling-cat-creepy-cat.gif\" , \"https://media0.giphy.com/media/10dU7AN7xsi1I4/giphy.webp?cid=ecf05e47gk63rd81vzlot57qmebr7drtgf6a3khmzvjsdtu7&rid=giphy.webp&ct=g\" , \"https://media0.giphy.com/media/S6VGjvmFRu5Qk/giphy.webp?cid=ecf05e478yofpawrhffnnvb3sgjkos96vyfo5mtqhds35as6&rid=giphy.webp&ct=g\" , \"https://media3.giphy.com/media/JIX9t2j0ZTN9S/200w.webp?cid=ecf05e47gk63rd81vzlot57qmebr7drtgf6a3khmzvjsdtu7&rid=200w.webp&ct=g\" ] @app . route ( '/' ) def index (): url = random . choice ( images ) return render_template ( 'index.html' , url = url ) if __name__ == \"__main__\" : app . run ( host = \"0.0.0.0\" ) requirements.txt In order to install the Python modules required for our app, we need to create a file called requirements.txt and add the following line to that file: Flask==3.1.0 templates/index.html Create a directory called templates and create an index.html file in that directory with the following content in it: < html > < head > < style type = \"text/css\" > body { background : black ; color : white ; } div . container { max-width : 500 px ; margin : 100 px auto ; border : 20 px solid white ; padding : 10 px ; text-align : center ; } h4 { text-transform : uppercase ; } </ style > </ head > < body > < div class = \"container\" > < h4 > Cat Gif of the day </ h4 > < img src = \"{{url}}\" /> < p >< small > Courtesy: < a href = \"http://www.buzzfeed.com/copyranter/the-best-cat-gif-post-in-the-history-of-cat-gifs\" > Buzzfeed </ a ></ small ></ p > </ div > </ body > </ html > 2.3.2 Write a Dockerfile We want to create a Docker image with this web app. As mentioned above, all user images are based on a base image. Since our application is written in Python, we will build our own Python image based on Alpine . We'll do that using a Dockerfile. A Dockerfile is a text file that contains a list of commands that the Docker daemon calls while creating an image. The Dockerfile contains all the information that Docker needs to know to run the app \u2014 a base Docker image to run from, location of your project code, any dependencies it has, and what commands to run at start-up. It is a simple way to automate the image creation process. The best part is that the commands you write in a Dockerfile are almost identical to their equivalent Linux commands. This means you don't really have to learn new syntax to create your own Dockerfiles. 1 - Create a file called Dockerfile, and add content to it as described below. We'll start by specifying our base image, using the FROM keyword. We are using alpine:3.21.0, a lightweight Linux distribution that helps keep our container small and efficient: FROM alpine:3.21.0 2 - Next, we need to install Python 3, pip, and other system dependencies required for our application. The apk add command is used to install packages in Alpine Linux. We use --no-cache to prevent unnecessary image bloat. 
Add the following RUN command: RUN apk add --no-cache build-base libffi-dev openssl-dev py3-pip python3 3 - Now, we set the working directory inside the container. This ensures that all subsequent commands run within this directory: WORKDIR /usr/src/app 4 - To create an isolated Python environment, we set up a virtual environment inside our container. This helps prevent conflicts between system-wide and project-specific dependencies: RUN python3 -m venv venv 5 - To ensure that all commands within the container use the virtual environment by default, we modify the PATH environment variable: ENV PATH = \"/usr/src/app/venv/bin: $PATH \" 6 - Next, we copy the application's dependencies file (requirements.txt) into the container and install the necessary Python packages. We also upgrade pip to the latest version to ensure compatibility: COPY requirements.txt ./ RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r requirements.txt 7 - Copy the files you have created earlier into our image by using COPY command. COPY app.py ./ COPY templates/index.html ./templates/ 8 - Since our Flask application runs on port 5000, we specify that this port should be exposed. This does not automatically publish the port but serves as documentation and can be used by orchestration tools: EXPOSE 5000 9 - The last step is the command for running the application which is simply - python ./app.py . Use the CMD command to do that: CMD [ \"python\" , \"/usr/src/app/app.py\" ] The primary purpose of CMD is to tell the container which command it should run by default when it is started. 10 - Verify your Dockerfile. Our Dockerfile is now ready. This is how it looks: # our base image FROM alpine:3.21.0 # Install Python 3, pip, and system dependencies RUN apk add --no-cache build-base libffi-dev openssl-dev py3-pip python3 # Set the working directory WORKDIR /usr/src/app # Create and activate a virtual environment RUN python3 -m venv venv # Use the virtual environment for all commands ENV PATH = \"/usr/src/app/venv/bin: $PATH \" # Copy and install dependencies COPY requirements.txt ./ RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r requirements.txt # Copy application files COPY app.py ./ COPY templates/index.html ./templates/ # Expose the application port EXPOSE 5000 # Run the application inside the virtual environment CMD [ \"python\" , \"/usr/src/app/app.py\" ] 2.3.3 Build the image Now that you have your Dockerfile , you can build your image. The docker build command does the heavy-lifting of creating a docker image from a Dockerfile . When you run the docker build command given below, make sure to replace <YOUR_USERNAME> with your username. This username should be the same one you created when registering on Docker Cloud . If you haven't done that yet, please go ahead and create an account. The docker build command is quite simple - it takes an optional tag name with the -t flag, and the location of the directory containing the Dockerfile - the . indicates the current directory: docker build -t <YOUR_USERNAME>/myfirstapp . If you don't have the alpine:3.21.0 image, the client will first pull the image and then create your image. Therefore, your output on running the command will look different from mine. If everything went well, your image should be ready! Run docker images and see if your image ( <YOUR_USERNAME>/myfirstapp ) shows. 2.3.4 Run your image The next step in this section is to run the image and see if it actually works. 
docker run -p 8888 :5000 --name myfirstapp YOUR_USERNAME/myfirstapp * Serving Flask app 'app' * Debug mode: off WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead. * Running on all addresses ( 0 .0.0.0 ) * Running on http://127.0.0.1:5000 * Running on http://172.17.0.2:5000 Press CTRL+C to quit Head over to http://localhost:8888 and your app should be live. Note If you are using Docker Machine, you may need to open up another terminal and determine the container ip address using docker-machine ip default . Hit the Refresh button in the web browser to see a few more cat images. Check Show us your running flask-app ! 2.3.4 Dockerfile commands summary Here's a quick summary of the few basic commands we used in our Dockerfile. FROM starts the Dockerfile. It is a requirement that the Dockerfile must start with the FROM command. Images are created in layers, which means you can use another image as the base image for your own. The FROM command defines your base layer. As arguments, it takes the name of the image. Optionally, you can add the Docker Cloud username of the maintainer and image version, in the format username/imagename:version . RUN is used to build up the Image you're creating. For each RUN command, Docker will run the command then create a new layer of the image. This way you can roll back your image to previous states easily. The syntax for a RUN instruction is to place the full text of the shell command after the RUN (e.g., RUN mkdir /user/local/foo ). This will automatically run in a /bin/sh shell. You can define a different shell like this: RUN /bin/bash -c 'mkdir /user/local/foo ' COPY copies local files into the container. CMD defines the commands that will run on the Image at start-up. Unlike a RUN , this does not create a new layer for the Image, but simply runs the command. There can only be one CMD per a Dockerfile/Image. If you need to run multiple commands, the best way to do that is to have the CMD run a script. CMD requires that you tell it where to run the command, unlike RUN . So example CMD commands would be: CMD [ \"python\" , \"./app.py\" ] CMD [ \"/bin/bash\" , \"echo\" , \"Hello World\" ] EXPOSE creates a hint for users of an image which ports provide services. It is included in the information which can be retrieved via docker inspect <container-id> . Note The EXPOSE command does not actually make any ports accessible to the host! Instead, this requires publishing ports by means of the -p flag when using docker run . Note If you want to learn more about Dockerfiles, check out Best practices for writing Dockerfiles . (source: https://github.com/docker/labs/tree/master/beginner ) Now that you know how to run docker container and create Dockerfiles let\u2019s move on to the practical part.","title":"TD part 01 - Docker"},{"location":"ch1-discover-docker-td/#discover-docker","text":"Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information","title":"Discover Docker"},{"location":"ch1-discover-docker-td/#setup","text":"","title":"Setup"},{"location":"ch1-discover-docker-td/#prerequisites","text":"There are no specific skills needed for this tutorial beyond a basic comfort with the command line and using a text editor. Prior experience in developing web applications will be helpful but is not required. 
As you proceed further along the tutorial, we'll make use of https://cloud.docker.com/.","title":"Prerequisites"},{"location":"ch1-discover-docker-td/#setting-up-your-computer","text":"Getting all the tooling setup on your computer can be a daunting task, but getting Docker up and running on your favorite OS has become very easy. The getting started guide on Docker has detailed instructions for setting up Docker on Mac , Linux and Windows If you're using Docker for Windows make sure you have shared your drive. Important note If you're using an older version of Windows or MacOS you may need to use Docker Machine instead. All commands work in either bash or Powershell on Windows Once you are done installing Docker, test your Docker installation by running the following: docker run hello-world Unable to find image 'hello-world:latest' locally latest: Pulling from library/hello-world 03f4658f8b78: Pull complete a3ed95caeb02: Pull complete Digest: sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 Status: Downloaded newer image for hello-world:latest Hello from Docker. ... This message shows that your installation appears to be working correctly.","title":"Setting up your computer"},{"location":"ch1-discover-docker-td/#running-your-first-container","text":"Now that you have everything setup, it's time to get our hands dirty. In this section, you are going to run an Alpine Linux container (a lightweight linux distribution) on your system and get a taste of the docker run command. To get started, let's run the following in our terminal: docker pull alpine Note Depending on how you've installed docker on your system, you might see a permission denied error after running the above command. Try the commands from the Getting Started tutorial to verify your installation . If you're on Linux, you may need to prefix your docker commands with sudo . Alternatively you can create a docker group to get rid of this issue. The pull command fetches the alpine image from the Docker registry and saves it in our system. You can use the docker images command to see a list of all images on your system. docker images REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE alpine latest c51f86c28340 4 weeks ago 1 .109 MB hello-world latest 690ed74de00f 5 months ago 960 B","title":"Running your first container"},{"location":"ch1-discover-docker-td/#11-docker-run","text":"Great! Let's now run a Docker container based on this image. To do that you are going to use the docker run command. docker run alpine ls -l total 48 drwxr-xr-x 2 root root 4096 Mar 2 16:20 bin drwxr-xr-x 5 root root 360 Mar 18 09:47 dev drwxr-xr-x 13 root root 4096 Mar 18 09:47 etc drwxr-xr-x 2 root root 4096 Mar 2 16:20 home drwxr-xr-x 5 root root 4096 Mar 2 16:20 lib ...... ...... What happened? Behind the scenes, a lot of stuff happened. When you call run : 1. The Docker client contacts the Docker daemon. The Docker daemon checks local store if the image (alpine in this case) is available locally, and if not, downloads it from Docker Store. (Since we have issued docker pull alpine before, the download step is not necessary) The Docker daemon creates the container and then runs a command in that container. The Docker daemon streams the output of the command to the Docker client When you run docker run alpine , you provided a command ( ls -l ), so Docker started the command specified and you saw the listing. Let's try something more exciting. docker run alpine echo \"hello from alpine\" hello from alpine OK, that's some actual output. 
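Tip (a supplementary note, not part of the original lab): for one-off commands like the echo above, you can add the --rm flag so that the container is removed automatically once it finishes, which keeps your container list from filling up with throwaway entries. A minimal example:

```bash
# Same one-off command, but the exited container is cleaned up automatically
docker run --rm alpine echo "hello from alpine"
```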
In this case, the Docker client dutifully ran the echo command in our alpine container and then exited it. If you've noticed, all of that happened pretty quickly. Imagine booting up a virtual machine, running a command and then killing it. Now you know why they say containers are fast! Try another command. docker run alpine /bin/sh Wait, nothing happened! Is that a bug? Well, no. These interactive shells will exit after running any scripted commands, unless they are run in an interactive terminal - so for this example to not exit, you need to docker run -it alpine /bin/sh . You are now inside the container shell and you can try out a few commands like ls -l , uname -a and others. Exit out of the container by giving the exit command. Ok, now it's time to see the docker ps command. The docker ps command shows you all containers that are currently running. docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES Since no containers are running, you see a blank line. Let's try a more useful variant: docker ps -a docker ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 36171a5da744 alpine \"/bin/sh\" 5 minutes ago Exited ( 0 ) 2 minutes ago fervent_newton a6a9d46d0b2f alpine \"echo 'hello from alp\" 6 minutes ago Exited ( 0 ) 6 minutes ago lonely_kilby ff0a5c3750b9 alpine \"ls -l\" 8 minutes ago Exited ( 0 ) 8 minutes ago elated_ramanujan c317d0a9e3d2 hello-world \"/hello\" 34 seconds ago Exited ( 0 ) 12 minutes ago stupefied_mcclintock What you see above is a list of all containers that you ran. Notice that the STATUS column shows that these containers exited a few minutes ago. You're probably wondering if there is a way to run more than just one command in a container. Let's try that now: docker run -it alpine /bin/sh / # ls bin dev etc home lib linuxrc media mnt proc root run sbin sys tmp usr var / # uname -a Linux 97916e8cb5dc 4.4.27-moby #1 SMP Wed Oct 26 14:01:48 UTC 2016 x86_64 Linux Running the run command with the -it flags attaches us to an interactive tty in the container. Now you can run as many commands in the container as you want. Take some time to run your favorite commands. Tip run -it is a very useful command to debug at the lowest level a container. That concludes a whirlwind tour of the docker run command which would most likely be the command you'll use most often. It makes sense to spend some time getting comfortable with it. To find out more about run , use docker run --help to see a list of all flags it supports. As you proceed further, we'll see a few more variants of docker run.","title":"1.1 Docker Run"},{"location":"ch1-discover-docker-td/#12-terminology","text":"In the last section, you saw a lot of Docker-specific jargon which might be confusing to some. So before you go further, let's clarify some terminology that is used frequently in the Docker ecosystem. Images - The file system and configuration of our application which are used to create containers. To find out more about a Docker image, run docker inspect alpine . In the demo above, you used the docker pull command to download the alpine image. When you executed the command docker run hello-world , it also did a docker pull behind the scenes to download the hello-world image. Containers - Running instances of Docker images \u2014 containers run the actual applications. A container includes an application and all of its dependencies. It shares the kernel with other containers, and runs as an isolated process in user space on the host OS. 
You created a container using docker run which you did using the alpine image that you downloaded. A list of running containers can be seen using the docker ps command. Docker daemon - The background service running on the host that manages building, running and distributing Docker containers. Docker client - The command line tool that allows the user to interact with the Docker daemon. Docker Store - A registry of Docker images, where you can find trusted and enterprise ready containers, plugins, and Docker editions. You'll be using this later in this tutorial.","title":"1.2 Terminology"},{"location":"ch1-discover-docker-td/#20-webapps-with-docker","text":"Great! So you have now looked at docker run , played with a Docker container and also got the hang of some terminology. Armed with all this knowledge, you are now ready to get to the real stuff \u2014 deploying web applications with Docker.","title":"2.0 Webapps with Docker"},{"location":"ch1-discover-docker-td/#21-run-a-static-website-in-a-container","text":"Note Code for this section is in this repo in the static-site directory Let's start by taking baby-steps. First, we'll use Docker to run a static website in a container. The website is based on an existing image. We'll pull a Docker image from Docker Store, run the container, and see how easy it is to set up a web server. The image that you are going to use is a single-page website that was already created for this demo and is available on the Docker Store as dockersamples/static-site . You can download and run the image directly in one go using docker run as follows. docker run -d dockersamples/static-site Note The current version of this image doesn't run without the -d flag. The -d flag enables detached mode, which detaches the running container from the terminal/shell and returns your prompt after the container starts. We are debugging the problem with this image but for now, use -d even for this first example. Tip -d is a very useful option. So, what happens when you run this command? Since the image doesn't exist on your Docker host, the Docker daemon first fetches it from the registry and then runs it as a container. Now that the server is running, do you see the website? What port is it running on? And more importantly, how do you access the container directly from our host machine? Actually, you probably won't be able to answer any of these questions yet! \u263a In this case, the client didn't tell the Docker Engine to publish any of the ports, so you need to re-run the docker run command to add this instruction. Let's re-run the command with some new flags to publish ports and pass your name to the container to customize the message displayed. We'll use the -d option again to run the container in detached mode. First, stop the container that you have just launched. In order to do this, we need the container ID. Since we ran the container in detached mode, we don't have to launch another terminal to do this. Run docker ps to view the running containers. docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES a7a0e504ca3e dockersamples/static-site \"/bin/sh -c 'cd /usr/\" 28 seconds ago Up 26 seconds 80 /tcp, 443 /tcp stupefied_mahavira Check out the CONTAINER ID column. You will need to use this CONTAINER ID value, a long sequence of characters, to identify the container you want to stop, and then to remove it. The example below provides the CONTAINER ID on our system; you should use the value that you see in your terminal. 
docker stop a7a0e504ca3e docker rm a7a0e504ca3e Note A cool feature is that you do not need to specify the entire CONTAINER ID . You can just specify a few starting characters and if it is unique among all the containers that you have launched, the Docker client will intelligently pick it up. Now, let's launch a container in detached mode as shown below: docker run --name static-site -e AUTHOR = \"Enter Your Name Here\" -d -P dockersamples/static-site e61d12292d69556eabe2a44c16cbd54486b2527e2ce4f95438e504afb7b02810 In the above command: -d will create a container with the process detached from our terminal -P will publish all the exposed container ports to random ports on the Docker host -e is how you pass environment variables to the container. --name allows you to specify a container name AUTHOR is the environment variable name and Your Name is the value that you can pass. Now you can see the ports by running the docker port command. docker port static-site 443 /tcp -> 0 .0.0.0:32772 80 /tcp -> 0 .0.0.0:32773 You can open your freshly created website on http://localhost:[YOUR_PORT_FOR 80/tcp] . For our example this is http://localhost:32773 . You can now open http://localhost:[YOUR_PORT_FOR 80/tcp] to see your site live! For our example, this is: http://192.168.99.100:32773 . You can also run a second webserver at the same time, specifying a custom host port mapping to the container's webserver. docker run --name static-site-2 -e AUTHOR = \"Enter Your Name Here\" -d -p 8888 :80 dockersamples/static-site To deploy this on a real server you would just need to install Docker, and run the above docker command (as in this case you can see the AUTHOR is Docker which we passed as an environment variable). Now that you've seen how to run a webserver inside a Docker container, how do you create your own Docker image? This is the question we'll explore in the next section. But first, let's stop and remove the containers since you won't be using them anymore. docker stop static-site docker rm static-site Let's use a shortcut to remove the second site: docker rm -f static-site-2 Tip rm -f is a very useful option Run docker ps to make sure the containers are gone. docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES","title":"2.1 Run a static website in a container"},{"location":"ch1-discover-docker-td/#22-docker-images","text":"In this section, let's dive deeper into what Docker images are. You will build your own image, use that image to run an application locally, and finally, push some of your own images to Docker Cloud. Docker images are the basis of containers. In the previous example, you pulled the dockersamples/static-site image from the. registry and asked the Docker client to run a container based on that image. To see the list of images that are available locally on your system, run the docker images command. docker images REPOSITORY TAG IMAGE ID CREATED SIZE dockersamples/static-site latest 92a386b6e686 2 hours ago 190 .5 MB nginx latest af4b3d7d5401 3 hours ago 190 .5 MB python 2 .7 1c32174fd534 14 hours ago 676 .8 MB postgres 9 .4 88d845ac7a88 14 hours ago 263 .6 MB containous/traefik latest 27b4e0c6b2fd 4 days ago 20 .75 MB node 0 .10 42426a5cba5f 6 days ago 633 .7 MB redis latest 4f5f397d4b7c 7 days ago 177 .5 MB mongo latest 467eb21035a8 7 days ago 309 .7 MB alpine 3 .3 70c557e50ed6 8 days ago 4 .794 MB java 7 21f6ce84e43c 8 days ago 587 .7 MB Above is a list of images that I've pulled from the registry and those I've created myself (we'll shortly see how). 
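Tip (optional housekeeping, not required by the tutorial): as your local image list grows, a few variants of docker images come in handy. A quick sketch:

```bash
# Show only one repository, or only the image IDs
docker images alpine
docker images -q

# See how much disk space images, containers and volumes are using
docker system df
```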
You will have a different list of images on your machine. The TAG refers to a particular snapshot of the image and the ID is the corresponding unique identifier for that image. For simplicity, you can think of an image akin to a git repository - images can be committed with changes and have multiple. versions. When you do not provide a specific version number, the client defaults to latest. For example you could pull a specific version of ubuntu image as follows: docker pull ubuntu:12.04 If you do not specify the version number of the image then, as mentioned, the Docker client will default to a version named latest . So for example, the docker pull command given below will pull an image named ubuntu:latest : docker pull ubuntu To get a new Docker image you can either get it from a registry (such as the Docker Store) or create your own. There are hundreds of thousands of images available on Docker Store . You can also search for images directly from the command line using docker search . An important distinction with regard to images is between base images and child images . Base images are images that have no parent images, usually images with an OS like ubuntu, alpine or debian. Child images are images that build on base images and add additional functionality. Another key concept is the idea of official images and user images. (Both of which can be base images or child images.) Official images are Docker sanctioned images. Docker, Inc. sponsors a dedicated team that is responsible for reviewing and publishing all Official Repositories content. This team works in collaboration with upstream software maintainers, security experts, and the broader Docker community. These are not prefixed by an organization or user name. In the list of images above, the python , node , alpine and nginx images are official (base) images. To find out more about them, check out the Official Images Documentation . User images are images created and shared by users like you. They build on base images and add additional functionality. Typically these are formatted as user/image-name . The user value in the image name is your Docker Store user or organization name.","title":"2.2 Docker Images"},{"location":"ch1-discover-docker-td/#23-create-your-first-image","text":"Now that you have a better understanding of images, it's time to create your own. Our main objective here is to create an image that sandboxes a small Flask application. The goal of this exercise is to create a Docker image which will run a Flask app. We'll do this by first pulling together the components for a random cat picture generator built with Python Flask, then dockerizing it by writing a Dockerfile . Finally, we'll build the image, and then run it.","title":"2.3 Create your first image"},{"location":"ch1-discover-docker-td/#231-create-a-python-flask-app-that-displays-random-cat-pix","text":"For the purposes of this workshop, we've created a fun little Python Flask app that displays a random cat .gif every time it is loaded - because, you know, who doesn't like cats? 
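Before creating the project files in the next step, a quick aside on the docker search command and image tags mentioned above; this is purely illustrative:

```bash
# Search Docker Hub from the command line (limit the output to 5 results)
docker search --limit 5 alpine

# Pull a specific tag instead of the implicit :latest
docker pull alpine:3.3
```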
Start by creating a directory called flask-app where we'll create the following files: app.py requirements.txt templates/index.html Dockerfile Make sure to cd flask-app before you start creating the files, because you don't want to start adding a whole bunch of other random files to your image.","title":"2.3.1 Create a Python Flask app that displays random cat pix."},{"location":"ch1-discover-docker-td/#apppy","text":"Create the app.py with the following content: from flask import Flask , render_template import random app = Flask ( __name__ ) # list of cat images images = [ \"https://c.tenor.com/GTcT7HODLRgAAAAM/smiling-cat-creepy-cat.gif\" , \"https://media0.giphy.com/media/10dU7AN7xsi1I4/giphy.webp?cid=ecf05e47gk63rd81vzlot57qmebr7drtgf6a3khmzvjsdtu7&rid=giphy.webp&ct=g\" , \"https://media0.giphy.com/media/S6VGjvmFRu5Qk/giphy.webp?cid=ecf05e478yofpawrhffnnvb3sgjkos96vyfo5mtqhds35as6&rid=giphy.webp&ct=g\" , \"https://media3.giphy.com/media/JIX9t2j0ZTN9S/200w.webp?cid=ecf05e47gk63rd81vzlot57qmebr7drtgf6a3khmzvjsdtu7&rid=200w.webp&ct=g\" ] @app . route ( '/' ) def index (): url = random . choice ( images ) return render_template ( 'index.html' , url = url ) if __name__ == \"__main__\" : app . run ( host = \"0.0.0.0\" )","title":"app.py"},{"location":"ch1-discover-docker-td/#requirementstxt","text":"In order to install the Python modules required for our app, we need to create a file called requirements.txt and add the following line to that file: Flask==3.1.0","title":"requirements.txt"},{"location":"ch1-discover-docker-td/#templatesindexhtml","text":"Create a directory called templates and create an index.html file in that directory with the following content in it: < html > < head > < style type = \"text/css\" > body { background : black ; color : white ; } div . container { max-width : 500 px ; margin : 100 px auto ; border : 20 px solid white ; padding : 10 px ; text-align : center ; } h4 { text-transform : uppercase ; } </ style > </ head > < body > < div class = \"container\" > < h4 > Cat Gif of the day </ h4 > < img src = \"{{url}}\" /> < p >< small > Courtesy: < a href = \"http://www.buzzfeed.com/copyranter/the-best-cat-gif-post-in-the-history-of-cat-gifs\" > Buzzfeed </ a ></ small ></ p > </ div > </ body > </ html >","title":"templates/index.html"},{"location":"ch1-discover-docker-td/#232-write-a-dockerfile","text":"We want to create a Docker image with this web app. As mentioned above, all user images are based on a base image. Since our application is written in Python, we will build our own Python image based on Alpine . We'll do that using a Dockerfile. A Dockerfile is a text file that contains a list of commands that the Docker daemon calls while creating an image. The Dockerfile contains all the information that Docker needs to know to run the app \u2014 a base Docker image to run from, location of your project code, any dependencies it has, and what commands to run at start-up. It is a simple way to automate the image creation process. The best part is that the commands you write in a Dockerfile are almost identical to their equivalent Linux commands. This means you don't really have to learn new syntax to create your own Dockerfiles. 1 - Create a file called Dockerfile, and add content to it as described below. We'll start by specifying our base image, using the FROM keyword. 
We are using alpine:3.21.0, a lightweight Linux distribution that helps keep our container small and efficient: FROM alpine:3.21.0 2 - Next, we need to install Python 3, pip, and other system dependencies required for our application. The apk add command is used to install packages in Alpine Linux. We use --no-cache to prevent unnecessary image bloat. Add the following RUN command: RUN apk add --no-cache build-base libffi-dev openssl-dev py3-pip python3 3 - Now, we set the working directory inside the container. This ensures that all subsequent commands run within this directory: WORKDIR /usr/src/app 4 - To create an isolated Python environment, we set up a virtual environment inside our container. This helps prevent conflicts between system-wide and project-specific dependencies: RUN python3 -m venv venv 5 - To ensure that all commands within the container use the virtual environment by default, we modify the PATH environment variable: ENV PATH = \"/usr/src/app/venv/bin: $PATH \" 6 - Next, we copy the application's dependencies file (requirements.txt) into the container and install the necessary Python packages. We also upgrade pip to the latest version to ensure compatibility: COPY requirements.txt ./ RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r requirements.txt 7 - Copy the files you have created earlier into our image by using COPY command. COPY app.py ./ COPY templates/index.html ./templates/ 8 - Since our Flask application runs on port 5000, we specify that this port should be exposed. This does not automatically publish the port but serves as documentation and can be used by orchestration tools: EXPOSE 5000 9 - The last step is the command for running the application which is simply - python ./app.py . Use the CMD command to do that: CMD [ \"python\" , \"/usr/src/app/app.py\" ] The primary purpose of CMD is to tell the container which command it should run by default when it is started. 10 - Verify your Dockerfile. Our Dockerfile is now ready. This is how it looks: # our base image FROM alpine:3.21.0 # Install Python 3, pip, and system dependencies RUN apk add --no-cache build-base libffi-dev openssl-dev py3-pip python3 # Set the working directory WORKDIR /usr/src/app # Create and activate a virtual environment RUN python3 -m venv venv # Use the virtual environment for all commands ENV PATH = \"/usr/src/app/venv/bin: $PATH \" # Copy and install dependencies COPY requirements.txt ./ RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r requirements.txt # Copy application files COPY app.py ./ COPY templates/index.html ./templates/ # Expose the application port EXPOSE 5000 # Run the application inside the virtual environment CMD [ \"python\" , \"/usr/src/app/app.py\" ]","title":"2.3.2 Write a Dockerfile"},{"location":"ch1-discover-docker-td/#233-build-the-image","text":"Now that you have your Dockerfile , you can build your image. The docker build command does the heavy-lifting of creating a docker image from a Dockerfile . When you run the docker build command given below, make sure to replace <YOUR_USERNAME> with your username. This username should be the same one you created when registering on Docker Cloud . If you haven't done that yet, please go ahead and create an account. The docker build command is quite simple - it takes an optional tag name with the -t flag, and the location of the directory containing the Dockerfile - the . indicates the current directory: docker build -t <YOUR_USERNAME>/myfirstapp . 
If you don't have the alpine:3.21.0 image, the client will first pull the image and then create your image. Therefore, your output on running the command will look different from mine. If everything went well, your image should be ready! Run docker images and see if your image ( <YOUR_USERNAME>/myfirstapp ) shows.","title":"2.3.3 Build the image"},{"location":"ch1-discover-docker-td/#234-run-your-image","text":"The next step in this section is to run the image and see if it actually works. docker run -p 8888 :5000 --name myfirstapp YOUR_USERNAME/myfirstapp * Serving Flask app 'app' * Debug mode: off WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead. * Running on all addresses ( 0 .0.0.0 ) * Running on http://127.0.0.1:5000 * Running on http://172.17.0.2:5000 Press CTRL+C to quit Head over to http://localhost:8888 and your app should be live. Note If you are using Docker Machine, you may need to open up another terminal and determine the container ip address using docker-machine ip default . Hit the Refresh button in the web browser to see a few more cat images. Check Show us your running flask-app !","title":"2.3.4 Run your image"},{"location":"ch1-discover-docker-td/#234-dockerfile-commands-summary","text":"Here's a quick summary of the few basic commands we used in our Dockerfile. FROM starts the Dockerfile. It is a requirement that the Dockerfile must start with the FROM command. Images are created in layers, which means you can use another image as the base image for your own. The FROM command defines your base layer. As arguments, it takes the name of the image. Optionally, you can add the Docker Cloud username of the maintainer and image version, in the format username/imagename:version . RUN is used to build up the Image you're creating. For each RUN command, Docker will run the command then create a new layer of the image. This way you can roll back your image to previous states easily. The syntax for a RUN instruction is to place the full text of the shell command after the RUN (e.g., RUN mkdir /user/local/foo ). This will automatically run in a /bin/sh shell. You can define a different shell like this: RUN /bin/bash -c 'mkdir /user/local/foo ' COPY copies local files into the container. CMD defines the commands that will run on the Image at start-up. Unlike a RUN , this does not create a new layer for the Image, but simply runs the command. There can only be one CMD per a Dockerfile/Image. If you need to run multiple commands, the best way to do that is to have the CMD run a script. CMD requires that you tell it where to run the command, unlike RUN . So example CMD commands would be: CMD [ \"python\" , \"./app.py\" ] CMD [ \"/bin/bash\" , \"echo\" , \"Hello World\" ] EXPOSE creates a hint for users of an image which ports provide services. It is included in the information which can be retrieved via docker inspect <container-id> . Note The EXPOSE command does not actually make any ports accessible to the host! Instead, this requires publishing ports by means of the -p flag when using docker run . Note If you want to learn more about Dockerfiles, check out Best practices for writing Dockerfiles . 
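To see the EXPOSE note above in action, here is a small sketch you can try with the image you built earlier (the container names expose-demo and publish-demo are just placeholders):

```bash
# EXPOSE alone: the port is documented, but nothing is reachable from the host
docker run -d --name expose-demo <YOUR_USERNAME>/myfirstapp
docker port expose-demo              # prints nothing: no host binding exists

# -P publishes every EXPOSEd port to a random host port (-p maps one explicitly)
docker run -d --name publish-demo -P <YOUR_USERNAME>/myfirstapp
docker port publish-demo             # e.g. 5000/tcp -> 0.0.0.0:32768

# Clean up the two demo containers
docker rm -f expose-demo publish-demo
```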
(source: https://github.com/docker/labs/tree/master/beginner ) Now that you know how to run docker container and create Dockerfiles let\u2019s move on to the practical part.","title":"2.3.4 Dockerfile commands summary"},{"location":"ch1-discover-docker-tp/","text":"Discover Docker Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time). Question Point to document/report. Tip Interesting information. Goals Good practice Do not forget to document what you do along the steps, the documentation provided will be evaluated as your report. Create an appropriate file structure, 1 folder per image. Target application 3-tiers application: HTTP server Backend API Database For each of those applications, we will follow the same process: choose the appropriate docker base image, create and configure this image, put our application specifics inside and at some point have it running. Our final goal is to have a 3-tier web API running. Base images HTTP server Backend API Database Database Basics We will use the image: postgres:17.2-alpine. Let\u2019s have a simple postgres server running, here is what would be a minimal Dockerfile: FROM postgres:17.2-alpine ENV POSTGRES_DB = db \\ POSTGRES_USER = usr \\ POSTGRES_PASSWORD = pwd Build this image and start a container properly. Your Postgres DB should be up and running. Check that everything is running smoothly with the docker command of your choice. Don\u2019t forget to name your docker image and container. Tip If you have difficulties go back to part 2.3.3 Build the image and 2.3.4 Run your image on TD01 - Docker ( TD 1 Discover Docker ). Re-run your database with adminer . Don't forget --network app-network to enable adminer/database communication. We use -\u2013network instead of -\u2013link because the latter is deprecated. Tip Don't forget to create your network docker network create app-network Also, does it seem right to have passwords written in plain text in a file? You may rather define those environment parameters when running the image using the flag -e . Question 1-1 For which reason is it better to run the container with a flag -e to give the environment variables rather than put them directly in the Dockerfile? It would be nice to have our database structure initialized with the docker image as well as some initial data. Any sql scripts found in /docker-entrypoint-initdb.d will be executed in alphabetical order, therefore let\u2019s add a couple scripts to our image: Tip Don't forget to restart the adminer: docker run \\ -p \"8090:8080\" \\ --net = app-network \\ --name = adminer \\ -d \\ adminer Init database 01-CreateScheme.sql CREATE TABLE public . departments ( id SERIAL PRIMARY KEY , name VARCHAR ( 20 ) NOT NULL ); CREATE TABLE public . 
students ( id SERIAL PRIMARY KEY , department_id INT NOT NULL REFERENCES departments ( id ), first_name VARCHAR ( 20 ) NOT NULL , last_name VARCHAR ( 20 ) NOT NULL ); 02-InsertData.sql INSERT INTO departments ( name ) VALUES ( 'IRC' ); INSERT INTO departments ( name ) VALUES ( 'ETI' ); INSERT INTO departments ( name ) VALUES ( 'CGP' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 1 , 'Eli' , 'Copter' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 2 , 'Emma' , 'Carena' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 2 , 'Jack' , 'Uzzi' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 3 , 'Aude' , 'Javel' ); Rebuild your image and check that your scripts have been executed at startup and that the data is present in your container. Tip When we talk about /docker-entrypoint-initdb.d it means inside the container, so you have to copy your directory's content and the container\u2019s directory. Persist data You may have noticed that if your database container gets destroyed then all your data is reset, a database must persist data durably. Use volumes to persist data on the host disk. -v /my/own/datadir:/var/lib/postgresql/data Check that data survives when your container gets destroyed. Link Docker volumes Question 1-2 Why do we need a volume to be attached to our postgres container? Question 1-3 Document your database container essentials: commands and Dockerfile. Backend API Basics For starters, we will simply run a Java hello-world class in our containers, only after will we be running a jar. In both cases, choose the proper image keeping in mind that we only need a Java runtime . Here is a complex Java Hello World implementation: Main.java public class Main { public static void main ( String [] args ) { System . out . println ( \"Hello World!\" ); } } 1- Compile with your target Java: javac Main.java . 2- Write dockerfile. FROM # TODO: Choose a java JRE # TODO: Add the compiled java (aka bytecode, aka .class) # TODO: Run the Java with: \u201cjava Main\u201d command. 3- Now, to launch app you have to do the same thing that Basic step 1. Here you have a first glimpse of your backend application. In the next step we will simply enrich the build (using maven instead of a minimalistic javac) and execute a jar instead of a simple .class. \u2192 If it\u2019s a success you must see \u201cHello Word\u201d in your console. Multistage build In the previous section we were building Java code on our machine to have it running on a docker container. Wouldn\u2019t it be great to have Docker handle the build as well? You probably noticed that the default openjdk docker images contain... Well... a JDK! Create a multistage build using the Multistage . Your Dockerfile should look like this: FROM eclipse-temurin:21-jdk-alpine # Build Main.java with JDK # TODO : in next steps (not now) FROM eclipse-temurin:21-jre-alpine # Copy resource from previous stage COPY --from = 0 /usr/src/Main.class . # Run java code with the JRE # TODO : in next steps (not now) Don\u2019t fill the Dockerfile now, we will have to do it in the next steps. Backend simple api We will deploy a Springboot application providing a simple API with a single greeting endpoint. Create your Springboot application on: Spring Initializer . 
Use the following config: Project: Maven Language: Java 21 Spring Boot: 3.4.2 Packaging: Jar Dependencies: Spring Web Generate the project and give it a simple GreetingController class: package fr.takima.training.simpleapi.controller ; import org.springframework.web.bind.annotation.* ; import java.util.concurrent.atomic.AtomicLong ; @RestController public class GreetingController { private static final String template = \"Hello, %s!\" ; private final AtomicLong counter = new AtomicLong (); @GetMapping ( \"/\" ) public Greeting greeting ( @RequestParam ( value = \"name\" , defaultValue = \"World\" ) String name ) { return new Greeting ( counter . incrementAndGet (), String . format ( template , name )); } record Greeting ( long id , String content ) {} } You can now build and start your application, of course you will need maven and a jdk-21. How convenient would it be to have a virtual container to build and run our simplistic API? Oh wait, we have docker, here is how you could build and run your application with Docker: # Build stage FROM eclipse-temurin:21-jdk-alpine AS myapp-build ENV MYAPP_HOME = /opt/myapp WORKDIR $MYAPP_HOME RUN apk add --no-cache maven COPY pom.xml . COPY src ./src RUN mvn package -DskipTests # Run stage FROM eclipse-temurin:21-jre-alpine ENV MYAPP_HOME = /opt/myapp WORKDIR $MYAPP_HOME COPY --from = myapp-build $MYAPP_HOME /target/*.jar $MYAPP_HOME /myapp.jar ENTRYPOINT [ \"java\" , \"-jar\" , \"myapp.jar\" ] Question 1-4 Why do we need a multistage build? And explain each step of this dockerfile. Check A working Springboot application with a simple HelloWorld endpoint. Did you notice that maven downloads all libraries on every image build? You can contribute to saving the planet caching libraries when maven pom file has not been changed by running the goal: mvn dependency:go-offline . Backend API Let\u2019s now build and run the backend API connected to the database. You can get the zipped source code here: simple-api . You can replace only your src directory and the pom.xml file with the ones available in the repository. Adjust the configuration in simple-api/src/main/resources/application.yml (this is the application configuration). How to access the database container from your backend application? Use the deprecated --link or create a docker network . Once everything is properly bound, you should be able to access your application API, for example on: /departments/IRC/students . [ { \"id\" : 1 , \"firstname\" : \"Eli\" , \"lastname\" : \"Copter\" , \"department\" : { \"id\" : 1 , \"name\" : \"IRC\" } } ] Explore your API other endpoints, have a look at the controllers in the source code. Check A simple web API on top of your database. Http server Basics Choose an appropriate base image. Create a simple landing page: index.html and put it inside your container. It should be enough for now, start your container and check that everything is working as expected. Here are commands that you may want to try to do so: docker stats docker inspect docker logs Link Httpd Getting Started Configuration You are using the default apache configuration, and it will be enough for now, you use yours by copying it in your image. Use docker exec to retrieve this default configuration from your running container /usr/local/apache2/conf/httpd.conf . Note You can also use docker cp . 
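One possible way to grab that default configuration, assuming you chose the official httpd image (the tag httpd:2.4 and the container name tmp-httpd are assumptions, adapt them to your setup):

```bash
# Start a throwaway httpd container, copy its default config out, then remove it
docker run -d --name tmp-httpd httpd:2.4
docker cp tmp-httpd:/usr/local/apache2/conf/httpd.conf ./httpd.conf
docker rm -f tmp-httpd
```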
Reverse proxy We will configure the http server as a simple reverse proxy server in front of our application, this server could be used to deliver a front-end application, to configure SSL or to handle load balancing. So this can be quite useful even though in our case we will keep things simple. Here is the documentation: Reverse Proxy . Add the following to the configuration, and you should be all set: <VirtualHost *:80> ProxyPreserveHost On ProxyPass / http://YOUR_BACKEND_LINK:8080/ ProxyPassReverse / http://YOUR_BACKEND_LINK:8080/ </VirtualHost> LoadModule proxy_module modules/mod_proxy.so LoadModule proxy_http_module modules/mod_proxy_http.so Question 1-5 Why do we need a reverse proxy? Check Checkpoint: a working application through a reverse proxy. Link application Docker-compose 1- Install docker-compose if the docker compose command does not work . You may have noticed that this can be quite painful to orchestrate manually the start, stop and rebuild of our containers. Thankfully, a useful tool called docker-compose comes in handy in those situations. 2- Let\u2019s create a docker-compose.yml file with the following structure to define and drive our containers: services : backend : build : #TODO networks : #TODO depends_on : #TODO database : build : #TODO networks : #TODO httpd : build : #TODO ports : #TODO networks : #TODO depends_on : #TODO networks : #TODO volumes : #TODO The docker-compose will handle the three containers for us. The file above is a basic example of structure, you need to add more parameters and think about the cleanest and most optimized approach like you would do in a company (for example: env variables, volumes, restart policies and processes segregation). Once your containers are orchestrated as services by docker-compose you should have a perfectly running application, make sure you can access your API on localhost . Note The ports of both your backend and database should not be opened to your host machine. Question 1-6 Why is docker-compose so important? Question 1-7 Document docker-compose most important commands. Question 1-8 Document your docker-compose file. Check A working 3-tier application running with docker-compose. Publish Your docker images are stored locally, let\u2019s publish them, so they can be used by other team members or on other machines. You will need a Docker Hub account. 1- Connect to your freshly created account with docker login . 2- Tag your image. For now, we have been only using the latest tag, now that we want to publish it, let\u2019s add some meaningful version information to our images. docker tag my-database USERNAME/my-database:1.0 3- Then push your image to dockerhub: docker push USERNAME/my-database:1.0 Dockerhub is not the only docker image registry, and you can also self-host your images (this is obviously the choice of most companies). Once you publish your images to dockerhub, you will see them in your account: having some documentation for your image would be quite useful if you want to use those later. Question 1-9 Document your publication commands and published images in dockerhub. Question 1-10 Why do we put our images into an online repo? \u00a9 Takima 2025","title":"TP part 01 - Docker"},{"location":"ch1-discover-docker-tp/#discover-docker","text":"Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time). Question Point to document/report. 
Tip Interesting information.","title":"Discover Docker"},{"location":"ch1-discover-docker-tp/#goals","text":"","title":"Goals"},{"location":"ch1-discover-docker-tp/#good-practice","text":"Do not forget to document what you do along the steps, the documentation provided will be evaluated as your report. Create an appropriate file structure, 1 folder per image.","title":"Good practice"},{"location":"ch1-discover-docker-tp/#target-application","text":"3-tiers application: HTTP server Backend API Database For each of those applications, we will follow the same process: choose the appropriate docker base image, create and configure this image, put our application specifics inside and at some point have it running. Our final goal is to have a 3-tier web API running.","title":"Target application"},{"location":"ch1-discover-docker-tp/#base-images","text":"HTTP server Backend API Database","title":"Base images"},{"location":"ch1-discover-docker-tp/#database","text":"","title":"Database"},{"location":"ch1-discover-docker-tp/#basics","text":"We will use the image: postgres:17.2-alpine. Let\u2019s have a simple postgres server running, here is what would be a minimal Dockerfile: FROM postgres:17.2-alpine ENV POSTGRES_DB = db \\ POSTGRES_USER = usr \\ POSTGRES_PASSWORD = pwd Build this image and start a container properly. Your Postgres DB should be up and running. Check that everything is running smoothly with the docker command of your choice. Don\u2019t forget to name your docker image and container. Tip If you have difficulties go back to part 2.3.3 Build the image and 2.3.4 Run your image on TD01 - Docker ( TD 1 Discover Docker ). Re-run your database with adminer . Don't forget --network app-network to enable adminer/database communication. We use -\u2013network instead of -\u2013link because the latter is deprecated. Tip Don't forget to create your network docker network create app-network Also, does it seem right to have passwords written in plain text in a file? You may rather define those environment parameters when running the image using the flag -e . Question 1-1 For which reason is it better to run the container with a flag -e to give the environment variables rather than put them directly in the Dockerfile? It would be nice to have our database structure initialized with the docker image as well as some initial data. Any sql scripts found in /docker-entrypoint-initdb.d will be executed in alphabetical order, therefore let\u2019s add a couple scripts to our image: Tip Don't forget to restart the adminer: docker run \\ -p \"8090:8080\" \\ --net = app-network \\ --name = adminer \\ -d \\ adminer","title":"Basics"},{"location":"ch1-discover-docker-tp/#init-database","text":"01-CreateScheme.sql CREATE TABLE public . departments ( id SERIAL PRIMARY KEY , name VARCHAR ( 20 ) NOT NULL ); CREATE TABLE public . 
students ( id SERIAL PRIMARY KEY , department_id INT NOT NULL REFERENCES departments ( id ), first_name VARCHAR ( 20 ) NOT NULL , last_name VARCHAR ( 20 ) NOT NULL ); 02-InsertData.sql INSERT INTO departments ( name ) VALUES ( 'IRC' ); INSERT INTO departments ( name ) VALUES ( 'ETI' ); INSERT INTO departments ( name ) VALUES ( 'CGP' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 1 , 'Eli' , 'Copter' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 2 , 'Emma' , 'Carena' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 2 , 'Jack' , 'Uzzi' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 3 , 'Aude' , 'Javel' ); Rebuild your image and check that your scripts have been executed at startup and that the data is present in your container. Tip When we talk about /docker-entrypoint-initdb.d it means inside the container, so you have to copy your directory's content and the container\u2019s directory.","title":"Init database"},{"location":"ch1-discover-docker-tp/#persist-data","text":"You may have noticed that if your database container gets destroyed then all your data is reset, a database must persist data durably. Use volumes to persist data on the host disk. -v /my/own/datadir:/var/lib/postgresql/data Check that data survives when your container gets destroyed. Link Docker volumes Question 1-2 Why do we need a volume to be attached to our postgres container? Question 1-3 Document your database container essentials: commands and Dockerfile.","title":"Persist data"},{"location":"ch1-discover-docker-tp/#backend-api","text":"","title":"Backend API"},{"location":"ch1-discover-docker-tp/#basics_1","text":"For starters, we will simply run a Java hello-world class in our containers, only after will we be running a jar. In both cases, choose the proper image keeping in mind that we only need a Java runtime . Here is a complex Java Hello World implementation: Main.java public class Main { public static void main ( String [] args ) { System . out . println ( \"Hello World!\" ); } } 1- Compile with your target Java: javac Main.java . 2- Write dockerfile. FROM # TODO: Choose a java JRE # TODO: Add the compiled java (aka bytecode, aka .class) # TODO: Run the Java with: \u201cjava Main\u201d command. 3- Now, to launch app you have to do the same thing that Basic step 1. Here you have a first glimpse of your backend application. In the next step we will simply enrich the build (using maven instead of a minimalistic javac) and execute a jar instead of a simple .class. \u2192 If it\u2019s a success you must see \u201cHello Word\u201d in your console.","title":"Basics"},{"location":"ch1-discover-docker-tp/#multistage-build","text":"In the previous section we were building Java code on our machine to have it running on a docker container. Wouldn\u2019t it be great to have Docker handle the build as well? You probably noticed that the default openjdk docker images contain... Well... a JDK! Create a multistage build using the Multistage . Your Dockerfile should look like this: FROM eclipse-temurin:21-jdk-alpine # Build Main.java with JDK # TODO : in next steps (not now) FROM eclipse-temurin:21-jre-alpine # Copy resource from previous stage COPY --from = 0 /usr/src/Main.class . 
# Run java code with the JRE # TODO : in next steps (not now) Don\u2019t fill the Dockerfile now, we will have to do it in the next steps.","title":"Multistage build"},{"location":"ch1-discover-docker-tp/#backend-simple-api","text":"We will deploy a Springboot application providing a simple API with a single greeting endpoint. Create your Springboot application on: Spring Initializer . Use the following config: Project: Maven Language: Java 21 Spring Boot: 3.4.2 Packaging: Jar Dependencies: Spring Web Generate the project and give it a simple GreetingController class: package fr.takima.training.simpleapi.controller ; import org.springframework.web.bind.annotation.* ; import java.util.concurrent.atomic.AtomicLong ; @RestController public class GreetingController { private static final String template = \"Hello, %s!\" ; private final AtomicLong counter = new AtomicLong (); @GetMapping ( \"/\" ) public Greeting greeting ( @RequestParam ( value = \"name\" , defaultValue = \"World\" ) String name ) { return new Greeting ( counter . incrementAndGet (), String . format ( template , name )); } record Greeting ( long id , String content ) {} } You can now build and start your application, of course you will need maven and a jdk-21. How convenient would it be to have a virtual container to build and run our simplistic API? Oh wait, we have docker, here is how you could build and run your application with Docker: # Build stage FROM eclipse-temurin:21-jdk-alpine AS myapp-build ENV MYAPP_HOME = /opt/myapp WORKDIR $MYAPP_HOME RUN apk add --no-cache maven COPY pom.xml . COPY src ./src RUN mvn package -DskipTests # Run stage FROM eclipse-temurin:21-jre-alpine ENV MYAPP_HOME = /opt/myapp WORKDIR $MYAPP_HOME COPY --from = myapp-build $MYAPP_HOME /target/*.jar $MYAPP_HOME /myapp.jar ENTRYPOINT [ \"java\" , \"-jar\" , \"myapp.jar\" ] Question 1-4 Why do we need a multistage build? And explain each step of this dockerfile. Check A working Springboot application with a simple HelloWorld endpoint. Did you notice that maven downloads all libraries on every image build? You can contribute to saving the planet caching libraries when maven pom file has not been changed by running the goal: mvn dependency:go-offline .","title":"Backend simple api"},{"location":"ch1-discover-docker-tp/#backend-api_1","text":"Let\u2019s now build and run the backend API connected to the database. You can get the zipped source code here: simple-api . You can replace only your src directory and the pom.xml file with the ones available in the repository. Adjust the configuration in simple-api/src/main/resources/application.yml (this is the application configuration). How to access the database container from your backend application? Use the deprecated --link or create a docker network . Once everything is properly bound, you should be able to access your application API, for example on: /departments/IRC/students . [ { \"id\" : 1 , \"firstname\" : \"Eli\" , \"lastname\" : \"Copter\" , \"department\" : { \"id\" : 1 , \"name\" : \"IRC\" } } ] Explore your API other endpoints, have a look at the controllers in the source code. Check A simple web API on top of your database.","title":"Backend API"},{"location":"ch1-discover-docker-tp/#http-server","text":"","title":"Http server"},{"location":"ch1-discover-docker-tp/#basics_2","text":"","title":"Basics"},{"location":"ch1-discover-docker-tp/#choose-an-appropriate-base-image","text":"Create a simple landing page: index.html and put it inside your container. 
It should be enough for now, start your container and check that everything is working as expected. Here are commands that you may want to try to do so: docker stats docker inspect docker logs Link Httpd Getting Started","title":"Choose an appropriate base image."},{"location":"ch1-discover-docker-tp/#configuration","text":"You are using the default apache configuration, and it will be enough for now, you use yours by copying it in your image. Use docker exec to retrieve this default configuration from your running container /usr/local/apache2/conf/httpd.conf . Note You can also use docker cp .","title":"Configuration"},{"location":"ch1-discover-docker-tp/#reverse-proxy","text":"We will configure the http server as a simple reverse proxy server in front of our application, this server could be used to deliver a front-end application, to configure SSL or to handle load balancing. So this can be quite useful even though in our case we will keep things simple. Here is the documentation: Reverse Proxy . Add the following to the configuration, and you should be all set: <VirtualHost *:80> ProxyPreserveHost On ProxyPass / http://YOUR_BACKEND_LINK:8080/ ProxyPassReverse / http://YOUR_BACKEND_LINK:8080/ </VirtualHost> LoadModule proxy_module modules/mod_proxy.so LoadModule proxy_http_module modules/mod_proxy_http.so Question 1-5 Why do we need a reverse proxy? Check Checkpoint: a working application through a reverse proxy.","title":"Reverse proxy"},{"location":"ch1-discover-docker-tp/#link-application","text":"","title":"Link application"},{"location":"ch1-discover-docker-tp/#docker-compose","text":"1- Install docker-compose if the docker compose command does not work . You may have noticed that this can be quite painful to orchestrate manually the start, stop and rebuild of our containers. Thankfully, a useful tool called docker-compose comes in handy in those situations. 2- Let\u2019s create a docker-compose.yml file with the following structure to define and drive our containers: services : backend : build : #TODO networks : #TODO depends_on : #TODO database : build : #TODO networks : #TODO httpd : build : #TODO ports : #TODO networks : #TODO depends_on : #TODO networks : #TODO volumes : #TODO The docker-compose will handle the three containers for us. The file above is a basic example of structure, you need to add more parameters and think about the cleanest and most optimized approach like you would do in a company (for example: env variables, volumes, restart policies and processes segregation). Once your containers are orchestrated as services by docker-compose you should have a perfectly running application, make sure you can access your API on localhost . Note The ports of both your backend and database should not be opened to your host machine. Question 1-6 Why is docker-compose so important? Question 1-7 Document docker-compose most important commands. Question 1-8 Document your docker-compose file. Check A working 3-tier application running with docker-compose.","title":"Docker-compose"},{"location":"ch1-discover-docker-tp/#publish","text":"Your docker images are stored locally, let\u2019s publish them, so they can be used by other team members or on other machines. You will need a Docker Hub account. 1- Connect to your freshly created account with docker login . 2- Tag your image. For now, we have been only using the latest tag, now that we want to publish it, let\u2019s add some meaningful version information to our images. 
docker tag my-database USERNAME/my-database:1.0 3- Then push your image to dockerhub: docker push USERNAME/my-database:1.0 Dockerhub is not the only docker image registry, and you can also self-host your images (this is obviously the choice of most companies). Once you publish your images to dockerhub, you will see them in your account: having some documentation for your image would be quite useful if you want to use those later. Question 1-9 Document your publication commands and published images in dockerhub. Question 1-10 Why do we put our images into an online repo? \u00a9 Takima 2025","title":"Publish"},{"location":"ch2-discover-github-actions-td/","text":"Discover Github Note Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information Setup Prerequisites Even if it seems pretty usual to use Git in the development world, not every project is managed with this tool. The main goal here is to have you create and set up a Github account before using it for further purposes. Git will be required as well as it is a must have. You might want to start with Sign up to Github First step is (if not already done) to sign up to Github with your school mail address and fill the required information. We recommend you to use an individual free plan for the next steps of this project. You can eventually fill the last page but it\u2019s not really important. Select \u201cComplete setup\u201d. There you are, your (probably not first) Github account is set up. Yay ! Now, let\u2019s move on to the next step ! Project forking and publishing For this part, we are going to fork the project that will be used for the rest of the lesson (I mean, till the end of the week). Now you own the project under your Github workspace, you can basically do whatever you want on this project. However we recommend not to modify the whole java and maven content if you still want this project to compile. First of all, make sure the git CLI is installed on your computer and then clone the project on your computer to be able to modify it locally. Securing Github access There are actually two different ways of cloning and publishing a project. By default, Github will propose you to clone by HTTPS link. Copy to clipboard, then open a new terminal and enter : $ git clone <project_url_with_https> Git will probably ask you to authenticate in order to be able to clone the repository. It will ask you the same thing every time you want to publish your work on a branch. This might be painful and you don\u2019t want to do this. The second option is \u201cuse SSH\u201d and the link starts with \u201cgit@github.com:\u2026\u201d, but there is a prerequisite to use this solution, you\u2019ll need to create an SSH key and have it added to your account. Fine, then tape: $ ssh-keygen -t rsa -b 4096 -f ~/.ssh/ { theNameOfYourKeyPair } It will ask you to enter and confirm a passphrase, this is for security purposes but we will let it empty for this course. Well done, you\u2019ve generated a new RSA key pair of 4096 bits size. If you do \u201cls ~/.ssh\u201d you\u2019ll see new files inside your folder, one is named theNameOfYourKeyPair and the other one theNameOfYourKeyPair.pub. The first one is your private key to NEVER communicate to anyone and the second one is you public key. 
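Tip (optional, a supplementary sketch rather than part of the original walkthrough): if your shell keeps asking which key to use, you can load the key into the SSH agent, and once the public key has been added to your Github account (next step) you can verify the connection:

```bash
# Load the private key into the SSH agent for the current shell session
eval "$(ssh-agent -s)"
ssh-add ~/.ssh/theNameOfYourKeyPair

# After the public key has been added on Github, test the SSH connection
ssh -T git@github.com
```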
Let\u2019s take a look to this last one, enter \u201ccat ~/.ssh/theNameOfYourKeyPair.pub\u201d: Something like this will appear on you terminal, this is the content of your public key that you will communicate to Github. Copy the whole content and past it to you Github account under \u201cSettings\u201d and \u201cSSH and GPG keys\u201d. Click on New SSH key and paste the content of your public key. Give it a name and validate the operation. Now try to clone the repository again with the git@ prefix. It will ask you to select a key pair to perform the action. Take the one you\u2019ve just indicated to Github and press enter. Now you are able to clone and publish work on your Github repository without entering a password every time, I hope you enjoy this. Let\u2019s publish Open the project inside your favorite IDE (I hope it\u2019s IntelliJ) and open the file README.md. Modify this file entering, for example \u201cThis project is now mine\u201d. Save it and check that Git has correctly seen you changes $ git status You\u2019ll see you file colored in red. This means that Git has seen you\u2019ve made some modifications to this file, but it will not take them into account once you will publish them. Then ask git to add them to your work. $ git add . Actually, we did not ask him to add our file, but to add any modification made to any file inside our working directory. Now if you enter \u201cgit status\u201d again you\u2019ll see that your file is colored in green. You work will be taken into account, hopefully. Let\u2019s commit this work: $ git commit -m \u201cThe message of your commit\u201d Now if you try to \u201cgit status\u201d again you\u2019ll see that your workspace is \u201cclean\u201d. Git created a new reference with all the changes you\u2019ve made. If you go on and enter: $ git log You\u2019ll see the message of you last commit on top of the references. However you cannot see the changes on the Github website because we did not publish yet our work. Let\u2019s do it ! $ git push origin master This command literally means \u201cI want to publish my work on the distant/remote branch master\u201d. And now you can see that your work is published online ! Big up guys ! Configure your repository Git is one of the most useful tool you\u2019ll find in your developer life. Almost everybody uses it and most of the time you\u2019ll have to work with other people on project using Github. However you\u2019ll find many people that use it wrongly, and many people that will create things you don\u2019t want to merge in you production branch. Let\u2019s secure a bit our labor to prevent any fool to throw it away. Go back to your project on the Github webpage and click on settings. Go to Branches and you\u2019ll see that your default branch is master. Fine, it means that every time you connect on your repository, this branch will be displayed. Just under this indication, you\u2019ll see a Branch protection rule. Try to add one. You\u2019ll see a bunch of options, most of them are very useful working in team (especially asking for pull request and review before merging inside master branch). You can also select options to block push force (when someone does push -f) because it doesn\u2019t take care of Git warning messages that usually prevent you from pushing. As you are working alone on this project we will only add the name \u201cmaster\u201d to the naming pattern and let the rest as it is. It will only prevent you from doing bad things on you master branch. 
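With a protection rule on master, the day-to-day workflow usually goes through short-lived branches and pull requests rather than direct pushes to master. A possible sequence, shown here only as a sketch (the branch name is made up):

```bash
git checkout -b feature/readme-update   # hypothetical branch name
git add .
git commit -m "Update the README"
git push -u origin feature/readme-update

# then open a pull request towards master from the GitHub web UI
# (or, if you use the GitHub CLI: gh pr create --base master)
```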
Finally, be aware that all the work you do on Github is public by default. Therefore you should or you must NEVER publish any password on your repository. Thankfully you can turn your repository to private from the options and there are Environment Variables that you can set and secure (I mean encrypt) inside your Github repository under Secrets. Git basic commands Clone a project $ git clone <url_of_the_project> Fetch distant modifications without merging them into your branch $ git fetch -p Fetch distant modifications and merge them into you branch $ git pull Add your changes to the workspace $ git add . Commit your changes $ git commit -m \u201cYour message\u201d Publish your changes $ git push origin <name_of_the_remote_branch>","title":"TD part 02 - Github Actions"},{"location":"ch2-discover-github-actions-td/#discover-github","text":"Note Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information","title":"Discover Github"},{"location":"ch2-discover-github-actions-td/#setup","text":"","title":"Setup"},{"location":"ch2-discover-github-actions-td/#prerequisites","text":"Even if it seems pretty usual to use Git in the development world, not every project is managed with this tool. The main goal here is to have you create and set up a Github account before using it for further purposes. Git will be required as well as it is a must have. You might want to start with","title":"Prerequisites"},{"location":"ch2-discover-github-actions-td/#sign-up-to-github","text":"First step is (if not already done) to sign up to Github with your school mail address and fill the required information. We recommend you to use an individual free plan for the next steps of this project. You can eventually fill the last page but it\u2019s not really important. Select \u201cComplete setup\u201d. There you are, your (probably not first) Github account is set up. Yay ! Now, let\u2019s move on to the next step !","title":"Sign up to Github"},{"location":"ch2-discover-github-actions-td/#project-forking-and-publishing","text":"For this part, we are going to fork the project that will be used for the rest of the lesson (I mean, till the end of the week). Now you own the project under your Github workspace, you can basically do whatever you want on this project. However we recommend not to modify the whole java and maven content if you still want this project to compile. First of all, make sure the git CLI is installed on your computer and then clone the project on your computer to be able to modify it locally.","title":"Project forking and publishing"},{"location":"ch2-discover-github-actions-td/#securing-github-access","text":"There are actually two different ways of cloning and publishing a project. By default, Github will propose you to clone by HTTPS link. Copy to clipboard, then open a new terminal and enter : $ git clone <project_url_with_https> Git will probably ask you to authenticate in order to be able to clone the repository. It will ask you the same thing every time you want to publish your work on a branch. This might be painful and you don\u2019t want to do this. The second option is \u201cuse SSH\u201d and the link starts with \u201cgit@github.com:\u2026\u201d, but there is a prerequisite to use this solution, you\u2019ll need to create an SSH key and have it added to your account. 
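The next paragraphs generate the key and add it to your account through the web UI; if you prefer staying in the terminal, the GitHub CLI can upload the public key as well. A hedged sketch, assuming gh is installed and authenticated (the key title is arbitrary):

```bash
gh auth login   # one-time login, if not already done
gh ssh-key add ~/.ssh/theNameOfYourKeyPair.pub --title "devops-td-key"
```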
Fine, then tape: $ ssh-keygen -t rsa -b 4096 -f ~/.ssh/ { theNameOfYourKeyPair } It will ask you to enter and confirm a passphrase, this is for security purposes but we will let it empty for this course. Well done, you\u2019ve generated a new RSA key pair of 4096 bits size. If you do \u201cls ~/.ssh\u201d you\u2019ll see new files inside your folder, one is named theNameOfYourKeyPair and the other one theNameOfYourKeyPair.pub. The first one is your private key to NEVER communicate to anyone and the second one is you public key. Let\u2019s take a look to this last one, enter \u201ccat ~/.ssh/theNameOfYourKeyPair.pub\u201d: Something like this will appear on you terminal, this is the content of your public key that you will communicate to Github. Copy the whole content and past it to you Github account under \u201cSettings\u201d and \u201cSSH and GPG keys\u201d. Click on New SSH key and paste the content of your public key. Give it a name and validate the operation. Now try to clone the repository again with the git@ prefix. It will ask you to select a key pair to perform the action. Take the one you\u2019ve just indicated to Github and press enter. Now you are able to clone and publish work on your Github repository without entering a password every time, I hope you enjoy this.","title":"Securing Github access"},{"location":"ch2-discover-github-actions-td/#lets-publish","text":"Open the project inside your favorite IDE (I hope it\u2019s IntelliJ) and open the file README.md. Modify this file entering, for example \u201cThis project is now mine\u201d. Save it and check that Git has correctly seen you changes $ git status You\u2019ll see you file colored in red. This means that Git has seen you\u2019ve made some modifications to this file, but it will not take them into account once you will publish them. Then ask git to add them to your work. $ git add . Actually, we did not ask him to add our file, but to add any modification made to any file inside our working directory. Now if you enter \u201cgit status\u201d again you\u2019ll see that your file is colored in green. You work will be taken into account, hopefully. Let\u2019s commit this work: $ git commit -m \u201cThe message of your commit\u201d Now if you try to \u201cgit status\u201d again you\u2019ll see that your workspace is \u201cclean\u201d. Git created a new reference with all the changes you\u2019ve made. If you go on and enter: $ git log You\u2019ll see the message of you last commit on top of the references. However you cannot see the changes on the Github website because we did not publish yet our work. Let\u2019s do it ! $ git push origin master This command literally means \u201cI want to publish my work on the distant/remote branch master\u201d. And now you can see that your work is published online ! Big up guys !","title":"Let\u2019s publish"},{"location":"ch2-discover-github-actions-td/#configure-your-repository","text":"Git is one of the most useful tool you\u2019ll find in your developer life. Almost everybody uses it and most of the time you\u2019ll have to work with other people on project using Github. However you\u2019ll find many people that use it wrongly, and many people that will create things you don\u2019t want to merge in you production branch. Let\u2019s secure a bit our labor to prevent any fool to throw it away. Go back to your project on the Github webpage and click on settings. Go to Branches and you\u2019ll see that your default branch is master. 
Fine, it means that every time you connect on your repository, this branch will be displayed. Just under this indication, you\u2019ll see a Branch protection rule. Try to add one. You\u2019ll see a bunch of options, most of them are very useful working in team (especially asking for pull request and review before merging inside master branch). You can also select options to block push force (when someone does push -f) because it doesn\u2019t take care of Git warning messages that usually prevent you from pushing. As you are working alone on this project we will only add the name \u201cmaster\u201d to the naming pattern and let the rest as it is. It will only prevent you from doing bad things on you master branch. Finally, be aware that all the work you do on Github is public by default. Therefore you should or you must NEVER publish any password on your repository. Thankfully you can turn your repository to private from the options and there are Environment Variables that you can set and secure (I mean encrypt) inside your Github repository under Secrets.","title":"Configure your repository"},{"location":"ch2-discover-github-actions-td/#git-basic-commands","text":"Clone a project $ git clone <url_of_the_project> Fetch distant modifications without merging them into your branch $ git fetch -p Fetch distant modifications and merge them into you branch $ git pull Add your changes to the workspace $ git add . Commit your changes $ git commit -m \u201cYour message\u201d Publish your changes $ git push origin <name_of_the_remote_branch>","title":"Git basic commands"},{"location":"ch2-discover-github-actions-tp/","text":"Discover Github Action Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information Goals Good Practice Do not forget to document what you do along the steps. Create an appropriate file structure, 1 folder per image. Target Application Complete pipeline workflow for testing and delivering your software application. We are going to use different useful tools to build your application, test it automatically, and check the code quality at the same time. Link GitHub Actions Setup GitHub Actions The first tool we are going to use is GitHub Actions . GitHub Actions is an online service that allows you to build pipelines to test your application. Keep in mind that GitHub Actions is not the only one on the market to build integration pipelines. Historically many companies were using Jenkins (and still a lot continue to do it), it is way less accessible than GitHub Actions but much more configurable. You will also hear about Gitlab CI and Bitbucket Pipelines during your work life. First steps into the CI World Note Use your repository from the end of the Docker TP Most of the CI services use a yaml file (except Jenkins that uses a\u2026 Groovy file\u2026) to describe the expected steps to be done over the pipeline execution. Go on and create your first main.yml file into your project\u2019s root directory. Build and test your Application For those who are not familiar with Maven and Java project structures, here is the command for building and running your tests: mvn clean verify You need to launch this command from your pom.xml directory, or specify the path to it with --file /path/to/pom.xml argument. Note What is it supposed to do? 
This command will actually clear your previous builds inside your cache (otherwise your can have unexpected behavior because maven did not build again each part of your application), then it will freshly build each module inside your application, and finally it will run both Unit Tests and Integration Tests (sometime called Component Tests as well). Note Unit tests? Component tests? Integration tests require a database to verify you correctly inserted or retrieved data from it. Fortunately for you, we\u2019ve already taken care of this! But you still need to understand how it works under the hood. Take a look at your application file tree. Let\u2019s take a look at the pom.xml that is inside the simple-api , you will find some very helpful dependencies for your testing. <dependencies> <dependency> <groupId> org.testcontainers </groupId> <artifactId> testcontainers </artifactId> <version> ${testcontainers.version} </version> <scope> test </scope> </dependency> <dependency> <groupId> org.testcontainers </groupId> <artifactId> jdbc </artifactId> <version> ${testcontainers.version} </version> <scope> test </scope> </dependency> <dependency> <groupId> org.testcontainers </groupId> <artifactId> postgresql </artifactId> <version> ${testcontainers.version} </version> <scope> test </scope> </dependency> </dependencies> As you can see, there are a bunch of testcontainers dependencies inside the pom. Question 2-1 What are testcontainers? They simply are java libraries that allow you to run a bunch of docker containers while testing. Here we use the postgresql container to attach to our application while testing. If you run the command mvn clean verify you\u2019ll be able to see the following: As you can see, a docker container has been launched while your tests were running, pretty convenient, isn\u2019t it? Finally, you\u2019ll see your test results. Now, it is up to you! Create your first CI, asking to build and test your application every time someone commits and pushes code on the repository. First you create a .github/workflows directory in your repository on GitHub. Put your main.yml inside workflows. The main.yml holds the architecture of your pipeline. Each job will represent a step of what you want to do. Each job will be run in parallel unless a link is specified. Here is what your main.yml should look like: name : CI devops 2025 on : #to begin you want to launch this job in main and develop push : branches : #TODO pull_request : jobs : test-backend : runs-on : ubuntu-24.04 steps : #checkout your github code using actions/checkout@v4 - uses : actions/checkout@v4 #do the same with another action (actions/setup-java@v4) that enable to setup jdk 21 - name : Set up JDK 21 #TODO #finally build your app with the latest command - name : Build and test with Maven run : #TODO It\u2019s your turn, fill the #TODOs! To see the result you must follow the next steps: And if it\u2019s GREEN you win! Check First CI with backend test ! Question 2-2 Document your Github Actions configurations. First steps into the CD World Here we are going to configure the Continuous Delivery of our project. Therefore, the main goal will be to create and save a docker image containing our application on the Docker Hub every time there is a commit on a main branch. As you probably already noticed, you need to log in to docker hub to perform any publication. However, you don\u2019t want to publish your credentials on a public repository (it is not even a good practise to do it on a private repository). 
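One way to keep those credentials out of the repository is to register them as encrypted repository secrets before referencing them in the pipeline. A sketch using the GitHub CLI — gh being installed is an assumption, and the secret names simply match the ones referenced later in this TP:

```bash
gh secret set DOCKERHUB_USERNAME --body "your-dockerhub-username"
gh secret set DOCKERHUB_TOKEN    # paste a Docker Hub access token when prompted
```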
Fortunately, GitHub allows you to create secured environment variables. 1- Add your docker hub credentials to the environment variables in GitHub Actions (and let them secured). Note Secured Variables, why? Now that you added them, you can freely declare them and use them inside your GitHub Actions pipeline. 2- Build your docker images inside your GitHub Actions pipeline. Maybe the template Build a docker image can help you! For now, we only need to build the images # define job to build and publish docker image build-and-push-docker-image : needs : test-backend # run only when code is compiling and tests are passing runs-on : ubuntu-24.04 # steps to perform in job steps : - name : Checkout code uses : actions/checkout@v4 - name : Build image and push backend uses : docker/build-push-action@v6 with : # relative path to the place where source code with Dockerfile is located context : # Note: tags has to be all lower-case tags : ${{secrets.DOCKERHUB_USERNAME}}/tp-devops-simple-api:latest - name : Build image and push database # DO the same for database - name : Build image and push httpd # DO the same for httpd Note Why did we put needs: build-and-test-backend on this job? Maybe try without this and you will see! OK your images are built but not yet published on dockerhub . 3- Publish your docker images when there is a commit on the main branch. Don\u2019t forget to do a docker login and to put your credentials on secrets! - name : Login to DockerHub run : echo \"${{ secrets.DOCKERHUB_TOKEN }}\" | docker login --username ${{ secrets.DOCKERHUB_USERNAME }} --password-stdin And after modify job Build image and push backend to add a push action: - name : Build image and push backend uses : docker/build-push-action@v6 with : # relative path to the place where source code with Dockerfile is located context : # Note: tags has to be all lower-case tags : ${{secrets.DOCKERHUB_USERNAME}}/tp-devops-simple-api:latest # build on feature branches, push only on main branch push : ${{ github.ref == 'refs/heads/main' }} Do the same for other containers. Question 2-3 For what purpose do we need to push docker images? Now you should be able to find your docker images on your docker repository. Check Working CI & Docker images pushed to your repository. Setup Quality Gate What is quality about? Quality is here to make sure your code will be maintainable and determine every unsecured block. It helps you produce better and tested features, and it will also prevent having dirty code pushed inside your main branch. For this purpose, we are going to use SonarCloud , a cloud solution that makes analysis and reports of your code. This is a useful tool that everyone should use in order to learn java best practices. Register to SonarCloud Create your free-tier account on SonarCloud . SonarCloud will propose you to set up your GitHub Actions pipeline from the GitHub Actions , but forget about that, there is a much better way to save the SonarCloud provided and provide it into your main.yml . 1- You must create an organization. 2- And keep the project key and the organization key you will need it later. 3- You need to add this script to your main.yml for launch sonar at each commit. Set up your pipeline to use SonarCloud analysis while testing. For that, you need to add a new step after Build and test with Maven and change sonar organization and project key. 
mvn -B verify sonar:sonar -Dsonar.projectKey = <your-project-key> -Dsonar.organization = <your-organization> -Dsonar.host.url = https://sonarcloud.io -Dsonar.login = ${ { secrets.SONAR_TOKEN } } --file ./simple-api/pom.xml If you did your configuration correctly, you should be able to see the SonarCloud analysis report online: Check Working quality gate. Question 2-4 Document your quality gate configuration. Well done buddies, you\u2019ve created your very first Quality Gate! Yay! Going further: Split pipelines In this step you have to separate your jobs into different workflows so that they respect 2 things: test-backend must be launched on develop and master branch and build-and-push-docker-image on master only. The job that pushes the docker api image must be launched only if test-backend is passed. Tip You can use on: workflow_run to trigger a workflow when another workflow is passed. \u00a9 Takima 2025","title":"TP part 02 - Github Actions"},{"location":"ch2-discover-github-actions-tp/#discover-github-action","text":"Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information","title":"Discover Github Action"},{"location":"ch2-discover-github-actions-tp/#goals","text":"","title":"Goals"},{"location":"ch2-discover-github-actions-tp/#good-practice","text":"Do not forget to document what you do along the steps. Create an appropriate file structure, 1 folder per image.","title":"Good Practice"},{"location":"ch2-discover-github-actions-tp/#target-application","text":"Complete pipeline workflow for testing and delivering your software application. We are going to use different useful tools to build your application, test it automatically, and check the code quality at the same time. Link GitHub Actions","title":"Target Application"},{"location":"ch2-discover-github-actions-tp/#setup-github-actions","text":"The first tool we are going to use is GitHub Actions . GitHub Actions is an online service that allows you to build pipelines to test your application. Keep in mind that GitHub Actions is not the only one on the market to build integration pipelines. Historically many companies were using Jenkins (and still a lot continue to do it), it is way less accessible than GitHub Actions but much more configurable. You will also hear about Gitlab CI and Bitbucket Pipelines during your work life.","title":"Setup GitHub Actions"},{"location":"ch2-discover-github-actions-tp/#first-steps-into-the-ci-world","text":"Note Use your repository from the end of the Docker TP Most of the CI services use a yaml file (except Jenkins that uses a\u2026 Groovy file\u2026) to describe the expected steps to be done over the pipeline execution. Go on and create your first main.yml file into your project\u2019s root directory.","title":"First steps into the CI World"},{"location":"ch2-discover-github-actions-tp/#build-and-test-your-application","text":"For those who are not familiar with Maven and Java project structures, here is the command for building and running your tests: mvn clean verify You need to launch this command from your pom.xml directory, or specify the path to it with --file /path/to/pom.xml argument. Note What is it supposed to do? 
This command will actually clear your previous builds inside your cache (otherwise your can have unexpected behavior because maven did not build again each part of your application), then it will freshly build each module inside your application, and finally it will run both Unit Tests and Integration Tests (sometime called Component Tests as well). Note Unit tests? Component tests? Integration tests require a database to verify you correctly inserted or retrieved data from it. Fortunately for you, we\u2019ve already taken care of this! But you still need to understand how it works under the hood. Take a look at your application file tree. Let\u2019s take a look at the pom.xml that is inside the simple-api , you will find some very helpful dependencies for your testing. <dependencies> <dependency> <groupId> org.testcontainers </groupId> <artifactId> testcontainers </artifactId> <version> ${testcontainers.version} </version> <scope> test </scope> </dependency> <dependency> <groupId> org.testcontainers </groupId> <artifactId> jdbc </artifactId> <version> ${testcontainers.version} </version> <scope> test </scope> </dependency> <dependency> <groupId> org.testcontainers </groupId> <artifactId> postgresql </artifactId> <version> ${testcontainers.version} </version> <scope> test </scope> </dependency> </dependencies> As you can see, there are a bunch of testcontainers dependencies inside the pom. Question 2-1 What are testcontainers? They simply are java libraries that allow you to run a bunch of docker containers while testing. Here we use the postgresql container to attach to our application while testing. If you run the command mvn clean verify you\u2019ll be able to see the following: As you can see, a docker container has been launched while your tests were running, pretty convenient, isn\u2019t it? Finally, you\u2019ll see your test results. Now, it is up to you! Create your first CI, asking to build and test your application every time someone commits and pushes code on the repository. First you create a .github/workflows directory in your repository on GitHub. Put your main.yml inside workflows. The main.yml holds the architecture of your pipeline. Each job will represent a step of what you want to do. Each job will be run in parallel unless a link is specified. Here is what your main.yml should look like: name : CI devops 2025 on : #to begin you want to launch this job in main and develop push : branches : #TODO pull_request : jobs : test-backend : runs-on : ubuntu-24.04 steps : #checkout your github code using actions/checkout@v4 - uses : actions/checkout@v4 #do the same with another action (actions/setup-java@v4) that enable to setup jdk 21 - name : Set up JDK 21 #TODO #finally build your app with the latest command - name : Build and test with Maven run : #TODO It\u2019s your turn, fill the #TODOs! To see the result you must follow the next steps: And if it\u2019s GREEN you win! Check First CI with backend test ! Question 2-2 Document your Github Actions configurations.","title":"Build and test your Application"},{"location":"ch2-discover-github-actions-tp/#first-steps-into-the-cd-world","text":"Here we are going to configure the Continuous Delivery of our project. Therefore, the main goal will be to create and save a docker image containing our application on the Docker Hub every time there is a commit on a main branch. As you probably already noticed, you need to log in to docker hub to perform any publication. 
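For reference, here is one possible way to fill the #TODOs of the main.yml skeleton shown earlier. It is a sketch only: the branch names, the Java distribution and the path to the pom.xml are assumptions based on this TP's layout.

```yaml
name: CI devops 2025
on:
  push:
    branches:
      - main
      - develop
  pull_request:

jobs:
  test-backend:
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@v4

      - name: Set up JDK 21
        uses: actions/setup-java@v4
        with:
          distribution: 'temurin'   # any supported distribution works; temurin is an assumption
          java-version: '21'

      - name: Build and test with Maven
        run: mvn clean verify --file ./simple-api/pom.xml   # path assumed from the sonar step shown later
```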
However, you don\u2019t want to publish your credentials on a public repository (it is not even a good practise to do it on a private repository). Fortunately, GitHub allows you to create secured environment variables. 1- Add your docker hub credentials to the environment variables in GitHub Actions (and let them secured). Note Secured Variables, why? Now that you added them, you can freely declare them and use them inside your GitHub Actions pipeline. 2- Build your docker images inside your GitHub Actions pipeline. Maybe the template Build a docker image can help you! For now, we only need to build the images # define job to build and publish docker image build-and-push-docker-image : needs : test-backend # run only when code is compiling and tests are passing runs-on : ubuntu-24.04 # steps to perform in job steps : - name : Checkout code uses : actions/checkout@v4 - name : Build image and push backend uses : docker/build-push-action@v6 with : # relative path to the place where source code with Dockerfile is located context : # Note: tags has to be all lower-case tags : ${{secrets.DOCKERHUB_USERNAME}}/tp-devops-simple-api:latest - name : Build image and push database # DO the same for database - name : Build image and push httpd # DO the same for httpd Note Why did we put needs: build-and-test-backend on this job? Maybe try without this and you will see! OK your images are built but not yet published on dockerhub . 3- Publish your docker images when there is a commit on the main branch. Don\u2019t forget to do a docker login and to put your credentials on secrets! - name : Login to DockerHub run : echo \"${{ secrets.DOCKERHUB_TOKEN }}\" | docker login --username ${{ secrets.DOCKERHUB_USERNAME }} --password-stdin And after modify job Build image and push backend to add a push action: - name : Build image and push backend uses : docker/build-push-action@v6 with : # relative path to the place where source code with Dockerfile is located context : # Note: tags has to be all lower-case tags : ${{secrets.DOCKERHUB_USERNAME}}/tp-devops-simple-api:latest # build on feature branches, push only on main branch push : ${{ github.ref == 'refs/heads/main' }} Do the same for other containers. Question 2-3 For what purpose do we need to push docker images? Now you should be able to find your docker images on your docker repository. Check Working CI & Docker images pushed to your repository.","title":"First steps into the CD World"},{"location":"ch2-discover-github-actions-tp/#setup-quality-gate","text":"","title":"Setup Quality Gate"},{"location":"ch2-discover-github-actions-tp/#what-is-quality-about","text":"Quality is here to make sure your code will be maintainable and determine every unsecured block. It helps you produce better and tested features, and it will also prevent having dirty code pushed inside your main branch. For this purpose, we are going to use SonarCloud , a cloud solution that makes analysis and reports of your code. This is a useful tool that everyone should use in order to learn java best practices.","title":"What is quality about?"},{"location":"ch2-discover-github-actions-tp/#register-to-sonarcloud","text":"Create your free-tier account on SonarCloud . SonarCloud will propose you to set up your GitHub Actions pipeline from the GitHub Actions , but forget about that, there is a much better way to save the SonarCloud provided and provide it into your main.yml . 1- You must create an organization. 2- And keep the project key and the organization key you will need it later. 
3- You need to add this script to your main.yml for launch sonar at each commit. Set up your pipeline to use SonarCloud analysis while testing. For that, you need to add a new step after Build and test with Maven and change sonar organization and project key. mvn -B verify sonar:sonar -Dsonar.projectKey = <your-project-key> -Dsonar.organization = <your-organization> -Dsonar.host.url = https://sonarcloud.io -Dsonar.login = ${ { secrets.SONAR_TOKEN } } --file ./simple-api/pom.xml If you did your configuration correctly, you should be able to see the SonarCloud analysis report online: Check Working quality gate. Question 2-4 Document your quality gate configuration. Well done buddies, you\u2019ve created your very first Quality Gate! Yay!","title":"Register to SonarCloud"},{"location":"ch2-discover-github-actions-tp/#going-further-split-pipelines","text":"In this step you have to separate your jobs into different workflows so that they respect 2 things: test-backend must be launched on develop and master branch and build-and-push-docker-image on master only. The job that pushes the docker api image must be launched only if test-backend is passed. Tip You can use on: workflow_run to trigger a workflow when another workflow is passed. \u00a9 Takima 2025","title":"Going further: Split pipelines"},{"location":"cheatsheet/","text":"Cheatsheet Docker & docker-compose","title":"Cheatsheet"},{"location":"cheatsheet/#cheatsheet","text":"","title":"Cheatsheet"},{"location":"cheatsheet/#docker-docker-compose","text":"","title":"Docker & docker-compose"}]} \ No newline at end of file +{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Devops in Action - Guide For each step you have a TD to discover the subject and a TP to put it into practice. The TPs follow each other and the goal is to make you start from a local application and get to an application delivered in production and accessible to all. For that we will give you each a server and a Java application. Part 1 - Docker session Docker TDs are available here Docker TPs are available here Docker slides are available here Part 2 - Github Action session Github Actions TDs are available here Github Actions TPs are available here Github Actions slides are availabe here Part 3 - Ansible session Ansible TDs are available here Ansible TPs are available here Ansible slides are available here Please read the indications carefully, most of the time what you need is in front of your eyes! \u00a9 Takima 2025","title":"Devops in Action - Guide"},{"location":"#devops-in-action-guide","text":"For each step you have a TD to discover the subject and a TP to put it into practice. The TPs follow each other and the goal is to make you start from a local application and get to an application delivered in production and accessible to all. For that we will give you each a server and a Java application. Part 1 - Docker session Docker TDs are available here Docker TPs are available here Docker slides are available here Part 2 - Github Action session Github Actions TDs are available here Github Actions TPs are available here Github Actions slides are availabe here Part 3 - Ansible session Ansible TDs are available here Ansible TPs are available here Ansible slides are available here Please read the indications carefully, most of the time what you need is in front of your eyes! 
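Regarding the "Going further: Split pipelines" tip above, a workflow can indeed be chained after another one with on: workflow_run. A minimal sketch — the workflow and file names are assumptions, and the docker job itself is the one described in the CD section:

```yaml
# .github/workflows/build-and-push.yml  (file name chosen for the example)
name: Build and push docker images
on:
  workflow_run:
    workflows: ["CI devops 2025"]   # must match the name of the test workflow
    types:
      - completed
    branches:
      - master

jobs:
  build-and-push-docker-image:
    # run only if the triggering test workflow succeeded
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@v4
      # ... docker login, build and push steps as described in the CD section
```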
\u00a9 Takima 2025","title":"Devops in Action - Guide"},{"location":"ch1-discover-docker-td/","text":"Discover Docker Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information Setup Prerequisites There are no specific skills needed for this tutorial beyond a basic comfort with the command line and using a text editor. Prior experience in developing web applications will be helpful but is not required. As you proceed further along the tutorial, we'll make use of https://cloud.docker.com/. Setting up your computer Getting all the tooling setup on your computer can be a daunting task, but getting Docker up and running on your favorite OS has become very easy. The getting started guide on Docker has detailed instructions for setting up Docker on Mac , Linux and Windows If you're using Docker for Windows make sure you have shared your drive. Important note If you're using an older version of Windows or MacOS you may need to use Docker Machine instead. All commands work in either bash or Powershell on Windows Once you are done installing Docker, test your Docker installation by running the following: docker run hello-world Unable to find image 'hello-world:latest' locally latest: Pulling from library/hello-world 03f4658f8b78: Pull complete a3ed95caeb02: Pull complete Digest: sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 Status: Downloaded newer image for hello-world:latest Hello from Docker. ... This message shows that your installation appears to be working correctly. Running your first container Now that you have everything setup, it's time to get our hands dirty. In this section, you are going to run an Alpine Linux container (a lightweight linux distribution) on your system and get a taste of the docker run command. To get started, let's run the following in our terminal: docker pull alpine Note Depending on how you've installed docker on your system, you might see a permission denied error after running the above command. Try the commands from the Getting Started tutorial to verify your installation . If you're on Linux, you may need to prefix your docker commands with sudo . Alternatively you can create a docker group to get rid of this issue. The pull command fetches the alpine image from the Docker registry and saves it in our system. You can use the docker images command to see a list of all images on your system. docker images REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE alpine latest c51f86c28340 4 weeks ago 1 .109 MB hello-world latest 690ed74de00f 5 months ago 960 B 1.1 Docker Run Great! Let's now run a Docker container based on this image. To do that you are going to use the docker run command. docker run alpine ls -l total 48 drwxr-xr-x 2 root root 4096 Mar 2 16:20 bin drwxr-xr-x 5 root root 360 Mar 18 09:47 dev drwxr-xr-x 13 root root 4096 Mar 18 09:47 etc drwxr-xr-x 2 root root 4096 Mar 2 16:20 home drwxr-xr-x 5 root root 4096 Mar 2 16:20 lib ...... ...... What happened? Behind the scenes, a lot of stuff happened. When you call run : 1. The Docker client contacts the Docker daemon. The Docker daemon checks local store if the image (alpine in this case) is available locally, and if not, downloads it from Docker Store. (Since we have issued docker pull alpine before, the download step is not necessary) The Docker daemon creates the container and then runs a command in that container. 
The Docker daemon streams the output of the command to the Docker client When you run docker run alpine , you provided a command ( ls -l ), so Docker started the command specified and you saw the listing. Let's try something more exciting. docker run alpine echo \"hello from alpine\" hello from alpine OK, that's some actual output. In this case, the Docker client dutifully ran the echo command in our alpine container and then exited it. If you've noticed, all of that happened pretty quickly. Imagine booting up a virtual machine, running a command and then killing it. Now you know why they say containers are fast! Try another command. docker run alpine /bin/sh Wait, nothing happened! Is that a bug? Well, no. These interactive shells will exit after running any scripted commands, unless they are run in an interactive terminal - so for this example to not exit, you need to docker run -it alpine /bin/sh . You are now inside the container shell and you can try out a few commands like ls -l , uname -a and others. Exit out of the container by giving the exit command. Ok, now it's time to see the docker ps command. The docker ps command shows you all containers that are currently running. docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES Since no containers are running, you see a blank line. Let's try a more useful variant: docker ps -a docker ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 36171a5da744 alpine \"/bin/sh\" 5 minutes ago Exited ( 0 ) 2 minutes ago fervent_newton a6a9d46d0b2f alpine \"echo 'hello from alp\" 6 minutes ago Exited ( 0 ) 6 minutes ago lonely_kilby ff0a5c3750b9 alpine \"ls -l\" 8 minutes ago Exited ( 0 ) 8 minutes ago elated_ramanujan c317d0a9e3d2 hello-world \"/hello\" 34 seconds ago Exited ( 0 ) 12 minutes ago stupefied_mcclintock What you see above is a list of all containers that you ran. Notice that the STATUS column shows that these containers exited a few minutes ago. You're probably wondering if there is a way to run more than just one command in a container. Let's try that now: docker run -it alpine /bin/sh / # ls bin dev etc home lib linuxrc media mnt proc root run sbin sys tmp usr var / # uname -a Linux 97916e8cb5dc 4.4.27-moby #1 SMP Wed Oct 26 14:01:48 UTC 2016 x86_64 Linux Running the run command with the -it flags attaches us to an interactive tty in the container. Now you can run as many commands in the container as you want. Take some time to run your favorite commands. Tip run -it is a very useful command to debug at the lowest level a container. That concludes a whirlwind tour of the docker run command which would most likely be the command you'll use most often. It makes sense to spend some time getting comfortable with it. To find out more about run , use docker run --help to see a list of all flags it supports. As you proceed further, we'll see a few more variants of docker run. 1.2 Terminology In the last section, you saw a lot of Docker-specific jargon which might be confusing to some. So before you go further, let's clarify some terminology that is used frequently in the Docker ecosystem. Images - The file system and configuration of our application which are used to create containers. To find out more about a Docker image, run docker inspect alpine . In the demo above, you used the docker pull command to download the alpine image. When you executed the command docker run hello-world , it also did a docker pull behind the scenes to download the hello-world image. 
Containers - Running instances of Docker images \u2014 containers run the actual applications. A container includes an application and all of its dependencies. It shares the kernel with other containers, and runs as an isolated process in user space on the host OS. You created a container using docker run which you did using the alpine image that you downloaded. A list of running containers can be seen using the docker ps command. Docker daemon - The background service running on the host that manages building, running and distributing Docker containers. Docker client - The command line tool that allows the user to interact with the Docker daemon. Docker Store - A registry of Docker images, where you can find trusted and enterprise ready containers, plugins, and Docker editions. You'll be using this later in this tutorial. 2.0 Webapps with Docker Great! So you have now looked at docker run , played with a Docker container and also got the hang of some terminology. Armed with all this knowledge, you are now ready to get to the real stuff \u2014 deploying web applications with Docker. 2.1 Run a static website in a container Note Code for this section is in this repo in the static-site directory Let's start by taking baby-steps. First, we'll use Docker to run a static website in a container. The website is based on an existing image. We'll pull a Docker image from Docker Store, run the container, and see how easy it is to set up a web server. The image that you are going to use is a single-page website that was already created for this demo and is available on the Docker Store as dockersamples/static-site . You can download and run the image directly in one go using docker run as follows. docker run -d dockersamples/static-site Note The current version of this image doesn't run without the -d flag. The -d flag enables detached mode, which detaches the running container from the terminal/shell and returns your prompt after the container starts. We are debugging the problem with this image but for now, use -d even for this first example. Tip -d is a very useful option. So, what happens when you run this command? Since the image doesn't exist on your Docker host, the Docker daemon first fetches it from the registry and then runs it as a container. Now that the server is running, do you see the website? What port is it running on? And more importantly, how do you access the container directly from our host machine? Actually, you probably won't be able to answer any of these questions yet! \u263a In this case, the client didn't tell the Docker Engine to publish any of the ports, so you need to re-run the docker run command to add this instruction. Let's re-run the command with some new flags to publish ports and pass your name to the container to customize the message displayed. We'll use the -d option again to run the container in detached mode. First, stop the container that you have just launched. In order to do this, we need the container ID. Since we ran the container in detached mode, we don't have to launch another terminal to do this. Run docker ps to view the running containers. docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES a7a0e504ca3e dockersamples/static-site \"/bin/sh -c 'cd /usr/\" 28 seconds ago Up 26 seconds 80 /tcp, 443 /tcp stupefied_mahavira Check out the CONTAINER ID column. You will need to use this CONTAINER ID value, a long sequence of characters, to identify the container you want to stop, and then to remove it. 
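As a side note, the exited containers listed by docker ps -a earlier keep piling up until you delete them; Docker ships a built-in clean-up command for that. Optional, and shown only as a tip:

```bash
# remove all stopped containers in one go
docker container prune

# or remove specific ones by (partial) CONTAINER ID
docker rm 36171a5da744 a6a9d46d0b2f
```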
The example below provides the CONTAINER ID on our system; you should use the value that you see in your terminal. docker stop a7a0e504ca3e docker rm a7a0e504ca3e Note A cool feature is that you do not need to specify the entire CONTAINER ID . You can just specify a few starting characters and if it is unique among all the containers that you have launched, the Docker client will intelligently pick it up. Now, let's launch a container in detached mode as shown below: docker run --name static-site -e AUTHOR = \"Enter Your Name Here\" -d -P dockersamples/static-site e61d12292d69556eabe2a44c16cbd54486b2527e2ce4f95438e504afb7b02810 In the above command: -d will create a container with the process detached from our terminal -P will publish all the exposed container ports to random ports on the Docker host -e is how you pass environment variables to the container. --name allows you to specify a container name AUTHOR is the environment variable name and Your Name is the value that you can pass. Now you can see the ports by running the docker port command. docker port static-site 443 /tcp -> 0 .0.0.0:32772 80 /tcp -> 0 .0.0.0:32773 You can open your freshly created website on http://localhost:[YOUR_PORT_FOR 80/tcp] . For our example this is http://localhost:32773 . You can now open http://localhost:[YOUR_PORT_FOR 80/tcp] to see your site live! For our example, this is: http://192.168.99.100:32773 . You can also run a second webserver at the same time, specifying a custom host port mapping to the container's webserver. docker run --name static-site-2 -e AUTHOR = \"Enter Your Name Here\" -d -p 8888 :80 dockersamples/static-site To deploy this on a real server you would just need to install Docker, and run the above docker command (as in this case you can see the AUTHOR is Docker which we passed as an environment variable). Now that you've seen how to run a webserver inside a Docker container, how do you create your own Docker image? This is the question we'll explore in the next section. But first, let's stop and remove the containers since you won't be using them anymore. docker stop static-site docker rm static-site Let's use a shortcut to remove the second site: docker rm -f static-site-2 Tip rm -f is a very useful option Run docker ps to make sure the containers are gone. docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 2.2 Docker Images In this section, let's dive deeper into what Docker images are. You will build your own image, use that image to run an application locally, and finally, push some of your own images to Docker Cloud. Docker images are the basis of containers. In the previous example, you pulled the dockersamples/static-site image from the. registry and asked the Docker client to run a container based on that image. To see the list of images that are available locally on your system, run the docker images command. 
docker images REPOSITORY TAG IMAGE ID CREATED SIZE dockersamples/static-site latest 92a386b6e686 2 hours ago 190 .5 MB nginx latest af4b3d7d5401 3 hours ago 190 .5 MB python 2 .7 1c32174fd534 14 hours ago 676 .8 MB postgres 9 .4 88d845ac7a88 14 hours ago 263 .6 MB containous/traefik latest 27b4e0c6b2fd 4 days ago 20 .75 MB node 0 .10 42426a5cba5f 6 days ago 633 .7 MB redis latest 4f5f397d4b7c 7 days ago 177 .5 MB mongo latest 467eb21035a8 7 days ago 309 .7 MB alpine 3 .3 70c557e50ed6 8 days ago 4 .794 MB java 7 21f6ce84e43c 8 days ago 587 .7 MB Above is a list of images that I've pulled from the registry and those I've created myself (we'll shortly see how). You will have a different list of images on your machine. The TAG refers to a particular snapshot of the image and the ID is the corresponding unique identifier for that image. For simplicity, you can think of an image akin to a git repository - images can be committed with changes and have multiple. versions. When you do not provide a specific version number, the client defaults to latest. For example you could pull a specific version of ubuntu image as follows: docker pull ubuntu:12.04 If you do not specify the version number of the image then, as mentioned, the Docker client will default to a version named latest . So for example, the docker pull command given below will pull an image named ubuntu:latest : docker pull ubuntu To get a new Docker image you can either get it from a registry (such as the Docker Store) or create your own. There are hundreds of thousands of images available on Docker Store . You can also search for images directly from the command line using docker search . An important distinction with regard to images is between base images and child images . Base images are images that have no parent images, usually images with an OS like ubuntu, alpine or debian. Child images are images that build on base images and add additional functionality. Another key concept is the idea of official images and user images. (Both of which can be base images or child images.) Official images are Docker sanctioned images. Docker, Inc. sponsors a dedicated team that is responsible for reviewing and publishing all Official Repositories content. This team works in collaboration with upstream software maintainers, security experts, and the broader Docker community. These are not prefixed by an organization or user name. In the list of images above, the python , node , alpine and nginx images are official (base) images. To find out more about them, check out the Official Images Documentation . User images are images created and shared by users like you. They build on base images and add additional functionality. Typically these are formatted as user/image-name . The user value in the image name is your Docker Store user or organization name. 2.3 Create your first image Now that you have a better understanding of images, it's time to create your own. Our main objective here is to create an image that sandboxes a small Flask application. The goal of this exercise is to create a Docker image which will run a Flask app. We'll do this by first pulling together the components for a random cat picture generator built with Python Flask, then dockerizing it by writing a Dockerfile . Finally, we'll build the image, and then run it. 2.3.1 Create a Python Flask app that displays random cat pix. 
For the purposes of this workshop, we've created a fun little Python Flask app that displays a random cat .gif every time it is loaded - because, you know, who doesn't like cats? Start by creating a directory called flask-app where we'll create the following files: app.py requirements.txt templates/index.html Dockerfile Make sure to cd flask-app before you start creating the files, because you don't want to start adding a whole bunch of other random files to your image. app.py Create the app.py with the following content: from flask import Flask , render_template import random app = Flask ( __name__ ) # list of cat images images = [ \"https://c.tenor.com/GTcT7HODLRgAAAAM/smiling-cat-creepy-cat.gif\" , \"https://media0.giphy.com/media/10dU7AN7xsi1I4/giphy.webp?cid=ecf05e47gk63rd81vzlot57qmebr7drtgf6a3khmzvjsdtu7&rid=giphy.webp&ct=g\" , \"https://media0.giphy.com/media/S6VGjvmFRu5Qk/giphy.webp?cid=ecf05e478yofpawrhffnnvb3sgjkos96vyfo5mtqhds35as6&rid=giphy.webp&ct=g\" , \"https://media3.giphy.com/media/JIX9t2j0ZTN9S/200w.webp?cid=ecf05e47gk63rd81vzlot57qmebr7drtgf6a3khmzvjsdtu7&rid=200w.webp&ct=g\" ] @app . route ( '/' ) def index (): url = random . choice ( images ) return render_template ( 'index.html' , url = url ) if __name__ == \"__main__\" : app . run ( host = \"0.0.0.0\" ) requirements.txt In order to install the Python modules required for our app, we need to create a file called requirements.txt and add the following line to that file: Flask==3.1.0 templates/index.html Create a directory called templates and create an index.html file in that directory with the following content in it: < html > < head > < style type = \"text/css\" > body { background : black ; color : white ; } div . container { max-width : 500 px ; margin : 100 px auto ; border : 20 px solid white ; padding : 10 px ; text-align : center ; } h4 { text-transform : uppercase ; } </ style > </ head > < body > < div class = \"container\" > < h4 > Cat Gif of the day </ h4 > < img src = \"{{url}}\" /> < p >< small > Courtesy: < a href = \"http://www.buzzfeed.com/copyranter/the-best-cat-gif-post-in-the-history-of-cat-gifs\" > Buzzfeed </ a ></ small ></ p > </ div > </ body > </ html > 2.3.2 Write a Dockerfile We want to create a Docker image with this web app. As mentioned above, all user images are based on a base image. Since our application is written in Python, we will build our own Python image based on Alpine . We'll do that using a Dockerfile. A Dockerfile is a text file that contains a list of commands that the Docker daemon calls while creating an image. The Dockerfile contains all the information that Docker needs to know to run the app \u2014 a base Docker image to run from, location of your project code, any dependencies it has, and what commands to run at start-up. It is a simple way to automate the image creation process. The best part is that the commands you write in a Dockerfile are almost identical to their equivalent Linux commands. This means you don't really have to learn new syntax to create your own Dockerfiles. 1 - Create a file called Dockerfile, and add content to it as described below. We'll start by specifying our base image, using the FROM keyword. We are using alpine:3.21.0, a lightweight Linux distribution that helps keep our container small and efficient: FROM alpine:3.21.0 2 - Next, we need to install Python 3, pip, and other system dependencies required for our application. The apk add command is used to install packages in Alpine Linux. We use --no-cache to prevent unnecessary image bloat. 
Add the following RUN command: RUN apk add --no-cache build-base libffi-dev openssl-dev py3-pip python3 3 - Now, we set the working directory inside the container. This ensures that all subsequent commands run within this directory: WORKDIR /usr/src/app 4 - To create an isolated Python environment, we set up a virtual environment inside our container. This helps prevent conflicts between system-wide and project-specific dependencies: RUN python3 -m venv venv 5 - To ensure that all commands within the container use the virtual environment by default, we modify the PATH environment variable: ENV PATH = \"/usr/src/app/venv/bin: $PATH \" 6 - Next, we copy the application's dependencies file (requirements.txt) into the container and install the necessary Python packages. We also upgrade pip to the latest version to ensure compatibility: COPY requirements.txt ./ RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r requirements.txt 7 - Copy the files you have created earlier into our image by using COPY command. COPY app.py ./ COPY templates/index.html ./templates/ 8 - Since our Flask application runs on port 5000, we specify that this port should be exposed. This does not automatically publish the port but serves as documentation and can be used by orchestration tools: EXPOSE 5000 9 - The last step is the command for running the application which is simply - python ./app.py . Use the CMD command to do that: CMD [ \"python\" , \"/usr/src/app/app.py\" ] The primary purpose of CMD is to tell the container which command it should run by default when it is started. 10 - Verify your Dockerfile. Our Dockerfile is now ready. This is how it looks: # our base image FROM alpine:3.21.0 # Install Python 3, pip, and system dependencies RUN apk add --no-cache build-base libffi-dev openssl-dev py3-pip python3 # Set the working directory WORKDIR /usr/src/app # Create and activate a virtual environment RUN python3 -m venv venv # Use the virtual environment for all commands ENV PATH = \"/usr/src/app/venv/bin: $PATH \" # Copy and install dependencies COPY requirements.txt ./ RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r requirements.txt # Copy application files COPY app.py ./ COPY templates/index.html ./templates/ # Expose the application port EXPOSE 5000 # Run the application inside the virtual environment CMD [ \"python\" , \"/usr/src/app/app.py\" ] 2.3.3 Build the image Now that you have your Dockerfile , you can build your image. The docker build command does the heavy-lifting of creating a docker image from a Dockerfile . When you run the docker build command given below, make sure to replace <YOUR_USERNAME> with your username. This username should be the same one you created when registering on Docker Cloud . If you haven't done that yet, please go ahead and create an account. The docker build command is quite simple - it takes an optional tag name with the -t flag, and the location of the directory containing the Dockerfile - the . indicates the current directory: docker build -t <YOUR_USERNAME>/myfirstapp . If you don't have the alpine:3.21.0 image, the client will first pull the image and then create your image. Therefore, your output on running the command will look different from mine. If everything went well, your image should be ready! Run docker images and see if your image ( <YOUR_USERNAME>/myfirstapp ) shows. 2.3.4 Run your image The next step in this section is to run the image and see if it actually works. 
docker run -p 8888 :5000 --name myfirstapp YOUR_USERNAME/myfirstapp * Serving Flask app 'app' * Debug mode: off WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead. * Running on all addresses ( 0 .0.0.0 ) * Running on http://127.0.0.1:5000 * Running on http://172.17.0.2:5000 Press CTRL+C to quit Head over to http://localhost:8888 and your app should be live. Note If you are using Docker Machine, you may need to open up another terminal and determine the container ip address using docker-machine ip default . Hit the Refresh button in the web browser to see a few more cat images. Check Show us your running flask-app ! 2.3.4 Dockerfile commands summary Here's a quick summary of the few basic commands we used in our Dockerfile. FROM starts the Dockerfile. It is a requirement that the Dockerfile must start with the FROM command. Images are created in layers, which means you can use another image as the base image for your own. The FROM command defines your base layer. As arguments, it takes the name of the image. Optionally, you can add the Docker Cloud username of the maintainer and image version, in the format username/imagename:version . RUN is used to build up the Image you're creating. For each RUN command, Docker will run the command then create a new layer of the image. This way you can roll back your image to previous states easily. The syntax for a RUN instruction is to place the full text of the shell command after the RUN (e.g., RUN mkdir /user/local/foo ). This will automatically run in a /bin/sh shell. You can define a different shell like this: RUN /bin/bash -c 'mkdir /user/local/foo ' COPY copies local files into the container. CMD defines the commands that will run on the Image at start-up. Unlike a RUN , this does not create a new layer for the Image, but simply runs the command. There can only be one CMD per a Dockerfile/Image. If you need to run multiple commands, the best way to do that is to have the CMD run a script. CMD requires that you tell it where to run the command, unlike RUN . So example CMD commands would be: CMD [ \"python\" , \"./app.py\" ] CMD [ \"/bin/bash\" , \"echo\" , \"Hello World\" ] EXPOSE creates a hint for users of an image which ports provide services. It is included in the information which can be retrieved via docker inspect <container-id> . Note The EXPOSE command does not actually make any ports accessible to the host! Instead, this requires publishing ports by means of the -p flag when using docker run . Note If you want to learn more about Dockerfiles, check out Best practices for writing Dockerfiles . (source: https://github.com/docker/labs/tree/master/beginner ) Now that you know how to run docker container and create Dockerfiles let\u2019s move on to the practical part.","title":"TD part 01 - Docker"},{"location":"ch1-discover-docker-td/#discover-docker","text":"Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information","title":"Discover Docker"},{"location":"ch1-discover-docker-td/#setup","text":"","title":"Setup"},{"location":"ch1-discover-docker-td/#prerequisites","text":"There are no specific skills needed for this tutorial beyond a basic comfort with the command line and using a text editor. Prior experience in developing web applications will be helpful but is not required. 
As you proceed further along the tutorial, we'll make use of https://cloud.docker.com/.","title":"Prerequisites"},{"location":"ch1-discover-docker-td/#setting-up-your-computer","text":"Getting all the tooling setup on your computer can be a daunting task, but getting Docker up and running on your favorite OS has become very easy. The getting started guide on Docker has detailed instructions for setting up Docker on Mac , Linux and Windows If you're using Docker for Windows make sure you have shared your drive. Important note If you're using an older version of Windows or MacOS you may need to use Docker Machine instead. All commands work in either bash or Powershell on Windows Once you are done installing Docker, test your Docker installation by running the following: docker run hello-world Unable to find image 'hello-world:latest' locally latest: Pulling from library/hello-world 03f4658f8b78: Pull complete a3ed95caeb02: Pull complete Digest: sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 Status: Downloaded newer image for hello-world:latest Hello from Docker. ... This message shows that your installation appears to be working correctly.","title":"Setting up your computer"},{"location":"ch1-discover-docker-td/#running-your-first-container","text":"Now that you have everything setup, it's time to get our hands dirty. In this section, you are going to run an Alpine Linux container (a lightweight linux distribution) on your system and get a taste of the docker run command. To get started, let's run the following in our terminal: docker pull alpine Note Depending on how you've installed docker on your system, you might see a permission denied error after running the above command. Try the commands from the Getting Started tutorial to verify your installation . If you're on Linux, you may need to prefix your docker commands with sudo . Alternatively you can create a docker group to get rid of this issue. The pull command fetches the alpine image from the Docker registry and saves it in our system. You can use the docker images command to see a list of all images on your system. docker images REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE alpine latest c51f86c28340 4 weeks ago 1 .109 MB hello-world latest 690ed74de00f 5 months ago 960 B","title":"Running your first container"},{"location":"ch1-discover-docker-td/#11-docker-run","text":"Great! Let's now run a Docker container based on this image. To do that you are going to use the docker run command. docker run alpine ls -l total 48 drwxr-xr-x 2 root root 4096 Mar 2 16:20 bin drwxr-xr-x 5 root root 360 Mar 18 09:47 dev drwxr-xr-x 13 root root 4096 Mar 18 09:47 etc drwxr-xr-x 2 root root 4096 Mar 2 16:20 home drwxr-xr-x 5 root root 4096 Mar 2 16:20 lib ...... ...... What happened? Behind the scenes, a lot of stuff happened. When you call run : 1. The Docker client contacts the Docker daemon. The Docker daemon checks local store if the image (alpine in this case) is available locally, and if not, downloads it from Docker Store. (Since we have issued docker pull alpine before, the download step is not necessary) The Docker daemon creates the container and then runs a command in that container. The Docker daemon streams the output of the command to the Docker client When you run docker run alpine , you provided a command ( ls -l ), so Docker started the command specified and you saw the listing. Let's try something more exciting. docker run alpine echo \"hello from alpine\" hello from alpine OK, that's some actual output. 
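That one-liner pattern, the image name followed by the command to run, works with anything that exists inside the image. Here are a couple more throwaway examples in the same spirit, nothing to keep afterwards:
docker run alpine cat /etc/os-release
docker run alpine date
Back to the echo example above.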
In this case, the Docker client dutifully ran the echo command in our alpine container and then exited it. If you've noticed, all of that happened pretty quickly. Imagine booting up a virtual machine, running a command and then killing it. Now you know why they say containers are fast! Try another command. docker run alpine /bin/sh Wait, nothing happened! Is that a bug? Well, no. These interactive shells will exit after running any scripted commands, unless they are run in an interactive terminal - so for this example to not exit, you need to docker run -it alpine /bin/sh . You are now inside the container shell and you can try out a few commands like ls -l , uname -a and others. Exit out of the container by giving the exit command. Ok, now it's time to see the docker ps command. The docker ps command shows you all containers that are currently running. docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES Since no containers are running, you see a blank line. Let's try a more useful variant: docker ps -a docker ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 36171a5da744 alpine \"/bin/sh\" 5 minutes ago Exited ( 0 ) 2 minutes ago fervent_newton a6a9d46d0b2f alpine \"echo 'hello from alp\" 6 minutes ago Exited ( 0 ) 6 minutes ago lonely_kilby ff0a5c3750b9 alpine \"ls -l\" 8 minutes ago Exited ( 0 ) 8 minutes ago elated_ramanujan c317d0a9e3d2 hello-world \"/hello\" 34 seconds ago Exited ( 0 ) 12 minutes ago stupefied_mcclintock What you see above is a list of all containers that you ran. Notice that the STATUS column shows that these containers exited a few minutes ago. You're probably wondering if there is a way to run more than just one command in a container. Let's try that now: docker run -it alpine /bin/sh / # ls bin dev etc home lib linuxrc media mnt proc root run sbin sys tmp usr var / # uname -a Linux 97916e8cb5dc 4.4.27-moby #1 SMP Wed Oct 26 14:01:48 UTC 2016 x86_64 Linux Running the run command with the -it flags attaches us to an interactive tty in the container. Now you can run as many commands in the container as you want. Take some time to run your favorite commands. Tip run -it is a very useful command to debug at the lowest level a container. That concludes a whirlwind tour of the docker run command which would most likely be the command you'll use most often. It makes sense to spend some time getting comfortable with it. To find out more about run , use docker run --help to see a list of all flags it supports. As you proceed further, we'll see a few more variants of docker run.","title":"1.1 Docker Run"},{"location":"ch1-discover-docker-td/#12-terminology","text":"In the last section, you saw a lot of Docker-specific jargon which might be confusing to some. So before you go further, let's clarify some terminology that is used frequently in the Docker ecosystem. Images - The file system and configuration of our application which are used to create containers. To find out more about a Docker image, run docker inspect alpine . In the demo above, you used the docker pull command to download the alpine image. When you executed the command docker run hello-world , it also did a docker pull behind the scenes to download the hello-world image. Containers - Running instances of Docker images \u2014 containers run the actual applications. A container includes an application and all of its dependencies. It shares the kernel with other containers, and runs as an isolated process in user space on the host OS. 
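You can see both halves of that definition from the command line; these are purely illustrative commands reusing the alpine image and the containers created earlier in this section:
docker image inspect alpine   # the image: a filesystem plus configuration, nothing running
docker ps -a                  # the containers: instances (running or exited) created from images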
You created a container using docker run which you did using the alpine image that you downloaded. A list of running containers can be seen using the docker ps command. Docker daemon - The background service running on the host that manages building, running and distributing Docker containers. Docker client - The command line tool that allows the user to interact with the Docker daemon. Docker Store - A registry of Docker images, where you can find trusted and enterprise ready containers, plugins, and Docker editions. You'll be using this later in this tutorial.","title":"1.2 Terminology"},{"location":"ch1-discover-docker-td/#20-webapps-with-docker","text":"Great! So you have now looked at docker run , played with a Docker container and also got the hang of some terminology. Armed with all this knowledge, you are now ready to get to the real stuff \u2014 deploying web applications with Docker.","title":"2.0 Webapps with Docker"},{"location":"ch1-discover-docker-td/#21-run-a-static-website-in-a-container","text":"Note Code for this section is in this repo in the static-site directory Let's start by taking baby-steps. First, we'll use Docker to run a static website in a container. The website is based on an existing image. We'll pull a Docker image from Docker Store, run the container, and see how easy it is to set up a web server. The image that you are going to use is a single-page website that was already created for this demo and is available on the Docker Store as dockersamples/static-site . You can download and run the image directly in one go using docker run as follows. docker run -d dockersamples/static-site Note The current version of this image doesn't run without the -d flag. The -d flag enables detached mode, which detaches the running container from the terminal/shell and returns your prompt after the container starts. We are debugging the problem with this image but for now, use -d even for this first example. Tip -d is a very useful option. So, what happens when you run this command? Since the image doesn't exist on your Docker host, the Docker daemon first fetches it from the registry and then runs it as a container. Now that the server is running, do you see the website? What port is it running on? And more importantly, how do you access the container directly from our host machine? Actually, you probably won't be able to answer any of these questions yet! \u263a In this case, the client didn't tell the Docker Engine to publish any of the ports, so you need to re-run the docker run command to add this instruction. Let's re-run the command with some new flags to publish ports and pass your name to the container to customize the message displayed. We'll use the -d option again to run the container in detached mode. First, stop the container that you have just launched. In order to do this, we need the container ID. Since we ran the container in detached mode, we don't have to launch another terminal to do this. Run docker ps to view the running containers. docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES a7a0e504ca3e dockersamples/static-site \"/bin/sh -c 'cd /usr/\" 28 seconds ago Up 26 seconds 80 /tcp, 443 /tcp stupefied_mahavira Check out the CONTAINER ID column. You will need to use this CONTAINER ID value, a long sequence of characters, to identify the container you want to stop, and then to remove it. The example below provides the CONTAINER ID on our system; you should use the value that you see in your terminal. 
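If you would rather not copy the long ID by hand, you can also let the CLI look it up for you. This is an optional convenience and assumes the static-site container above is the only one running from that image:
docker ps -q --filter ancestor=dockersamples/static-site
# prints just the short container ID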
docker stop a7a0e504ca3e docker rm a7a0e504ca3e Note A cool feature is that you do not need to specify the entire CONTAINER ID . You can just specify a few starting characters and if it is unique among all the containers that you have launched, the Docker client will intelligently pick it up. Now, let's launch a container in detached mode as shown below: docker run --name static-site -e AUTHOR = \"Enter Your Name Here\" -d -P dockersamples/static-site e61d12292d69556eabe2a44c16cbd54486b2527e2ce4f95438e504afb7b02810 In the above command: -d will create a container with the process detached from our terminal -P will publish all the exposed container ports to random ports on the Docker host -e is how you pass environment variables to the container. --name allows you to specify a container name AUTHOR is the environment variable name and Your Name is the value that you can pass. Now you can see the ports by running the docker port command. docker port static-site 443 /tcp -> 0 .0.0.0:32772 80 /tcp -> 0 .0.0.0:32773 You can open your freshly created website on http://localhost:[YOUR_PORT_FOR 80/tcp] . For our example this is http://localhost:32773 . You can now open http://localhost:[YOUR_PORT_FOR 80/tcp] to see your site live! For our example, this is: http://192.168.99.100:32773 . You can also run a second webserver at the same time, specifying a custom host port mapping to the container's webserver. docker run --name static-site-2 -e AUTHOR = \"Enter Your Name Here\" -d -p 8888 :80 dockersamples/static-site To deploy this on a real server you would just need to install Docker, and run the above docker command (as in this case you can see the AUTHOR is Docker which we passed as an environment variable). Now that you've seen how to run a webserver inside a Docker container, how do you create your own Docker image? This is the question we'll explore in the next section. But first, let's stop and remove the containers since you won't be using them anymore. docker stop static-site docker rm static-site Let's use a shortcut to remove the second site: docker rm -f static-site-2 Tip rm -f is a very useful option Run docker ps to make sure the containers are gone. docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES","title":"2.1 Run a static website in a container"},{"location":"ch1-discover-docker-td/#22-docker-images","text":"In this section, let's dive deeper into what Docker images are. You will build your own image, use that image to run an application locally, and finally, push some of your own images to Docker Cloud. Docker images are the basis of containers. In the previous example, you pulled the dockersamples/static-site image from the. registry and asked the Docker client to run a container based on that image. To see the list of images that are available locally on your system, run the docker images command. docker images REPOSITORY TAG IMAGE ID CREATED SIZE dockersamples/static-site latest 92a386b6e686 2 hours ago 190 .5 MB nginx latest af4b3d7d5401 3 hours ago 190 .5 MB python 2 .7 1c32174fd534 14 hours ago 676 .8 MB postgres 9 .4 88d845ac7a88 14 hours ago 263 .6 MB containous/traefik latest 27b4e0c6b2fd 4 days ago 20 .75 MB node 0 .10 42426a5cba5f 6 days ago 633 .7 MB redis latest 4f5f397d4b7c 7 days ago 177 .5 MB mongo latest 467eb21035a8 7 days ago 309 .7 MB alpine 3 .3 70c557e50ed6 8 days ago 4 .794 MB java 7 21f6ce84e43c 8 days ago 587 .7 MB Above is a list of images that I've pulled from the registry and those I've created myself (we'll shortly see how). 
You will have a different list of images on your machine. The TAG refers to a particular snapshot of the image and the ID is the corresponding unique identifier for that image. For simplicity, you can think of an image akin to a git repository - images can be committed with changes and have multiple. versions. When you do not provide a specific version number, the client defaults to latest. For example you could pull a specific version of ubuntu image as follows: docker pull ubuntu:12.04 If you do not specify the version number of the image then, as mentioned, the Docker client will default to a version named latest . So for example, the docker pull command given below will pull an image named ubuntu:latest : docker pull ubuntu To get a new Docker image you can either get it from a registry (such as the Docker Store) or create your own. There are hundreds of thousands of images available on Docker Store . You can also search for images directly from the command line using docker search . An important distinction with regard to images is between base images and child images . Base images are images that have no parent images, usually images with an OS like ubuntu, alpine or debian. Child images are images that build on base images and add additional functionality. Another key concept is the idea of official images and user images. (Both of which can be base images or child images.) Official images are Docker sanctioned images. Docker, Inc. sponsors a dedicated team that is responsible for reviewing and publishing all Official Repositories content. This team works in collaboration with upstream software maintainers, security experts, and the broader Docker community. These are not prefixed by an organization or user name. In the list of images above, the python , node , alpine and nginx images are official (base) images. To find out more about them, check out the Official Images Documentation . User images are images created and shared by users like you. They build on base images and add additional functionality. Typically these are formatted as user/image-name . The user value in the image name is your Docker Store user or organization name.","title":"2.2 Docker Images"},{"location":"ch1-discover-docker-td/#23-create-your-first-image","text":"Now that you have a better understanding of images, it's time to create your own. Our main objective here is to create an image that sandboxes a small Flask application. The goal of this exercise is to create a Docker image which will run a Flask app. We'll do this by first pulling together the components for a random cat picture generator built with Python Flask, then dockerizing it by writing a Dockerfile . Finally, we'll build the image, and then run it.","title":"2.3 Create your first image"},{"location":"ch1-discover-docker-td/#231-create-a-python-flask-app-that-displays-random-cat-pix","text":"For the purposes of this workshop, we've created a fun little Python Flask app that displays a random cat .gif every time it is loaded - because, you know, who doesn't like cats? 
Start by creating a directory called flask-app where we'll create the following files: app.py requirements.txt templates/index.html Dockerfile Make sure to cd flask-app before you start creating the files, because you don't want to start adding a whole bunch of other random files to your image.","title":"2.3.1 Create a Python Flask app that displays random cat pix."},{"location":"ch1-discover-docker-td/#apppy","text":"Create the app.py with the following content: from flask import Flask , render_template import random app = Flask ( __name__ ) # list of cat images images = [ \"https://c.tenor.com/GTcT7HODLRgAAAAM/smiling-cat-creepy-cat.gif\" , \"https://media0.giphy.com/media/10dU7AN7xsi1I4/giphy.webp?cid=ecf05e47gk63rd81vzlot57qmebr7drtgf6a3khmzvjsdtu7&rid=giphy.webp&ct=g\" , \"https://media0.giphy.com/media/S6VGjvmFRu5Qk/giphy.webp?cid=ecf05e478yofpawrhffnnvb3sgjkos96vyfo5mtqhds35as6&rid=giphy.webp&ct=g\" , \"https://media3.giphy.com/media/JIX9t2j0ZTN9S/200w.webp?cid=ecf05e47gk63rd81vzlot57qmebr7drtgf6a3khmzvjsdtu7&rid=200w.webp&ct=g\" ] @app . route ( '/' ) def index (): url = random . choice ( images ) return render_template ( 'index.html' , url = url ) if __name__ == \"__main__\" : app . run ( host = \"0.0.0.0\" )","title":"app.py"},{"location":"ch1-discover-docker-td/#requirementstxt","text":"In order to install the Python modules required for our app, we need to create a file called requirements.txt and add the following line to that file: Flask==3.1.0","title":"requirements.txt"},{"location":"ch1-discover-docker-td/#templatesindexhtml","text":"Create a directory called templates and create an index.html file in that directory with the following content in it: < html > < head > < style type = \"text/css\" > body { background : black ; color : white ; } div . container { max-width : 500 px ; margin : 100 px auto ; border : 20 px solid white ; padding : 10 px ; text-align : center ; } h4 { text-transform : uppercase ; } </ style > </ head > < body > < div class = \"container\" > < h4 > Cat Gif of the day </ h4 > < img src = \"{{url}}\" /> < p >< small > Courtesy: < a href = \"http://www.buzzfeed.com/copyranter/the-best-cat-gif-post-in-the-history-of-cat-gifs\" > Buzzfeed </ a ></ small ></ p > </ div > </ body > </ html >","title":"templates/index.html"},{"location":"ch1-discover-docker-td/#232-write-a-dockerfile","text":"We want to create a Docker image with this web app. As mentioned above, all user images are based on a base image. Since our application is written in Python, we will build our own Python image based on Alpine . We'll do that using a Dockerfile. A Dockerfile is a text file that contains a list of commands that the Docker daemon calls while creating an image. The Dockerfile contains all the information that Docker needs to know to run the app \u2014 a base Docker image to run from, location of your project code, any dependencies it has, and what commands to run at start-up. It is a simple way to automate the image creation process. The best part is that the commands you write in a Dockerfile are almost identical to their equivalent Linux commands. This means you don't really have to learn new syntax to create your own Dockerfiles. 1 - Create a file called Dockerfile, and add content to it as described below. We'll start by specifying our base image, using the FROM keyword. 
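Before writing the Dockerfile, you can pull the base image tag once to make sure it resolves; this is optional, since docker build will pull it automatically anyway:
docker pull alpine:3.21.0
docker image ls alpine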
We are using alpine:3.21.0, a lightweight Linux distribution that helps keep our container small and efficient: FROM alpine:3.21.0 2 - Next, we need to install Python 3, pip, and other system dependencies required for our application. The apk add command is used to install packages in Alpine Linux. We use --no-cache to prevent unnecessary image bloat. Add the following RUN command: RUN apk add --no-cache build-base libffi-dev openssl-dev py3-pip python3 3 - Now, we set the working directory inside the container. This ensures that all subsequent commands run within this directory: WORKDIR /usr/src/app 4 - To create an isolated Python environment, we set up a virtual environment inside our container. This helps prevent conflicts between system-wide and project-specific dependencies: RUN python3 -m venv venv 5 - To ensure that all commands within the container use the virtual environment by default, we modify the PATH environment variable: ENV PATH = \"/usr/src/app/venv/bin: $PATH \" 6 - Next, we copy the application's dependencies file (requirements.txt) into the container and install the necessary Python packages. We also upgrade pip to the latest version to ensure compatibility: COPY requirements.txt ./ RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r requirements.txt 7 - Copy the files you have created earlier into our image by using COPY command. COPY app.py ./ COPY templates/index.html ./templates/ 8 - Since our Flask application runs on port 5000, we specify that this port should be exposed. This does not automatically publish the port but serves as documentation and can be used by orchestration tools: EXPOSE 5000 9 - The last step is the command for running the application which is simply - python ./app.py . Use the CMD command to do that: CMD [ \"python\" , \"/usr/src/app/app.py\" ] The primary purpose of CMD is to tell the container which command it should run by default when it is started. 10 - Verify your Dockerfile. Our Dockerfile is now ready. This is how it looks: # our base image FROM alpine:3.21.0 # Install Python 3, pip, and system dependencies RUN apk add --no-cache build-base libffi-dev openssl-dev py3-pip python3 # Set the working directory WORKDIR /usr/src/app # Create and activate a virtual environment RUN python3 -m venv venv # Use the virtual environment for all commands ENV PATH = \"/usr/src/app/venv/bin: $PATH \" # Copy and install dependencies COPY requirements.txt ./ RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r requirements.txt # Copy application files COPY app.py ./ COPY templates/index.html ./templates/ # Expose the application port EXPOSE 5000 # Run the application inside the virtual environment CMD [ \"python\" , \"/usr/src/app/app.py\" ]","title":"2.3.2 Write a Dockerfile"},{"location":"ch1-discover-docker-td/#233-build-the-image","text":"Now that you have your Dockerfile , you can build your image. The docker build command does the heavy-lifting of creating a docker image from a Dockerfile . When you run the docker build command given below, make sure to replace <YOUR_USERNAME> with your username. This username should be the same one you created when registering on Docker Cloud . If you haven't done that yet, please go ahead and create an account. The docker build command is quite simple - it takes an optional tag name with the -t flag, and the location of the directory containing the Dockerfile - the . indicates the current directory: docker build -t <YOUR_USERNAME>/myfirstapp . 
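One optional habit before building: keep the build context small. Everything inside flask-app is sent to the Docker daemon, so if you created a local virtual environment or cache directories while testing the app, a .dockerignore file keeps them out of the context. The entries below are just typical examples:
printf '%s\n' venv __pycache__ '*.pyc' > .dockerignore
# then run the docker build command above as usual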
If you don't have the alpine:3.21.0 image, the client will first pull the image and then create your image. Therefore, your output on running the command will look different from mine. If everything went well, your image should be ready! Run docker images and see if your image ( <YOUR_USERNAME>/myfirstapp ) shows.","title":"2.3.3 Build the image"},{"location":"ch1-discover-docker-td/#234-run-your-image","text":"The next step in this section is to run the image and see if it actually works. docker run -p 8888 :5000 --name myfirstapp YOUR_USERNAME/myfirstapp * Serving Flask app 'app' * Debug mode: off WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead. * Running on all addresses ( 0 .0.0.0 ) * Running on http://127.0.0.1:5000 * Running on http://172.17.0.2:5000 Press CTRL+C to quit Head over to http://localhost:8888 and your app should be live. Note If you are using Docker Machine, you may need to open up another terminal and determine the container ip address using docker-machine ip default . Hit the Refresh button in the web browser to see a few more cat images. Check Show us your running flask-app !","title":"2.3.4 Run your image"},{"location":"ch1-discover-docker-td/#234-dockerfile-commands-summary","text":"Here's a quick summary of the few basic commands we used in our Dockerfile. FROM starts the Dockerfile. It is a requirement that the Dockerfile must start with the FROM command. Images are created in layers, which means you can use another image as the base image for your own. The FROM command defines your base layer. As arguments, it takes the name of the image. Optionally, you can add the Docker Cloud username of the maintainer and image version, in the format username/imagename:version . RUN is used to build up the Image you're creating. For each RUN command, Docker will run the command then create a new layer of the image. This way you can roll back your image to previous states easily. The syntax for a RUN instruction is to place the full text of the shell command after the RUN (e.g., RUN mkdir /user/local/foo ). This will automatically run in a /bin/sh shell. You can define a different shell like this: RUN /bin/bash -c 'mkdir /user/local/foo ' COPY copies local files into the container. CMD defines the commands that will run on the Image at start-up. Unlike a RUN , this does not create a new layer for the Image, but simply runs the command. There can only be one CMD per a Dockerfile/Image. If you need to run multiple commands, the best way to do that is to have the CMD run a script. CMD requires that you tell it where to run the command, unlike RUN . So example CMD commands would be: CMD [ \"python\" , \"./app.py\" ] CMD [ \"/bin/bash\" , \"echo\" , \"Hello World\" ] EXPOSE creates a hint for users of an image which ports provide services. It is included in the information which can be retrieved via docker inspect <container-id> . Note The EXPOSE command does not actually make any ports accessible to the host! Instead, this requires publishing ports by means of the -p flag when using docker run . Note If you want to learn more about Dockerfiles, check out Best practices for writing Dockerfiles . 
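To convince yourself that EXPOSE really is just metadata, you can read it back from the image you built; a quick illustration, using the image name from this exercise:
docker inspect --format '{{json .Config.ExposedPorts}}' <YOUR_USERNAME>/myfirstapp
# prints something like {\"5000/tcp\":{}}; nothing is published until you pass -p or -P to docker run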
(source: https://github.com/docker/labs/tree/master/beginner ) Now that you know how to run docker container and create Dockerfiles let\u2019s move on to the practical part.","title":"2.3.4 Dockerfile commands summary"},{"location":"ch1-discover-docker-tp/","text":"Discover Docker Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time). Question Point to document/report. Tip Interesting information. Goals Good practice Do not forget to document what you do along the steps, the documentation provided will be evaluated as your report. Create an appropriate file structure, 1 folder per image. Target application 3-tiers application: HTTP server Backend API Database For each of those applications, we will follow the same process: choose the appropriate docker base image, create and configure this image, put our application specifics inside and at some point have it running. Our final goal is to have a 3-tier web API running. Base images HTTP server Backend API Database Database Basics We will use the image: postgres:17.2-alpine. Let\u2019s have a simple postgres server running, here is what would be a minimal Dockerfile: FROM postgres:17.2-alpine ENV POSTGRES_DB = db \\ POSTGRES_USER = usr \\ POSTGRES_PASSWORD = pwd Build this image and start a container properly. Your Postgres DB should be up and running. Check that everything is running smoothly with the docker command of your choice. Don\u2019t forget to name your docker image and container. Tip If you have difficulties go back to part 2.3.3 Build the image and 2.3.4 Run your image on TD01 - Docker ( TD 1 Discover Docker ). Re-run your database with adminer . Don't forget --network app-network to enable adminer/database communication. We use -\u2013network instead of -\u2013link because the latter is deprecated. Tip Don't forget to create your network docker network create app-network Also, does it seem right to have passwords written in plain text in a file? You may rather define those environment parameters when running the image using the flag -e . Question 1-1 For which reason is it better to run the container with a flag -e to give the environment variables rather than put them directly in the Dockerfile? It would be nice to have our database structure initialized with the docker image as well as some initial data. Any sql scripts found in /docker-entrypoint-initdb.d will be executed in alphabetical order, therefore let\u2019s add a couple scripts to our image: Tip Don't forget to restart the adminer: docker run \\ -p \"8090:8080\" \\ --net = app-network \\ --name = adminer \\ -d \\ adminer Init database 01-CreateScheme.sql CREATE TABLE public . departments ( id SERIAL PRIMARY KEY , name VARCHAR ( 20 ) NOT NULL ); CREATE TABLE public . 
students ( id SERIAL PRIMARY KEY , department_id INT NOT NULL REFERENCES departments ( id ), first_name VARCHAR ( 20 ) NOT NULL , last_name VARCHAR ( 20 ) NOT NULL ); 02-InsertData.sql INSERT INTO departments ( name ) VALUES ( 'IRC' ); INSERT INTO departments ( name ) VALUES ( 'ETI' ); INSERT INTO departments ( name ) VALUES ( 'CGP' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 1 , 'Eli' , 'Copter' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 2 , 'Emma' , 'Carena' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 2 , 'Jack' , 'Uzzi' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 3 , 'Aude' , 'Javel' ); Rebuild your image and check that your scripts have been executed at startup and that the data is present in your container. Tip When we talk about /docker-entrypoint-initdb.d it means inside the container, so you have to copy your directory's content and the container\u2019s directory. Persist data You may have noticed that if your database container gets destroyed then all your data is reset, a database must persist data durably. Use volumes to persist data on the host disk. -v /my/own/datadir:/var/lib/postgresql/data Check that data survives when your container gets destroyed. Link Docker volumes Question 1-2 Why do we need a volume to be attached to our postgres container? Question 1-3 Document your database container essentials: commands and Dockerfile. Backend API Basics For starters, we will simply run a Java hello-world class in our containers, only after will we be running a jar. In both cases, choose the proper image keeping in mind that we only need a Java runtime . Here is a complex Java Hello World implementation: Main.java public class Main { public static void main ( String [] args ) { System . out . println ( \"Hello World!\" ); } } 1- Compile with your target Java: javac Main.java . 2- Write dockerfile. FROM # TODO: Choose a java JRE # TODO: Add the compiled java (aka bytecode, aka .class) # TODO: Run the Java with: \u201cjava Main\u201d command. 3- Now, to launch app you have to do the same thing that Basic step 1. Here you have a first glimpse of your backend application. In the next step we will simply enrich the build (using maven instead of a minimalistic javac) and execute a jar instead of a simple .class. \u2192 If it\u2019s a success you must see \u201cHello Word\u201d in your console. Multistage build In the previous section we were building Java code on our machine to have it running on a docker container. Wouldn\u2019t it be great to have Docker handle the build as well? You probably noticed that the default openjdk docker images contain... Well... a JDK! Create a multistage build using the Multistage . Your Dockerfile should look like this: FROM eclipse-temurin:21-jdk-alpine # Build Main.java with JDK # TODO : in next steps (not now) FROM eclipse-temurin:21-jre-alpine # Copy resource from previous stage COPY --from = 0 /usr/src/Main.class . # Run java code with the JRE # TODO : in next steps (not now) Don\u2019t fill the Dockerfile now, we will have to do it in the next steps. Backend simple api We will deploy a Springboot application providing a simple API with a single greeting endpoint. Create your Springboot application on: Spring Initializer . 
Use the following config: Project: Maven Language: Java 21 Spring Boot: 3.4.2 Packaging: Jar Dependencies: Spring Web Generate the project and give it a simple GreetingController class: package fr.takima.training.simpleapi.controller ; import org.springframework.web.bind.annotation.* ; import java.util.concurrent.atomic.AtomicLong ; @RestController public class GreetingController { private static final String template = \"Hello, %s!\" ; private final AtomicLong counter = new AtomicLong (); @GetMapping ( \"/\" ) public Greeting greeting ( @RequestParam ( value = \"name\" , defaultValue = \"World\" ) String name ) { return new Greeting ( counter . incrementAndGet (), String . format ( template , name )); } record Greeting ( long id , String content ) {} } You can now build and start your application, of course you will need maven and a jdk-21. How convenient would it be to have a virtual container to build and run our simplistic API? Oh wait, we have docker, here is how you could build and run your application with Docker: # Build stage FROM eclipse-temurin:21-jdk-alpine AS myapp-build ENV MYAPP_HOME = /opt/myapp WORKDIR $MYAPP_HOME RUN apk add --no-cache maven COPY pom.xml . COPY src ./src RUN mvn package -DskipTests # Run stage FROM eclipse-temurin:21-jre-alpine ENV MYAPP_HOME = /opt/myapp WORKDIR $MYAPP_HOME COPY --from = myapp-build $MYAPP_HOME /target/*.jar $MYAPP_HOME /myapp.jar ENTRYPOINT [ \"java\" , \"-jar\" , \"myapp.jar\" ] Question 1-4 Why do we need a multistage build? And explain each step of this dockerfile. Check A working Springboot application with a simple HelloWorld endpoint. Did you notice that maven downloads all libraries on every image build? You can contribute to saving the planet caching libraries when maven pom file has not been changed by running the goal: mvn dependency:go-offline . Backend API Let\u2019s now build and run the backend API connected to the database. You can get the zipped source code here: simple-api . You can replace only your src directory and the pom.xml file with the ones available in the repository. Adjust the configuration in simple-api/src/main/resources/application.yml (this is the application configuration). How to access the database container from your backend application? Use the deprecated --link or create a docker network . Once everything is properly bound, you should be able to access your application API, for example on: /departments/IRC/students . [ { \"id\" : 1 , \"firstname\" : \"Eli\" , \"lastname\" : \"Copter\" , \"department\" : { \"id\" : 1 , \"name\" : \"IRC\" } } ] Explore your API other endpoints, have a look at the controllers in the source code. Check A simple web API on top of your database. Http server Basics Choose an appropriate base image. Create a simple landing page: index.html and put it inside your container. It should be enough for now, start your container and check that everything is working as expected. Here are commands that you may want to try to do so: docker stats docker inspect docker logs Link Httpd Getting Started Configuration You are using the default apache configuration, and it will be enough for now, you use yours by copying it in your image. Use docker exec to retrieve this default configuration from your running container /usr/local/apache2/conf/httpd.conf . Note You can also use docker cp . 
Reverse proxy We will configure the http server as a simple reverse proxy server in front of our application, this server could be used to deliver a front-end application, to configure SSL or to handle load balancing. So this can be quite useful even though in our case we will keep things simple. Here is the documentation: Reverse Proxy . Add the following to the configuration, and you should be all set: <VirtualHost *:80> ProxyPreserveHost On ProxyPass / http://YOUR_BACKEND_LINK:8080/ ProxyPassReverse / http://YOUR_BACKEND_LINK:8080/ </VirtualHost> LoadModule proxy_module modules/mod_proxy.so LoadModule proxy_http_module modules/mod_proxy_http.so Question 1-5 Why do we need a reverse proxy? Check Checkpoint: a working application through a reverse proxy. Link application Docker-compose 1- Install docker-compose if the docker compose command does not work . You may have noticed that this can be quite painful to orchestrate manually the start, stop and rebuild of our containers. Thankfully, a useful tool called docker-compose comes in handy in those situations. 2- Let\u2019s create a docker-compose.yml file with the following structure to define and drive our containers: services : backend : build : #TODO networks : #TODO depends_on : #TODO database : build : #TODO networks : #TODO httpd : build : #TODO ports : #TODO networks : #TODO depends_on : #TODO networks : #TODO volumes : #TODO The docker-compose will handle the three containers for us. The file above is a basic example of structure, you need to add more parameters and think about the cleanest and most optimized approach like you would do in a company (for example: env variables, volumes, restart policies and processes segregation). Once your containers are orchestrated as services by docker-compose you should have a perfectly running application, make sure you can access your API on localhost . Note The ports of both your backend and database should not be opened to your host machine. Question 1-6 Why is docker-compose so important? Question 1-7 Document docker-compose most important commands. Question 1-8 Document your docker-compose file. Check A working 3-tier application running with docker-compose. Publish Your docker images are stored locally, let\u2019s publish them, so they can be used by other team members or on other machines. You will need a Docker Hub account. 1- Connect to your freshly created account with docker login . 2- Tag your image. For now, we have been only using the latest tag, now that we want to publish it, let\u2019s add some meaningful version information to our images. docker tag my-database USERNAME/my-database:1.0 3- Then push your image to dockerhub: docker push USERNAME/my-database:1.0 Dockerhub is not the only docker image registry, and you can also self-host your images (this is obviously the choice of most companies). Once you publish your images to dockerhub, you will see them in your account: having some documentation for your image would be quite useful if you want to use those later. Question 1-9 Document your publication commands and published images in dockerhub. Question 1-10 Why do we put our images into an online repo? \u00a9 Takima 2025","title":"TP part 01 - Docker"},{"location":"ch1-discover-docker-tp/#discover-docker","text":"Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time). Question Point to document/report. 
Tip Interesting information.","title":"Discover Docker"},{"location":"ch1-discover-docker-tp/#goals","text":"","title":"Goals"},{"location":"ch1-discover-docker-tp/#good-practice","text":"Do not forget to document what you do along the steps, the documentation provided will be evaluated as your report. Create an appropriate file structure, 1 folder per image.","title":"Good practice"},{"location":"ch1-discover-docker-tp/#target-application","text":"3-tiers application: HTTP server Backend API Database For each of those applications, we will follow the same process: choose the appropriate docker base image, create and configure this image, put our application specifics inside and at some point have it running. Our final goal is to have a 3-tier web API running.","title":"Target application"},{"location":"ch1-discover-docker-tp/#base-images","text":"HTTP server Backend API Database","title":"Base images"},{"location":"ch1-discover-docker-tp/#database","text":"","title":"Database"},{"location":"ch1-discover-docker-tp/#basics","text":"We will use the image: postgres:17.2-alpine. Let\u2019s have a simple postgres server running, here is what would be a minimal Dockerfile: FROM postgres:17.2-alpine ENV POSTGRES_DB = db \\ POSTGRES_USER = usr \\ POSTGRES_PASSWORD = pwd Build this image and start a container properly. Your Postgres DB should be up and running. Check that everything is running smoothly with the docker command of your choice. Don\u2019t forget to name your docker image and container. Tip If you have difficulties go back to part 2.3.3 Build the image and 2.3.4 Run your image on TD01 - Docker ( TD 1 Discover Docker ). Re-run your database with adminer . Don't forget --network app-network to enable adminer/database communication. We use -\u2013network instead of -\u2013link because the latter is deprecated. Tip Don't forget to create your network docker network create app-network Also, does it seem right to have passwords written in plain text in a file? You may rather define those environment parameters when running the image using the flag -e . Question 1-1 For which reason is it better to run the container with a flag -e to give the environment variables rather than put them directly in the Dockerfile? It would be nice to have our database structure initialized with the docker image as well as some initial data. Any sql scripts found in /docker-entrypoint-initdb.d will be executed in alphabetical order, therefore let\u2019s add a couple scripts to our image: Tip Don't forget to restart the adminer: docker run \\ -p \"8090:8080\" \\ --net = app-network \\ --name = adminer \\ -d \\ adminer","title":"Basics"},{"location":"ch1-discover-docker-tp/#init-database","text":"01-CreateScheme.sql CREATE TABLE public . departments ( id SERIAL PRIMARY KEY , name VARCHAR ( 20 ) NOT NULL ); CREATE TABLE public . 
students ( id SERIAL PRIMARY KEY , department_id INT NOT NULL REFERENCES departments ( id ), first_name VARCHAR ( 20 ) NOT NULL , last_name VARCHAR ( 20 ) NOT NULL ); 02-InsertData.sql INSERT INTO departments ( name ) VALUES ( 'IRC' ); INSERT INTO departments ( name ) VALUES ( 'ETI' ); INSERT INTO departments ( name ) VALUES ( 'CGP' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 1 , 'Eli' , 'Copter' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 2 , 'Emma' , 'Carena' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 2 , 'Jack' , 'Uzzi' ); INSERT INTO students ( department_id , first_name , last_name ) VALUES ( 3 , 'Aude' , 'Javel' ); Rebuild your image and check that your scripts have been executed at startup and that the data is present in your container. Tip When we talk about /docker-entrypoint-initdb.d it means inside the container, so you have to copy your directory's content and the container\u2019s directory.","title":"Init database"},{"location":"ch1-discover-docker-tp/#persist-data","text":"You may have noticed that if your database container gets destroyed then all your data is reset, a database must persist data durably. Use volumes to persist data on the host disk. -v /my/own/datadir:/var/lib/postgresql/data Check that data survives when your container gets destroyed. Link Docker volumes Question 1-2 Why do we need a volume to be attached to our postgres container? Question 1-3 Document your database container essentials: commands and Dockerfile.","title":"Persist data"},{"location":"ch1-discover-docker-tp/#backend-api","text":"","title":"Backend API"},{"location":"ch1-discover-docker-tp/#basics_1","text":"For starters, we will simply run a Java hello-world class in our containers, only after will we be running a jar. In both cases, choose the proper image keeping in mind that we only need a Java runtime . Here is a complex Java Hello World implementation: Main.java public class Main { public static void main ( String [] args ) { System . out . println ( \"Hello World!\" ); } } 1- Compile with your target Java: javac Main.java . 2- Write dockerfile. FROM # TODO: Choose a java JRE # TODO: Add the compiled java (aka bytecode, aka .class) # TODO: Run the Java with: \u201cjava Main\u201d command. 3- Now, to launch app you have to do the same thing that Basic step 1. Here you have a first glimpse of your backend application. In the next step we will simply enrich the build (using maven instead of a minimalistic javac) and execute a jar instead of a simple .class. \u2192 If it\u2019s a success you must see \u201cHello Word\u201d in your console.","title":"Basics"},{"location":"ch1-discover-docker-tp/#multistage-build","text":"In the previous section we were building Java code on our machine to have it running on a docker container. Wouldn\u2019t it be great to have Docker handle the build as well? You probably noticed that the default openjdk docker images contain... Well... a JDK! Create a multistage build using the Multistage . Your Dockerfile should look like this: FROM eclipse-temurin:21-jdk-alpine # Build Main.java with JDK # TODO : in next steps (not now) FROM eclipse-temurin:21-jre-alpine # Copy resource from previous stage COPY --from = 0 /usr/src/Main.class . 
# Run java code with the JRE # TODO : in next steps (not now) Don\u2019t fill the Dockerfile now, we will have to do it in the next steps.","title":"Multistage build"},{"location":"ch1-discover-docker-tp/#backend-simple-api","text":"We will deploy a Springboot application providing a simple API with a single greeting endpoint. Create your Springboot application on: Spring Initializer . Use the following config: Project: Maven Language: Java 21 Spring Boot: 3.4.2 Packaging: Jar Dependencies: Spring Web Generate the project and give it a simple GreetingController class: package fr.takima.training.simpleapi.controller ; import org.springframework.web.bind.annotation.* ; import java.util.concurrent.atomic.AtomicLong ; @RestController public class GreetingController { private static final String template = \"Hello, %s!\" ; private final AtomicLong counter = new AtomicLong (); @GetMapping ( \"/\" ) public Greeting greeting ( @RequestParam ( value = \"name\" , defaultValue = \"World\" ) String name ) { return new Greeting ( counter . incrementAndGet (), String . format ( template , name )); } record Greeting ( long id , String content ) {} } You can now build and start your application, of course you will need maven and a jdk-21. How convenient would it be to have a virtual container to build and run our simplistic API? Oh wait, we have docker, here is how you could build and run your application with Docker: # Build stage FROM eclipse-temurin:21-jdk-alpine AS myapp-build ENV MYAPP_HOME = /opt/myapp WORKDIR $MYAPP_HOME RUN apk add --no-cache maven COPY pom.xml . COPY src ./src RUN mvn package -DskipTests # Run stage FROM eclipse-temurin:21-jre-alpine ENV MYAPP_HOME = /opt/myapp WORKDIR $MYAPP_HOME COPY --from = myapp-build $MYAPP_HOME /target/*.jar $MYAPP_HOME /myapp.jar ENTRYPOINT [ \"java\" , \"-jar\" , \"myapp.jar\" ] Question 1-4 Why do we need a multistage build? And explain each step of this dockerfile. Check A working Springboot application with a simple HelloWorld endpoint. Did you notice that maven downloads all libraries on every image build? You can contribute to saving the planet caching libraries when maven pom file has not been changed by running the goal: mvn dependency:go-offline .","title":"Backend simple api"},{"location":"ch1-discover-docker-tp/#backend-api_1","text":"Let\u2019s now build and run the backend API connected to the database. You can get the zipped source code here: simple-api . You can replace only your src directory and the pom.xml file with the ones available in the repository. Adjust the configuration in simple-api/src/main/resources/application.yml (this is the application configuration). How to access the database container from your backend application? Use the deprecated --link or create a docker network . Once everything is properly bound, you should be able to access your application API, for example on: /departments/IRC/students . [ { \"id\" : 1 , \"firstname\" : \"Eli\" , \"lastname\" : \"Copter\" , \"department\" : { \"id\" : 1 , \"name\" : \"IRC\" } } ] Explore your API other endpoints, have a look at the controllers in the source code. Check A simple web API on top of your database.","title":"Backend API"},{"location":"ch1-discover-docker-tp/#http-server","text":"","title":"Http server"},{"location":"ch1-discover-docker-tp/#basics_2","text":"","title":"Basics"},{"location":"ch1-discover-docker-tp/#choose-an-appropriate-base-image","text":"Create a simple landing page: index.html and put it inside your container. 
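One possible quick sketch, assuming you pick the official httpd image as your base: a two-line Dockerfile that copies index.html into Apache's document root, then a build and run. The image name my-httpd and the 8080 host port are just examples:
cat > Dockerfile <<'EOF'
FROM httpd:2.4
COPY ./index.html /usr/local/apache2/htdocs/
EOF
docker build -t my-httpd .
docker run -d --name my-httpd -p 8080:80 my-httpd
curl http://localhost:8080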
It should be enough for now, start your container and check that everything is working as expected. Here are commands that you may want to try to do so: docker stats docker inspect docker logs Link Httpd Getting Started","title":"Choose an appropriate base image."},{"location":"ch1-discover-docker-tp/#configuration","text":"You are using the default apache configuration, and it will be enough for now, you use yours by copying it in your image. Use docker exec to retrieve this default configuration from your running container /usr/local/apache2/conf/httpd.conf . Note You can also use docker cp .","title":"Configuration"},{"location":"ch1-discover-docker-tp/#reverse-proxy","text":"We will configure the http server as a simple reverse proxy server in front of our application, this server could be used to deliver a front-end application, to configure SSL or to handle load balancing. So this can be quite useful even though in our case we will keep things simple. Here is the documentation: Reverse Proxy . Add the following to the configuration, and you should be all set: <VirtualHost *:80> ProxyPreserveHost On ProxyPass / http://YOUR_BACKEND_LINK:8080/ ProxyPassReverse / http://YOUR_BACKEND_LINK:8080/ </VirtualHost> LoadModule proxy_module modules/mod_proxy.so LoadModule proxy_http_module modules/mod_proxy_http.so Question 1-5 Why do we need a reverse proxy? Check Checkpoint: a working application through a reverse proxy.","title":"Reverse proxy"},{"location":"ch1-discover-docker-tp/#link-application","text":"","title":"Link application"},{"location":"ch1-discover-docker-tp/#docker-compose","text":"1- Install docker-compose if the docker compose command does not work . You may have noticed that this can be quite painful to orchestrate manually the start, stop and rebuild of our containers. Thankfully, a useful tool called docker-compose comes in handy in those situations. 2- Let\u2019s create a docker-compose.yml file with the following structure to define and drive our containers: services : backend : build : #TODO networks : #TODO depends_on : #TODO database : build : #TODO networks : #TODO httpd : build : #TODO ports : #TODO networks : #TODO depends_on : #TODO networks : #TODO volumes : #TODO The docker-compose will handle the three containers for us. The file above is a basic example of structure, you need to add more parameters and think about the cleanest and most optimized approach like you would do in a company (for example: env variables, volumes, restart policies and processes segregation). Once your containers are orchestrated as services by docker-compose you should have a perfectly running application, make sure you can access your API on localhost . Note The ports of both your backend and database should not be opened to your host machine. Question 1-6 Why is docker-compose so important? Question 1-7 Document docker-compose most important commands. Question 1-8 Document your docker-compose file. Check A working 3-tier application running with docker-compose.","title":"Docker-compose"},{"location":"ch1-discover-docker-tp/#publish","text":"Your docker images are stored locally, let\u2019s publish them, so they can be used by other team members or on other machines. You will need a Docker Hub account. 1- Connect to your freshly created account with docker login . 2- Tag your image. For now, we have been only using the latest tag, now that we want to publish it, let\u2019s add some meaningful version information to our images. 
docker tag my-database USERNAME/my-database:1.0 3- Then push your image to dockerhub: docker push USERNAME/my-database:1.0 Dockerhub is not the only docker image registry, and you can also self-host your images (this is obviously the choice of most companies). Once you publish your images to dockerhub, you will see them in your account: having some documentation for your image would be quite useful if you want to use those later. Question 1-9 Document your publication commands and published images in dockerhub. Question 1-10 Why do we put our images into an online repo? \u00a9 Takima 2025","title":"Publish"},{"location":"ch2-discover-github-actions-td/","text":"Discover Github Note Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information Setup Prerequisites Even if it seems pretty usual to use Git in the development world, not every project is managed with this tool. The main goal here is to have you create and set up a Github account before using it for further purposes. Git will be required as well as it is a must have. You might want to start with Sign up to Github First step is (if not already done) to sign up to Github with your school mail address and fill the required information. We recommend you to use an individual free plan for the next steps of this project. You can eventually fill the last page but it\u2019s not really important. Select \u201cComplete setup\u201d. There you are, your (probably not first) Github account is set up. Yay ! Now, let\u2019s move on to the next step ! Project forking and publishing For this part, we are going to fork the project that will be used for the rest of the lesson (I mean, till the end of the week). Now you own the project under your Github workspace, you can basically do whatever you want on this project. However we recommend not to modify the whole java and maven content if you still want this project to compile. First of all, make sure the git CLI is installed on your computer and then clone the project on your computer to be able to modify it locally. Securing Github access There are actually two different ways of cloning and publishing a project. By default, Github will propose you to clone by HTTPS link. Copy to clipboard, then open a new terminal and enter : $ git clone <project_url_with_https> Git will probably ask you to authenticate in order to be able to clone the repository. It will ask you the same thing every time you want to publish your work on a branch. This might be painful and you don\u2019t want to do this. The second option is \u201cuse SSH\u201d and the link starts with \u201cgit@github.com:\u2026\u201d, but there is a prerequisite to use this solution, you\u2019ll need to create an SSH key and have it added to your account. Fine, then tape: $ ssh-keygen -t rsa -b 4096 -f ~/.ssh/ { theNameOfYourKeyPair } It will ask you to enter and confirm a passphrase, this is for security purposes but we will let it empty for this course. Well done, you\u2019ve generated a new RSA key pair of 4096 bits size. If you do \u201cls ~/.ssh\u201d you\u2019ll see new files inside your folder, one is named theNameOfYourKeyPair and the other one theNameOfYourKeyPair.pub. The first one is your private key to NEVER communicate to anyone and the second one is you public key. 
Let\u2019s take a look to this last one, enter \u201ccat ~/.ssh/theNameOfYourKeyPair.pub\u201d: Something like this will appear on you terminal, this is the content of your public key that you will communicate to Github. Copy the whole content and past it to you Github account under \u201cSettings\u201d and \u201cSSH and GPG keys\u201d. Click on New SSH key and paste the content of your public key. Give it a name and validate the operation. Now try to clone the repository again with the git@ prefix. It will ask you to select a key pair to perform the action. Take the one you\u2019ve just indicated to Github and press enter. Now you are able to clone and publish work on your Github repository without entering a password every time, I hope you enjoy this. Let\u2019s publish Open the project inside your favorite IDE (I hope it\u2019s IntelliJ) and open the file README.md. Modify this file entering, for example \u201cThis project is now mine\u201d. Save it and check that Git has correctly seen you changes $ git status You\u2019ll see you file colored in red. This means that Git has seen you\u2019ve made some modifications to this file, but it will not take them into account once you will publish them. Then ask git to add them to your work. $ git add . Actually, we did not ask him to add our file, but to add any modification made to any file inside our working directory. Now if you enter \u201cgit status\u201d again you\u2019ll see that your file is colored in green. You work will be taken into account, hopefully. Let\u2019s commit this work: $ git commit -m \u201cThe message of your commit\u201d Now if you try to \u201cgit status\u201d again you\u2019ll see that your workspace is \u201cclean\u201d. Git created a new reference with all the changes you\u2019ve made. If you go on and enter: $ git log You\u2019ll see the message of you last commit on top of the references. However you cannot see the changes on the Github website because we did not publish yet our work. Let\u2019s do it ! $ git push origin master This command literally means \u201cI want to publish my work on the distant/remote branch master\u201d. And now you can see that your work is published online ! Big up guys ! Configure your repository Git is one of the most useful tool you\u2019ll find in your developer life. Almost everybody uses it and most of the time you\u2019ll have to work with other people on project using Github. However you\u2019ll find many people that use it wrongly, and many people that will create things you don\u2019t want to merge in you production branch. Let\u2019s secure a bit our labor to prevent any fool to throw it away. Go back to your project on the Github webpage and click on settings. Go to Branches and you\u2019ll see that your default branch is master. Fine, it means that every time you connect on your repository, this branch will be displayed. Just under this indication, you\u2019ll see a Branch protection rule. Try to add one. You\u2019ll see a bunch of options, most of them are very useful working in team (especially asking for pull request and review before merging inside master branch). You can also select options to block push force (when someone does push -f) because it doesn\u2019t take care of Git warning messages that usually prevent you from pushing. As you are working alone on this project we will only add the name \u201cmaster\u201d to the naming pattern and let the rest as it is. It will only prevent you from doing bad things on you master branch. 
Finally, be aware that all the work you do on Github is public by default. Therefore you should or you must NEVER publish any password on your repository. Thankfully you can turn your repository to private from the options and there are Environment Variables that you can set and secure (I mean encrypt) inside your Github repository under Secrets. Git basic commands Clone a project $ git clone <url_of_the_project> Fetch distant modifications without merging them into your branch $ git fetch -p Fetch distant modifications and merge them into you branch $ git pull Add your changes to the workspace $ git add . Commit your changes $ git commit -m \u201cYour message\u201d Publish your changes $ git push origin <name_of_the_remote_branch>","title":"TD part 02 - Github Actions"},{"location":"ch2-discover-github-actions-td/#discover-github","text":"Note Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information","title":"Discover Github"},{"location":"ch2-discover-github-actions-td/#setup","text":"","title":"Setup"},{"location":"ch2-discover-github-actions-td/#prerequisites","text":"Even if it seems pretty usual to use Git in the development world, not every project is managed with this tool. The main goal here is to have you create and set up a Github account before using it for further purposes. Git will be required as well as it is a must have. You might want to start with","title":"Prerequisites"},{"location":"ch2-discover-github-actions-td/#sign-up-to-github","text":"First step is (if not already done) to sign up to Github with your school mail address and fill the required information. We recommend you to use an individual free plan for the next steps of this project. You can eventually fill the last page but it\u2019s not really important. Select \u201cComplete setup\u201d. There you are, your (probably not first) Github account is set up. Yay ! Now, let\u2019s move on to the next step !","title":"Sign up to Github"},{"location":"ch2-discover-github-actions-td/#project-forking-and-publishing","text":"For this part, we are going to fork the project that will be used for the rest of the lesson (I mean, till the end of the week). Now you own the project under your Github workspace, you can basically do whatever you want on this project. However we recommend not to modify the whole java and maven content if you still want this project to compile. First of all, make sure the git CLI is installed on your computer and then clone the project on your computer to be able to modify it locally.","title":"Project forking and publishing"},{"location":"ch2-discover-github-actions-td/#securing-github-access","text":"There are actually two different ways of cloning and publishing a project. By default, Github will propose you to clone by HTTPS link. Copy to clipboard, then open a new terminal and enter : $ git clone <project_url_with_https> Git will probably ask you to authenticate in order to be able to clone the repository. It will ask you the same thing every time you want to publish your work on a branch. This might be painful and you don\u2019t want to do this. The second option is \u201cuse SSH\u201d and the link starts with \u201cgit@github.com:\u2026\u201d, but there is a prerequisite to use this solution, you\u2019ll need to create an SSH key and have it added to your account. 
Fine, then type: $ ssh-keygen -t rsa -b 4096 -f ~/.ssh/ { theNameOfYourKeyPair } It will ask you to enter and confirm a passphrase; this is for security purposes, but we will leave it empty for this course. Well done, you've generated a new 4096-bit RSA key pair. If you run \"ls ~/.ssh\" you'll see new files inside your folder: one is named theNameOfYourKeyPair and the other theNameOfYourKeyPair.pub. The first one is your private key, to NEVER communicate to anyone, and the second one is your public key. Let's take a look at the latter: enter \"cat ~/.ssh/theNameOfYourKeyPair.pub\". The content of your public key will appear in your terminal; this is what you will communicate to Github. Copy the whole content and paste it into your Github account under \"Settings\" and \"SSH and GPG keys\". Click on New SSH key and paste the content of your public key. Give it a name and validate the operation. Now try to clone the repository again with the git@ prefix. It will ask you to select a key pair to perform the action. Take the one you've just added to Github and press enter. Now you are able to clone and publish work on your Github repository without entering a password every time; I hope you enjoy this.","title":"Securing Github access"},{"location":"ch2-discover-github-actions-td/#lets-publish","text":"Open the project inside your favorite IDE (I hope it's IntelliJ) and open the file README.md. Modify this file by entering, for example, \"This project is now mine\". Save it and check that Git has correctly seen your changes: $ git status You'll see your file colored in red. This means that Git has seen that you've made some modifications to this file, but they will not be part of your next commit until you stage them. So ask git to add them to your work: $ git add . Actually, we did not ask it to add only our file, but to add any modification made to any file inside our working directory. Now if you enter \"git status\" again you'll see that your file is colored in green: your work will be taken into account. Let's commit this work: $ git commit -m \"The message of your commit\" Now if you run \"git status\" again you'll see that your workspace is \"clean\". Git created a new reference with all the changes you've made. If you go on and enter: $ git log you'll see the message of your last commit on top of the references. However, you cannot see the changes on the Github website yet because we have not published our work. Let's do it! $ git push origin master This command literally means \"I want to publish my work on the distant/remote branch master\". And now you can see that your work is published online! Big up guys!","title":"Let's publish"},{"location":"ch2-discover-github-actions-td/#configure-your-repository","text":"Git is one of the most useful tools you'll find in your developer life. Almost everybody uses it, and most of the time you'll have to work with other people on projects using Github. However, you'll find many people who use it incorrectly, and many who will create things you don't want to merge into your production branch. Let's secure our work a bit to prevent anyone from throwing it away. Go back to your project on the Github webpage and click on settings. Go to Branches and you'll see that your default branch is master.
Fine, it means that every time you connect on your repository, this branch will be displayed. Just under this indication, you\u2019ll see a Branch protection rule. Try to add one. You\u2019ll see a bunch of options, most of them are very useful working in team (especially asking for pull request and review before merging inside master branch). You can also select options to block push force (when someone does push -f) because it doesn\u2019t take care of Git warning messages that usually prevent you from pushing. As you are working alone on this project we will only add the name \u201cmaster\u201d to the naming pattern and let the rest as it is. It will only prevent you from doing bad things on you master branch. Finally, be aware that all the work you do on Github is public by default. Therefore you should or you must NEVER publish any password on your repository. Thankfully you can turn your repository to private from the options and there are Environment Variables that you can set and secure (I mean encrypt) inside your Github repository under Secrets.","title":"Configure your repository"},{"location":"ch2-discover-github-actions-td/#git-basic-commands","text":"Clone a project $ git clone <url_of_the_project> Fetch distant modifications without merging them into your branch $ git fetch -p Fetch distant modifications and merge them into you branch $ git pull Add your changes to the workspace $ git add . Commit your changes $ git commit -m \u201cYour message\u201d Publish your changes $ git push origin <name_of_the_remote_branch>","title":"Git basic commands"},{"location":"ch2-discover-github-actions-tp/","text":"Discover Github Action Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information Goals Good Practice Do not forget to document what you do along the steps. Create an appropriate file structure, 1 folder per image. Target Application Complete pipeline workflow for testing and delivering your software application. We are going to use different useful tools to build your application, test it automatically, and check the code quality at the same time. Link GitHub Actions Setup GitHub Actions The first tool we are going to use is GitHub Actions . GitHub Actions is an online service that allows you to build pipelines to test your application. Keep in mind that GitHub Actions is not the only one on the market to build integration pipelines. Historically many companies were using Jenkins (and still a lot continue to do it), it is way less accessible than GitHub Actions but much more configurable. You will also hear about Gitlab CI and Bitbucket Pipelines during your work life. First steps into the CI World Note Use your repository from the end of the Docker TP Most of the CI services use a yaml file (except Jenkins that uses a\u2026 Groovy file\u2026) to describe the expected steps to be done over the pipeline execution. Go on and create your first main.yml file into your project\u2019s root directory. Build and test your Application For those who are not familiar with Maven and Java project structures, here is the command for building and running your tests: mvn clean verify You need to launch this command from your pom.xml directory, or specify the path to it with --file /path/to/pom.xml argument. Note What is it supposed to do? 
This command will actually clear your previous builds inside your cache (otherwise your can have unexpected behavior because maven did not build again each part of your application), then it will freshly build each module inside your application, and finally it will run both Unit Tests and Integration Tests (sometime called Component Tests as well). Note Unit tests? Component tests? Integration tests require a database to verify you correctly inserted or retrieved data from it. Fortunately for you, we\u2019ve already taken care of this! But you still need to understand how it works under the hood. Take a look at your application file tree. Let\u2019s take a look at the pom.xml that is inside the simple-api , you will find some very helpful dependencies for your testing. <dependencies> <dependency> <groupId> org.testcontainers </groupId> <artifactId> testcontainers </artifactId> <version> ${testcontainers.version} </version> <scope> test </scope> </dependency> <dependency> <groupId> org.testcontainers </groupId> <artifactId> jdbc </artifactId> <version> ${testcontainers.version} </version> <scope> test </scope> </dependency> <dependency> <groupId> org.testcontainers </groupId> <artifactId> postgresql </artifactId> <version> ${testcontainers.version} </version> <scope> test </scope> </dependency> </dependencies> As you can see, there are a bunch of testcontainers dependencies inside the pom. Question 2-1 What are testcontainers? They simply are java libraries that allow you to run a bunch of docker containers while testing. Here we use the postgresql container to attach to our application while testing. If you run the command mvn clean verify you\u2019ll be able to see the following: As you can see, a docker container has been launched while your tests were running, pretty convenient, isn\u2019t it? Finally, you\u2019ll see your test results. Now, it is up to you! Create your first CI, asking to build and test your application every time someone commits and pushes code on the repository. First you create a .github/workflows directory in your repository on GitHub. Put your main.yml inside workflows. The main.yml holds the architecture of your pipeline. Each job will represent a step of what you want to do. Each job will be run in parallel unless a link is specified. Here is what your main.yml should look like: name : CI devops 2025 on : #to begin you want to launch this job in main and develop push : branches : #TODO pull_request : jobs : test-backend : runs-on : ubuntu-24.04 steps : #checkout your github code using actions/checkout@v4 - uses : actions/checkout@v4 #do the same with another action (actions/setup-java@v4) that enable to setup jdk 21 - name : Set up JDK 21 #TODO #finally build your app with the latest command - name : Build and test with Maven run : #TODO It\u2019s your turn, fill the #TODOs! To see the result you must follow the next steps: And if it\u2019s GREEN you win! Check First CI with backend test ! Question 2-2 Document your Github Actions configurations. First steps into the CD World Here we are going to configure the Continuous Delivery of our project. Therefore, the main goal will be to create and save a docker image containing our application on the Docker Hub every time there is a commit on a main branch. As you probably already noticed, you need to log in to docker hub to perform any publication. However, you don\u2019t want to publish your credentials on a public repository (it is not even a good practise to do it on a private repository). 
Fortunately, GitHub allows you to create secured environment variables. 1- Add your docker hub credentials to the environment variables in GitHub Actions (and keep them secured). Note Secured Variables, why? Now that you have added them, you can freely declare them and use them inside your GitHub Actions pipeline. 2- Build your docker images inside your GitHub Actions pipeline. Maybe the template Build a docker image can help you! For now, we only need to build the images # define job to build and publish docker image build-and-push-docker-image : needs : test-backend # run only when code is compiling and tests are passing runs-on : ubuntu-24.04 # steps to perform in job steps : - name : Checkout code uses : actions/checkout@v4 - name : Build image and push backend uses : docker/build-push-action@v6 with : # relative path to the place where source code with Dockerfile is located context : # Note: tags has to be all lower-case tags : ${{secrets.DOCKERHUB_USERNAME}}/tp-devops-simple-api:latest - name : Build image and push database # DO the same for database - name : Build image and push httpd # DO the same for httpd Note Why did we put needs: test-backend on this job? Maybe try without this and you will see! OK, your images are built but not yet published on dockerhub . 3- Publish your docker images when there is a commit on the main branch. Don't forget to do a docker login and to put your credentials in secrets! - name : Login to DockerHub run : echo \"${{ secrets.DOCKERHUB_TOKEN }}\" | docker login --username ${{ secrets.DOCKERHUB_USERNAME }} --password-stdin Then modify the Build image and push backend job to add a push action: - name : Build image and push backend uses : docker/build-push-action@v6 with : # relative path to the place where source code with Dockerfile is located context : # Note: tags has to be all lower-case tags : ${{secrets.DOCKERHUB_USERNAME}}/tp-devops-simple-api:latest # build on feature branches, push only on main branch push : ${{ github.ref == 'refs/heads/main' }} Do the same for the other containers. Question 2-3 For what purpose do we need to push docker images? Now you should be able to find your docker images in your docker repository. Check Working CI & Docker images pushed to your repository. Setup Quality Gate What is quality about? Quality is here to make sure your code will be maintainable and to spot every insecure block. It helps you produce better and tested features, and it will also prevent dirty code from being pushed into your main branch. For this purpose, we are going to use SonarCloud , a cloud solution that analyses your code and produces reports. This is a useful tool that everyone should use in order to learn java best practices. Register to SonarCloud Create your free-tier account on SonarCloud . SonarCloud will propose to set up your GitHub Actions pipeline for you, but forget about that: there is a much better way, which is to keep the token SonarCloud provides and use it in your main.yml . 1- You must create an organization. 2- Keep the project key and the organization key, you will need them later. 3- You need to add this script to your main.yml to launch sonar on each commit. Set up your pipeline to use SonarCloud analysis while testing. For that, you need to add a new step after Build and test with Maven and change the sonar organization and project key.
mvn -B verify sonar:sonar -Dsonar.projectKey = <your-project-key> -Dsonar.organization = <your-organization> -Dsonar.host.url = https://sonarcloud.io -Dsonar.login = ${ { secrets.SONAR_TOKEN } } --file ./simple-api/pom.xml If you did your configuration correctly, you should be able to see the SonarCloud analysis report online: Check Working quality gate. Question 2-4 Document your quality gate configuration. Well done buddies, you\u2019ve created your very first Quality Gate! Yay! Going further: Split pipelines In this step you have to separate your jobs into different workflows so that they respect 2 things: test-backend must be launched on develop and master branch and build-and-push-docker-image on master only. The job that pushes the docker api image must be launched only if test-backend is passed. Tip You can use on: workflow_run to trigger a workflow when another workflow is passed. \u00a9 Takima 2025","title":"TP part 02 - Github Actions"},{"location":"ch2-discover-github-actions-tp/#discover-github-action","text":"Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information","title":"Discover Github Action"},{"location":"ch2-discover-github-actions-tp/#goals","text":"","title":"Goals"},{"location":"ch2-discover-github-actions-tp/#good-practice","text":"Do not forget to document what you do along the steps. Create an appropriate file structure, 1 folder per image.","title":"Good Practice"},{"location":"ch2-discover-github-actions-tp/#target-application","text":"Complete pipeline workflow for testing and delivering your software application. We are going to use different useful tools to build your application, test it automatically, and check the code quality at the same time. Link GitHub Actions","title":"Target Application"},{"location":"ch2-discover-github-actions-tp/#setup-github-actions","text":"The first tool we are going to use is GitHub Actions . GitHub Actions is an online service that allows you to build pipelines to test your application. Keep in mind that GitHub Actions is not the only one on the market to build integration pipelines. Historically many companies were using Jenkins (and still a lot continue to do it), it is way less accessible than GitHub Actions but much more configurable. You will also hear about Gitlab CI and Bitbucket Pipelines during your work life.","title":"Setup GitHub Actions"},{"location":"ch2-discover-github-actions-tp/#first-steps-into-the-ci-world","text":"Note Use your repository from the end of the Docker TP Most of the CI services use a yaml file (except Jenkins that uses a\u2026 Groovy file\u2026) to describe the expected steps to be done over the pipeline execution. Go on and create your first main.yml file into your project\u2019s root directory.","title":"First steps into the CI World"},{"location":"ch2-discover-github-actions-tp/#build-and-test-your-application","text":"For those who are not familiar with Maven and Java project structures, here is the command for building and running your tests: mvn clean verify You need to launch this command from your pom.xml directory, or specify the path to it with --file /path/to/pom.xml argument. Note What is it supposed to do? 
This command will actually clear your previous builds inside your cache (otherwise your can have unexpected behavior because maven did not build again each part of your application), then it will freshly build each module inside your application, and finally it will run both Unit Tests and Integration Tests (sometime called Component Tests as well). Note Unit tests? Component tests? Integration tests require a database to verify you correctly inserted or retrieved data from it. Fortunately for you, we\u2019ve already taken care of this! But you still need to understand how it works under the hood. Take a look at your application file tree. Let\u2019s take a look at the pom.xml that is inside the simple-api , you will find some very helpful dependencies for your testing. <dependencies> <dependency> <groupId> org.testcontainers </groupId> <artifactId> testcontainers </artifactId> <version> ${testcontainers.version} </version> <scope> test </scope> </dependency> <dependency> <groupId> org.testcontainers </groupId> <artifactId> jdbc </artifactId> <version> ${testcontainers.version} </version> <scope> test </scope> </dependency> <dependency> <groupId> org.testcontainers </groupId> <artifactId> postgresql </artifactId> <version> ${testcontainers.version} </version> <scope> test </scope> </dependency> </dependencies> As you can see, there are a bunch of testcontainers dependencies inside the pom. Question 2-1 What are testcontainers? They simply are java libraries that allow you to run a bunch of docker containers while testing. Here we use the postgresql container to attach to our application while testing. If you run the command mvn clean verify you\u2019ll be able to see the following: As you can see, a docker container has been launched while your tests were running, pretty convenient, isn\u2019t it? Finally, you\u2019ll see your test results. Now, it is up to you! Create your first CI, asking to build and test your application every time someone commits and pushes code on the repository. First you create a .github/workflows directory in your repository on GitHub. Put your main.yml inside workflows. The main.yml holds the architecture of your pipeline. Each job will represent a step of what you want to do. Each job will be run in parallel unless a link is specified. Here is what your main.yml should look like: name : CI devops 2025 on : #to begin you want to launch this job in main and develop push : branches : #TODO pull_request : jobs : test-backend : runs-on : ubuntu-24.04 steps : #checkout your github code using actions/checkout@v4 - uses : actions/checkout@v4 #do the same with another action (actions/setup-java@v4) that enable to setup jdk 21 - name : Set up JDK 21 #TODO #finally build your app with the latest command - name : Build and test with Maven run : #TODO It\u2019s your turn, fill the #TODOs! To see the result you must follow the next steps: And if it\u2019s GREEN you win! Check First CI with backend test ! Question 2-2 Document your Github Actions configurations.","title":"Build and test your Application"},{"location":"ch2-discover-github-actions-tp/#first-steps-into-the-cd-world","text":"Here we are going to configure the Continuous Delivery of our project. Therefore, the main goal will be to create and save a docker image containing our application on the Docker Hub every time there is a commit on a main branch. As you probably already noticed, you need to log in to docker hub to perform any publication. 
However, you don\u2019t want to publish your credentials on a public repository (it is not even a good practise to do it on a private repository). Fortunately, GitHub allows you to create secured environment variables. 1- Add your docker hub credentials to the environment variables in GitHub Actions (and let them secured). Note Secured Variables, why? Now that you added them, you can freely declare them and use them inside your GitHub Actions pipeline. 2- Build your docker images inside your GitHub Actions pipeline. Maybe the template Build a docker image can help you! For now, we only need to build the images # define job to build and publish docker image build-and-push-docker-image : needs : test-backend # run only when code is compiling and tests are passing runs-on : ubuntu-24.04 # steps to perform in job steps : - name : Checkout code uses : actions/checkout@v4 - name : Build image and push backend uses : docker/build-push-action@v6 with : # relative path to the place where source code with Dockerfile is located context : # Note: tags has to be all lower-case tags : ${{secrets.DOCKERHUB_USERNAME}}/tp-devops-simple-api:latest - name : Build image and push database # DO the same for database - name : Build image and push httpd # DO the same for httpd Note Why did we put needs: build-and-test-backend on this job? Maybe try without this and you will see! OK your images are built but not yet published on dockerhub . 3- Publish your docker images when there is a commit on the main branch. Don\u2019t forget to do a docker login and to put your credentials on secrets! - name : Login to DockerHub run : echo \"${{ secrets.DOCKERHUB_TOKEN }}\" | docker login --username ${{ secrets.DOCKERHUB_USERNAME }} --password-stdin And after modify job Build image and push backend to add a push action: - name : Build image and push backend uses : docker/build-push-action@v6 with : # relative path to the place where source code with Dockerfile is located context : # Note: tags has to be all lower-case tags : ${{secrets.DOCKERHUB_USERNAME}}/tp-devops-simple-api:latest # build on feature branches, push only on main branch push : ${{ github.ref == 'refs/heads/main' }} Do the same for other containers. Question 2-3 For what purpose do we need to push docker images? Now you should be able to find your docker images on your docker repository. Check Working CI & Docker images pushed to your repository.","title":"First steps into the CD World"},{"location":"ch2-discover-github-actions-tp/#setup-quality-gate","text":"","title":"Setup Quality Gate"},{"location":"ch2-discover-github-actions-tp/#what-is-quality-about","text":"Quality is here to make sure your code will be maintainable and determine every unsecured block. It helps you produce better and tested features, and it will also prevent having dirty code pushed inside your main branch. For this purpose, we are going to use SonarCloud , a cloud solution that makes analysis and reports of your code. This is a useful tool that everyone should use in order to learn java best practices.","title":"What is quality about?"},{"location":"ch2-discover-github-actions-tp/#register-to-sonarcloud","text":"Create your free-tier account on SonarCloud . SonarCloud will propose you to set up your GitHub Actions pipeline from the GitHub Actions , but forget about that, there is a much better way to save the SonarCloud provided and provide it into your main.yml . 1- You must create an organization. 2- And keep the project key and the organization key you will need it later. 
3- You need to add this script to your main.yml for launch sonar at each commit. Set up your pipeline to use SonarCloud analysis while testing. For that, you need to add a new step after Build and test with Maven and change sonar organization and project key. mvn -B verify sonar:sonar -Dsonar.projectKey = <your-project-key> -Dsonar.organization = <your-organization> -Dsonar.host.url = https://sonarcloud.io -Dsonar.login = ${ { secrets.SONAR_TOKEN } } --file ./simple-api/pom.xml If you did your configuration correctly, you should be able to see the SonarCloud analysis report online: Check Working quality gate. Question 2-4 Document your quality gate configuration. Well done buddies, you\u2019ve created your very first Quality Gate! Yay!","title":"Register to SonarCloud"},{"location":"ch2-discover-github-actions-tp/#going-further-split-pipelines","text":"In this step you have to separate your jobs into different workflows so that they respect 2 things: test-backend must be launched on develop and master branch and build-and-push-docker-image on master only. The job that pushes the docker api image must be launched only if test-backend is passed. Tip You can use on: workflow_run to trigger a workflow when another workflow is passed. \u00a9 Takima 2025","title":"Going further: Split pipelines"},{"location":"ch3-discover-ansible-td/","text":"Discover Ansible Note Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information Prerequisites Till now we have only been preparing our applications to be deployed. However we did not deploy anything. That\u2019s where Ansible takes place. Ansible is basically a tool to manage your servers, provision them and deploy your applications on them. This is not the only solution on the market, you\u2019ll hear also about Chef, Puppet, Terraform during your developper life. All of them have their advantages and disadvantages, it is up to you to play with and make your own decisions. This introduction will be pretty fast, it is just here to make you manipulate the tool a little bit with some simple ad hoc commands. You will go deeper into the practical part. Do you Ansible? $ ansible --version Check your installed version, config file location, associated python version and more. If you do not ansible, head to Ansible doc . Unfortunately Ansible is not available on Windows, so if you\u2019re using Windows you have two options : Use a virtual machine, with ansible already installed : ask for a .ova Install a Windows Linux Subsystem by following this documentation and install ansible SSH remote connection Each of you has normally received a server domain name that should be yourname-yourlastname-formation.takima.io and a private key to SSH to it. This server is yours, you will be the only one to manipulate it. In order to play with it, you can simply ssh to it. SSH basically means Secure Shell, it is both a software and a communication protocol that uses the protocol/port TCP/22 of your machines to communicate. It is called Secure because the communication is encrypted using your ssh key pair. Before trying any command, you should know that your private key requires restricted permissions to be used. Change the rights of your key: chmod 400 <path_to_your_key> Now your key can be used to ssh to your server. 
Go on and hit: ssh -i <path_to_your_key> admin@<your_server_domain_name> Why do we have to add this admin@? Your machines run a Debian distribution and the default user is admin, which is why we specify which user we want to use. Now you are connected to your instance, nothing important to see here. You can exit whenever you want using the command: exit Why do we show you how to make a remote SSH connection? Because this is basically what Ansible does to communicate with your server. Now, as you can already guess, Ansible will require some configuration to be able to access your machine. Say Hello from Ansible We will simply use a ping command from Ansible to say hello to our server. Actually, the Ansible ping command does a bit more than just the usual bash ping command. If Ansible responds with a \"pong\", it means that your server is available, that the user provided exists and that Ansible was able to authenticate to your server. In summary, it tells you that your Ansible configuration works. First, we need to add our server name to our Ansible hosts list: $ vim /etc/ansible/hosts Add your server domain name into the file and save it. Now Ansible knows your remote host. Now let's hit: $ ansible all -m ping And it… doesn't work. Why? Because Ansible was denied access: it provided neither a user nor a private ssh key. Now try again with this one: $ ansible all -m ping --private-key = <path_to_your_ssh_key> -u admin And now it should respond \"pong\", which means your configuration works. Check it replies \"pong\" Setup an Apache Server Now that we are able to access our instance with Ansible, let's see how powerful Ansible can be for provisioning your web server. We are going to ask Ansible to install Apache on your instance to make it a webserver. $ ansible all -m apt -a \"name=apache2 state=present\" --private-key = <path_to_your_ssh_key> -u admin And it… doesn't work! Actually, as on every system, you need to be root in order to install software. Fortunately, Ansible can take care of this. Try again with the following command: $ ansible all -m apt -a \"name=apache2 state=present\" --private-key = <path_to_your_ssh_key> -u admin --become The --become flag tells Ansible to perform the command as a super user. Keep in mind that the admin user is part of the wheel group, the super users group on these machines; it would not be possible with a normal user. Now you have successfully installed Apache on your server. We will go on and create an html page for our website: $ ansible all -m shell -a 'echo \"<html><h1>Hello World</h1></html>\" > /var/www/html/index.html' --private-key = <path_to_your_ssh_key> -u admin --become Now start your Apache service: $ ansible all -m service -a \"name=apache2 state=started\" --private-key = <path_to_your_ssh_key> -u admin --become Connect to your server from your browser and… it works! Well done, you've set up your very first server with Ansible. Now move on to the practical part.
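For reference only, here is a minimal sketch (an illustration of where the practical part is heading, not part of the TD) of what these three ad-hoc steps could look like once written as a playbook; it assumes the same hosts file, admin user and private key as above:

- hosts: all
  become: true
  tasks:
    - name: Install Apache                 # same as the apt ad-hoc command
      apt:
        name: apache2
        state: present
        update_cache: yes

    - name: Create a landing page          # replaces the shell/echo ad-hoc command
      copy:
        content: \"<html><h1>Hello World</h1></html>\"
        dest: /var/www/html/index.html

    - name: Make sure Apache is started    # same as the service ad-hoc command
      service:
        name: apache2
        state: started

You would run it with something like ansible-playbook playbook.yml --private-key <path_to_your_ssh_key> -u admin and get the same result as the three commands above, but as a single, repeatable description of the server state.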
\u00a9 Takima 2025","title":"TD part 03 - Ansible"},{"location":"ch3-discover-ansible-td/#discover-ansible","text":"Note Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information","title":"Discover Ansible"},{"location":"ch3-discover-ansible-td/#prerequisites","text":"Till now we have only been preparing our applications to be deployed. However we did not deploy anything. That\u2019s where Ansible takes place. Ansible is basically a tool to manage your servers, provision them and deploy your applications on them. This is not the only solution on the market, you\u2019ll hear also about Chef, Puppet, Terraform during your developper life. All of them have their advantages and disadvantages, it is up to you to play with and make your own decisions. This introduction will be pretty fast, it is just here to make you manipulate the tool a little bit with some simple ad hoc commands. You will go deeper into the practical part. Do you Ansible? $ ansible --version Check your installed version, config file location, associated python version and more. If you do not ansible, head to Ansible doc . Unfortunately Ansible is not available on Windows, so if you\u2019re using Windows you have two options : Use a virtual machine, with ansible already installed : ask for a .ova Install a Windows Linux Subsystem by following this documentation and install ansible","title":"Prerequisites"},{"location":"ch3-discover-ansible-td/#ssh-remote-connection","text":"Each of you has normally received a server domain name that should be yourname-yourlastname-formation.takima.io and a private key to SSH to it. This server is yours, you will be the only one to manipulate it. In order to play with it, you can simply ssh to it. SSH basically means Secure Shell, it is both a software and a communication protocol that uses the protocol/port TCP/22 of your machines to communicate. It is called Secure because the communication is encrypted using your ssh key pair. Before trying any command, you should know that your private key requires restricted permissions to be used. Change the rights of your key: chmod 400 <path_to_your_key> Now your key can be used to ssh to your server. Go on and hit : ssh -i <path_to_your_key> admin@<your_server_domain_name> Why do we have to add this admin@\u201d ? Your machines run under a Debian distribution, and the default user is admin, this is why we specify which user we want to use. Now you are connected to your instance, nothing important to see here. You can exit whenever you want using the command: exit Why do we show you how to make a remote SSH connection ? Because it is basically what does Ansible to communicate with your server. Now as you can already guess, Ansible will require some configurations to be able to access your machine.","title":"SSH remote connection"},{"location":"ch3-discover-ansible-td/#say-hello-from-ansible","text":"We will simply use a ping command from Ansible to say hello to our server. Actually, the Ansible ping command does a bit more than just the usual bash ping command. If Ansible responds a \u201cpong\u201d to you, it means that your server is available, that the user provided exists and that Ansible was able to authenticate to your server. In summary, it tells you that your Ansible configuration works. 
First, we need to add our server name to our Ansible hosts list: $ vim /etc/ansible/hosts Add your server domain name into the file and save it. Now Ansible knows you remote host. Now let\u2019s hit: $ ansible all -m ping And it\u2026 doesn\u2019t work. Why? Because Ansible has been access denied as it did not provide either a user nor a private ssh key. Now try again with this one: $ ansible all -m ping --private-key = <path_to_your_ssh_key> -u admin And now it should respond \u201cpong\u201d, which means you are successful with your configurations. Check it replies \"pong\"","title":"Say Hello from Ansible"},{"location":"ch3-discover-ansible-td/#setup-an-apache-server","text":"Now that we are able to access our instance with Ansible, let\u2019s see how powerful Ansible can be for provisioning your web server. We are going to ask Ansible to install Apache into your instance to make it a webserver. $ ansible all -m apt -a \"name=apache2 state=present\" --private-key = <path_to_your_ssh_key> -u admin And it\u2026 doesn\u2019t work ! Actually, like in every system, you need to be root in order to install a software. Fortunately, Ansible can take care of this. Try again with the following command: $ ansible all -m apt -a \"name=apache2 state=present\" --private-key = <path_to_your_ssh_key> -u admin --become The --become flag tells Ansible to perform the command as a super user. Keep in mind that the admin user is part of the wheel group which is the Debian super users group. It would not be possible using a normal user. Now you have successfully installed Apache on your server. We will go on and create an html page for our website: $ ansible all -m shell -a 'echo \"<html><h1>Hello World</h1></html>\" > /var/www/html/index.html' --private-key = <path_to_your_ssh_key> -u admin --become Now start your Apache service: $ ansible all -m service -a \"name=apache2 state=started\" --private-key = <path_to_your_ssh_key> -u admin --become Connect to your server from your browser and\u2026 it works ! Well done you\u2019ve set up your very first server with Ansible. Now move on to the practical part. \u00a9 Takima 2025","title":"Setup an Apache Server"},{"location":"ch3-discover-ansible-tp/","text":"Discover Ansible Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information Goals Install and deploy your application automatically with ansible. Introduction Inventories By default, Ansible's inventory is saved in the location /etc/ansible/hosts where you already defined your server. The headings between brackets (eg: [webservers]) are used to group sets of hosts together, they are called, surprisingly, groups. You could regroup them by roles like database servers, front-ends, reverse proxies, build servers\u2026 Let\u2019s create a project specific inventory, in your project create an ansible directory, then create a new directory called inventories and in this folder a new file ( my-project/ansible/inventories/setup.yml ): all : vars : ansible_user : admin ansible_ssh_private_key_file : /path/to/private/key children : prod : hosts : hostname or IP Test your inventory with the ping command: ansible all -i inventories/setup.yml -m ping Facts Let\u2019s get information about hosts: these kinds of variables, not set by the user but discovered are called facts . 
Facts are prefixed by ansible_ and represent information derived from speaking with your remote systems. You will request your server to get your OS distribution, thanks to the setup module. ansible all -i inventories/setup.yml -m setup -a \"filter=ansible_distribution*\" Earlier you installed Apache2 server on your machine, let\u2019s remove it: ansible all -i inventories/setup.yml -m apt -a \"name=apache2 state=absent\" --become With ansible, you just describe the state of your server and let ansible automatically update it for you. If you run this command another time you won\u2019t have the same output as apache2 would have been removed. Question 3-1 Document your inventory and base commands Playbooks First playbook Let\u2019s create a first very simple playbook in my-project/ansible/playbook.yml : - hosts : all gather_facts : false become : true tasks : - name : Test connection ping : Just execute your playbook: ansible-playbook -i inventories/setup.yml playbook.yml You can check your playbooks before playing them using the option: --syntax-check Advanced Playbook Let\u2019s create a playbook to install docker on your server, follow the documentation and create the corresponding tasks: https://docs.docker.com/engine/install/debian/#install-using-the-repository . - hosts : all gather_facts : true become : true tasks : # Install prerequisites for Docker - name : Install required packages apt : name : - apt-transport-https - ca-certificates - curl - gnupg - lsb-release - python3-venv state : latest update_cache : yes # Add Docker\u2019s official GPG key - name : Add Docker GPG key apt_key : url : https://download.docker.com/linux/debian/gpg state : present # Set up the Docker stable repository - name : Add Docker APT repository apt_repository : repo : \"deb [arch=amd64] https://download.docker.com/linux/debian {{ ansible_facts['distribution_release'] }} stable\" state : present update_cache : yes # Install Docker - name : Install Docker apt : name : docker-ce state : present # Install Python3 and pip3 - name : Install Python3 and pip3 apt : name : - python3 - python3-pip state : present # Create a virtual environment for Python packages - name : Create a virtual environment for Docker SDK command : python3 -m venv /opt/docker_venv args : creates : /opt/docker_venv # Only runs if this directory doesn\u2019t exist # Install Docker SDK for Python in the virtual environment - name : Install Docker SDK for Python in virtual environment command : /opt/docker_venv/bin/pip install docker # Ensure Docker is running - name : Make sure Docker is running service : name : docker state : started tags : docker Good news, we now have docker installed on our server. One task was created to be sure docker was running, you could check this with an ad-hoc command or by connecting to the server until you really trust ansible. Check Docker installed on remote server Using roles Our docker install playbook is nice and all but it will be cleaner to have in a specific place, in a role for example. Create a docker role and move the installation task there: ansible-galaxy init roles/docker Call the docker role from your playbook to check your refactor and your installation. Initialized role has a couple of directories, keep only the one you will need: tasks - contains the main list of tasks to be executed by the role. handlers - contains handlers, which may be used by this role or outside. Question 3-2 Document your playbook Deploy your App Time has come to deploy your application to your Ansible managed server. 
Create specific roles for each part of your application and use the Ansible docker_container module to start your dockerized application. Here is what a docker_container task should look like: - name : Run HTTPD docker_container : name : httpd image : your image name from DockerHub You must have at least these roles: install docker create network launch database launch app launch proxy Note You will need to add env variables to the app and database tasks. Ansible is able to modify the variables either in the .env for the db or in the application.yml for the app. Don't forget to use the existing modules, for example to create the network. Don't forget to use the right python interpreter when creating the docker network (refer to the ansible_python_interpreter variable usage). Link docker_container module documentation docker_network module documentation Check You should be able to access your API on your server. Question 3-3 Document your docker_container tasks configuration. Front If you have reached the end of each TP, you can access your api through your server. Your database, api and httpd must be up on your server and deployed with your Github Actions: everything you previously orchestrated with docker-compose. Usually, when we have an API, we also have a front end to display our information. That's your bonus part: you can find the code of the front ready . You have to customize your httpd server so that requests are correctly routed between the API and the front. The httpd server is the proxy within your system. Check Front working Continuous Deployment Note Do this part in a separate workflow. Configure Github Actions to automatically deploy your application when you release it on the production branch of your github repository. It is a little bit overkill to launch an Ansible job to deploy on one unique server, so you could simply ssh to your machine with your encrypted private key and only relaunch your http api backend application. If you like challenges and overkill solutions, run your Ansible script through a Docker image (that provides Ansible, of course) and use a VAULT to encrypt your private data. Check Full CI/CD pipeline in action. © Takima 2025","title":"TP part 03 - Ansible"},{"location":"ch3-discover-ansible-tp/#discover-ansible","text":"Check Checkpoint: call us to check your results (don't stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information","title":"Discover Ansible"},{"location":"ch3-discover-ansible-tp/#goals","text":"Install and deploy your application automatically with ansible.","title":"Goals"},{"location":"ch3-discover-ansible-tp/#introduction","text":"","title":"Introduction"},{"location":"ch3-discover-ansible-tp/#inventories","text":"By default, Ansible's inventory is saved in /etc/ansible/hosts , where you already defined your server. The headings between brackets (eg: [webservers]) are used to group sets of hosts together; they are called, surprisingly, groups.
You could regroup them by roles like database servers, front-ends, reverse proxies, build servers\u2026 Let\u2019s create a project specific inventory, in your project create an ansible directory, then create a new directory called inventories and in this folder a new file ( my-project/ansible/inventories/setup.yml ): all : vars : ansible_user : admin ansible_ssh_private_key_file : /path/to/private/key children : prod : hosts : hostname or IP Test your inventory with the ping command: ansible all -i inventories/setup.yml -m ping","title":"Inventories"},{"location":"ch3-discover-ansible-tp/#facts","text":"Let\u2019s get information about hosts: these kinds of variables, not set by the user but discovered are called facts . Facts are prefixed by ansible_ and represent information derived from speaking with your remote systems. You will request your server to get your OS distribution, thanks to the setup module. ansible all -i inventories/setup.yml -m setup -a \"filter=ansible_distribution*\" Earlier you installed Apache2 server on your machine, let\u2019s remove it: ansible all -i inventories/setup.yml -m apt -a \"name=apache2 state=absent\" --become With ansible, you just describe the state of your server and let ansible automatically update it for you. If you run this command another time you won\u2019t have the same output as apache2 would have been removed. Question 3-1 Document your inventory and base commands","title":"Facts"},{"location":"ch3-discover-ansible-tp/#playbooks","text":"","title":"Playbooks"},{"location":"ch3-discover-ansible-tp/#first-playbook","text":"Let\u2019s create a first very simple playbook in my-project/ansible/playbook.yml : - hosts : all gather_facts : false become : true tasks : - name : Test connection ping : Just execute your playbook: ansible-playbook -i inventories/setup.yml playbook.yml You can check your playbooks before playing them using the option: --syntax-check","title":"First playbook"},{"location":"ch3-discover-ansible-tp/#advanced-playbook","text":"Let\u2019s create a playbook to install docker on your server, follow the documentation and create the corresponding tasks: https://docs.docker.com/engine/install/debian/#install-using-the-repository . 
- hosts : all gather_facts : true become : true tasks : # Install prerequisites for Docker - name : Install required packages apt : name : - apt-transport-https - ca-certificates - curl - gnupg - lsb-release - python3-venv state : latest update_cache : yes # Add Docker\u2019s official GPG key - name : Add Docker GPG key apt_key : url : https://download.docker.com/linux/debian/gpg state : present # Set up the Docker stable repository - name : Add Docker APT repository apt_repository : repo : \"deb [arch=amd64] https://download.docker.com/linux/debian {{ ansible_facts['distribution_release'] }} stable\" state : present update_cache : yes # Install Docker - name : Install Docker apt : name : docker-ce state : present # Install Python3 and pip3 - name : Install Python3 and pip3 apt : name : - python3 - python3-pip state : present # Create a virtual environment for Python packages - name : Create a virtual environment for Docker SDK command : python3 -m venv /opt/docker_venv args : creates : /opt/docker_venv # Only runs if this directory doesn\u2019t exist # Install Docker SDK for Python in the virtual environment - name : Install Docker SDK for Python in virtual environment command : /opt/docker_venv/bin/pip install docker # Ensure Docker is running - name : Make sure Docker is running service : name : docker state : started tags : docker Good news, we now have Docker installed on our server. One task was created to make sure Docker is running; you can check this with an ad-hoc command or by connecting to the server, until you really trust Ansible. Check Docker installed on remote server","title":"Advanced Playbook"},{"location":"ch3-discover-ansible-tp/#using-roles","text":"Our Docker install playbook is nice and all, but it would be cleaner to keep it in a dedicated place, in a role for example. Create a docker role and move the installation tasks there: ansible-galaxy init roles/docker Call the docker role from your playbook to check your refactor and your installation. The initialized role has a couple of directories; keep only the ones you will need: tasks - contains the main list of tasks to be executed by the role. handlers - contains handlers, which may be used by this role or outside. Question 3-2 Document your playbook","title":"Using roles"},{"location":"ch3-discover-ansible-tp/#deploy-your-app","text":"The time has come to deploy your application to your Ansible-managed server. Create specific roles for each part of your application and use the Ansible docker_container module to start your dockerized application. Here is what a docker_container task should look like: - name : Run HTTPD docker_container : name : httpd image : your image name from DockerHub You must have at least these roles: install docker create network launch database launch app launch proxy Note You will need to add env variables on app and database tasks. Ansible is able to modify the variables either in the .env for the db or in the application.yml for the app. Don\u2019t forget to use existing modules, for example to create the network Don't forget to use the right Python interpreter when creating the docker network (refer to the ansible_python_interpreter variable usage) Link docker_container module documentation docker_network module documentation Check You should be able to access your API on your server.
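To make the "create network" and "launch database" hints above more concrete, here is a minimal sketch of what two of those role task files could look like. It reuses the Python virtual environment created by the install playbook (/opt/docker_venv); the network name, image name and environment variables are placeholders to adapt to your own project.

```yaml
# roles/create_network/tasks/main.yml (minimal sketch, names are placeholders)
- name: Create application network
  docker_network:
    name: app-network
  vars:
    # Point Ansible at the venv that has the Docker SDK installed
    ansible_python_interpreter: /opt/docker_venv/bin/python3

# roles/launch_database/tasks/main.yml (minimal sketch)
- name: Run database
  docker_container:
    name: database
    image: your-dockerhub-user/tp-database:latest   # placeholder image
    networks:
      - name: app-network
    env:
      POSTGRES_DB: "db"                  # example values only, use your own variables
      POSTGRES_USER: "usr"
      POSTGRES_PASSWORD: "{{ db_password }}"   # e.g. defined in group_vars or a Vault
  vars:
    ansible_python_interpreter: /opt/docker_venv/bin/python3
```

Passing env this way keeps the container definition in the role; templating a .env or application.yml file, as mentioned in the Note, is an equally valid option.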
Question 3-3 Document your docker_container tasks configuration.","title":"Deploy your App"},{"location":"ch3-discover-ansible-tp/#front","text":"If you have reached the end of each TP, you are able to access your API through your server. Your database, API and httpd must be up on your server and deployed with your GitHub Actions. Everything runs under the hood with docker-compose. Usually when we have an API we also have something called a front part to display our information. That's your bonus part to do: you can find the code of the front ready. You have to customize your httpd server so that requests are correctly routed between the API and the front. The httpd server is a proxy within your system. Check Front working","title":"Front"},{"location":"ch3-discover-ansible-tp/#continuous-deployment","text":"Note Do this part in a separate workflow. Configure a GitHub Actions workflow to automatically deploy your application when you release it on the production branch of your GitHub repository. It is a little bit overkill to launch an Ansible job to deploy to a single server. Instead, you can SSH to your machine with your encrypted private key and relaunch only your HTTP API backend application. If you like challenges and overkill solutions, run your Ansible script through a Docker image (that provides Ansible, of course) and use a VAULT to encrypt your private data. Check Full CI/CD pipeline in action. \u00a9 Takima 2025","title":"Continuous Deployment"},{"location":"ch4-extras-tp/","text":"Go further Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information Goals Set up the extra infrastructure tools and become the DevOps master. Load balancing Redundancy In this section our goal will be to have a redundant backend. Instead of having a single backend application, let\u2019s have 2 (why not more? Do not forget that your server does not have unlimited resources). Set up 2 backend instances: backend-1/backend-2 or backend-blue/backend-green (my personal favorite). Actual load balancing Set up your reverse proxy to do some actual load balancing between your 2 backends using Mod proxy balancer. Start by setting up your load balancing in your dev environment; once you are satisfied with the behavior, deploy it to production. Tip Ask yourself: why can we load balance between our backends so easily? Heard of sticky sessions or stateless apps? Check Checkpoint: do you loadbalance? Grafana Set up Grafana with Ansible to monitor your instance: Ansible Grafana Useful links: Grafana installation Grafana repository Check Checkpoint: do you Grafana? \u00a9 Takima 2025","title":"TP Extras"},{"location":"ch4-extras-tp/#go-further","text":"Check Checkpoint: call us to check your results (don\u2019t stay blocked on a checkpoint if we are busy, we can check \u2154 checkpoints at the same time) Question Point to document/report Tip Interesting information","title":"Go further"},{"location":"ch4-extras-tp/#goals","text":"Set up the extra infrastructure tools and become the DevOps master.","title":"Goals"},{"location":"ch4-extras-tp/#load-balancing","text":"","title":"Load balancing"},{"location":"ch4-extras-tp/#redundancy","text":"In this section our goal will be to have a redundant backend. Instead of having a single backend application, let\u2019s have 2 (why not more? Do not forget that your server does not have unlimited resources).
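For the Continuous Deployment section above, here is a minimal sketch of what such a workflow could look like, using plain SSH from the runner to relaunch only the backend. The secret names (SERVER_HOST, SERVER_USER, SSH_PRIVATE_KEY), the compose file path and the backend service name are assumptions to adapt to your own repository.

```yaml
# .github/workflows/deploy.yml - a sketch only, not a reference solution
name: Deploy to production
on:
  push:
    branches:
      - production
jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Install the SSH private key
        run: |
          mkdir -p ~/.ssh
          echo "${{ secrets.SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa
          chmod 600 ~/.ssh/id_rsa
      - name: Relaunch only the backend on the server
        run: |
          ssh -o StrictHostKeyChecking=no -i ~/.ssh/id_rsa \
            "${{ secrets.SERVER_USER }}@${{ secrets.SERVER_HOST }}" \
            "docker compose -f ~/app/docker-compose.yml pull backend \
             && docker compose -f ~/app/docker-compose.yml up -d backend"
```

The "Ansible through a Docker image plus Vault" variant described in the TP would simply replace the last step with an ansible-playbook run against your inventory.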
Set up 2 backend instances: backend-1/backend-2 or backend-blue/backend-green (my personal favorite).","title":"Redundancy"},{"location":"ch4-extras-tp/#actual-load-balancing","text":"Set up your reverse proxy to do some actual load balancing between your 2 backends using Mod proxy balancer. Start by setting up your load balancing in your dev environment; once you are satisfied with the behavior, deploy it to production. Tip Ask yourself: why can we load balance between our backends so easily? Heard of sticky sessions or stateless apps? Check Checkpoint: do you loadbalance?","title":"Actual load balancing"},{"location":"ch4-extras-tp/#grafana","text":"Set up Grafana with Ansible to monitor your instance: Ansible Grafana Useful links: Grafana installation Grafana repository Check Checkpoint: do you Grafana? \u00a9 Takima 2025","title":"Grafana"},{"location":"cheatsheet/","text":"Cheatsheet Docker & docker-compose","title":"Cheatsheet"},{"location":"cheatsheet/#cheatsheet","text":"","title":"Cheatsheet"},{"location":"cheatsheet/#docker-docker-compose","text":"","title":"Docker & docker-compose"}]} \ No newline at end of file diff --git a/public/sitemap.xml b/public/sitemap.xml index 34b4b86166fa85f56ca99793f7f965f771e94a73..01bc4e7f410dcc0ac38e41746763303f31559143 100644 --- a/public/sitemap.xml +++ b/public/sitemap.xml @@ -30,4 +30,19 @@ <lastmod>2025-04-30</lastmod> <changefreq>daily</changefreq> </url> + <url> + <loc>None</loc> + <lastmod>2025-04-30</lastmod> + <changefreq>daily</changefreq> + </url> + <url> + <loc>None</loc> + <lastmod>2025-04-30</lastmod> + <changefreq>daily</changefreq> + </url> + <url> + <loc>None</loc> + <lastmod>2025-04-30</lastmod> + <changefreq>daily</changefreq> + </url> </urlset> \ No newline at end of file diff --git a/public/sitemap.xml.gz b/public/sitemap.xml.gz index 214b1330c6d8c6831f4ae2dbc08f16e93915a269..0b5476618726950fcff03339086f37904d7b434f 100644 Binary files a/public/sitemap.xml.gz and b/public/sitemap.xml.gz differ
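For the "Actual load balancing" section of TP Extras, here is a sketch of the kind of Apache configuration mod_proxy_balancer expects, delivered through an Ansible copy task to stay consistent with the rest of the TP. The backend container names and ports, the destination path, and the way the file reaches your httpd container (bind mount, custom image, ...) are assumptions to adapt to your setup.

```yaml
# A possible task in your proxy role; names, ports and paths are placeholders
- name: Deploy the load-balancer configuration
  copy:
    dest: /opt/httpd/conf/load-balancer.conf   # assumed to be mounted into the httpd container
    content: |
      # Requires mod_proxy, mod_proxy_http, mod_proxy_balancer,
      # mod_lbmethod_byrequests and mod_slotmem_shm to be enabled in httpd.conf
      <Proxy "balancer://backend_cluster">
          BalancerMember "http://backend-1:8080"
          BalancerMember "http://backend-2:8080"
      </Proxy>
      ProxyPreserveHost On
      ProxyPass        "/api/" "balancer://backend_cluster/"
      ProxyPassReverse "/api/" "balancer://backend_cluster/"
```

Requests to /api/ are then spread across the two BalancerMembers; this only works painlessly because the backend is stateless, which is exactly the point of the Tip question in that section.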
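For the Grafana section, the linked Ansible Grafana role is one option; another approach, consistent with the Docker-centric setup of this TP, is to run Grafana as a container from a dedicated role. The port mapping and network name below are assumptions, and the task again reuses the /opt/docker_venv interpreter from the install playbook.

```yaml
# roles/launch_grafana/tasks/main.yml - one simple Docker-based option, not the only one
- name: Run Grafana
  docker_container:
    name: grafana
    image: grafana/grafana-oss:latest
    ports:
      - "3000:3000"                # Grafana UI on http://your-server:3000
    networks:
      - name: app-network          # assumed network created by the earlier role
  vars:
    ansible_python_interpreter: /opt/docker_venv/bin/python3
```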