src/examples/atomisation.c

    Atomisation of a pulsed liquid jet

    A dense cylindrical liquid jet is injected into a stagnant lighter phase (density ratio 1/27.84). The inflow velocity is modulated sinusoidally to promote the growth of primary shear instabilities. Surface tension is included and ultimately controls the characteristic scale of the smallest droplets.

    We solve the two-phase Navier–Stokes equations with surface tension. We need the tag() function to count the number of droplets. We generate animations online using Basilisk View.

    #include "navier-stokes/centered.h"
    #include "two-phase.h"
    #include "tension.h"
    #include "tag.h"
    #include "view.h"

    We define the radius of the jet, the initial jet length, the Reynolds number and the surface tension coefficient.

    double radius = 1./12.;
    double length = 0.025;
    double Re = 5800;
    double SIGMA = 3e-5;
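
    As a rough consistency check (a sketch, assuming the usual diameter-based definitions, which is also how the viscosities are set in main() below), these values correspond to

    $$Re = \frac{\rho_1 u_0 D}{\mu_1} = 5800, \qquad We = \frac{\rho_1 u_0^2 D}{\sigma} = \frac{1 \times 1 \times 1/6}{3 \times 10^{-5}} \approx 5.6 \times 10^3,$$

    with jet diameter D = 2·radius = 1/6 and inflow velocity u0 = 1 (defined below).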

    The default maximum level of refinement is 10 and the error threshold on velocity is 0.1.

    int maxlevel = 10;
    double uemax = 0.1;

    To impose boundary conditions on a disk we use an auxiliary volume fraction field f0 which is one inside the cylinder and zero outside. We then set an oscillating inflow velocity on the left-hand side and a free outflow on the right-hand side.

    double u0 = 1., au = 0.05, T0 = 0.1;
    scalar f0[];
    u.n[left]  = dirichlet(f0[]*(u0 + au*sin (2.*pi*t/T0)));
    u.t[left]  = dirichlet(0);
    #if dimension > 2
    u.r[left]  = dirichlet(0);
    #endif
    p[left]    = neumann(0);
    f[left]    = f0[];
    
    u.n[right] = neumann(0);
    p[right]   = dirichlet(0);

    The program can take two optional command-line arguments: the maximum level and the error threshold on velocity.
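
    For example, ./atomisation 12 runs with 12 levels of refinement (this is how the batch scripts below invoke the code), and a hypothetical ./atomisation 12 0.05 would also lower the velocity error threshold to 0.05.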

    int main (int argc, char * argv[])
    {
      if (argc > 1)
        maxlevel = atoi (argv[1]);
      if (argc > 2)
        uemax = atof (argv[2]);

    The initial domain is discretised with 64^3 grid points. We set the origin and domain size.

      init_grid (64);
      origin (0, -1.5, -1.5);
      size (3. [1]);

    We set the density and viscosity of each phase as well as the surface tension coefficient and start the simulation.

      rho1 = 1. [0], rho2 = rho1/27.84;
      mu1 = 2.*u0*radius/Re*rho1, mu2 = 2.*u0*radius/Re*rho2;
      f.sigma = SIGMA;
    
      run();
    }

    Initial conditions

    event init (t = 0) {
      if (!restore (file = "restart")) {

    We use a static refinement down to maxlevel in a cylinder 1.2 times the length of the initial jet and √2 times its radius.

        refine (x < 1.2*length && sq(y) + sq(z) < 2.*sq(radius) && level < maxlevel);

    We initialise the auxiliary volume fraction field for a cylinder of constant radius.

        fraction (f0, sq(radius) - sq(y) - sq(z));
        f0.refine = f0.prolongation = fraction_refine;
        restriction ({f0}); // for boundary conditions on levels

    We then use this to define the initial jet and its velocity.

        foreach() {
          f[] = f0[]*(x < length);
          u.x[] = u0*f[];
        }
      }
    }
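
    To restart an interrupted run, one would typically link (or copy) one of the snapshot files written below to a file named restart in the run directory, e.g. ln -s snapshot-2.6 restart, so that the restore() call above succeeds.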

    Outputs

    We log some statistics on the solver.

    event logfile (i++) {
      if (i == 0)
        fprintf (stderr,
    	     "t dt mgp.i mgpf.i mgu.i grid->tn perf.t perf.speed\n");
      fprintf (stderr, "%g %g %d %d %d %ld %g %g\n", 
    	   t, dt, mgp.i, mgpf.i, mgu.i,
    	   grid->tn, perf.t, perf.speed);
    }

    We generate an animation using Basilisk View.

    event movie (t += 1e-2)
    {
    #if dimension == 2
      scalar omega[];
      vorticity (u, omega);
      view (tx = -0.5);
      clear();
      draw_vof ("f");
      squares ("omega", linear = true, spread = 10);
      box ();
    #else // 3D
      scalar pid[];
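      /* "colour" each cell with the index of the parallel process owning it:
         pid() is the process index and npe() the number of processes; the
         fmod() scrambling spreads the values so that neighbouring subdomains
         get contrasting colours (useful to visualise the domain decomposition) */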
      foreach()
        pid[] = fmod(pid()*(npe() + 37), npe());
      view (camera = "iso",
    	fov = 14.5, tx = -0.418, ty = 0.288,
    	width = 1600, height = 1200);
      clear();
      draw_vof ("f");
    #endif // 3D
      save ("movie.mp4");
    }

    We save snapshots of the simulation at regular intervals to restart or to post-process with bview.

    event snapshot (t = 0.1; t += 0.1; t <= 3.8) {
      char name[80];
      sprintf (name, "snapshot-%g", t);
      scalar pid[];
      foreach()
        pid[] = fmod(pid()*(npe() + 37), npe());
      dump (name);
    }

    Counting droplets

    The number and sizes of the droplets generated by the atomising jet are useful statistics for atomisation problems, but they are not trivial to compute. The tag() function is designed to solve this problem: each connected region for which f[] > 1e-3 (i.e. each droplet) is identified by a unique tag value between 1 and n (the number of droplets returned by tag()), while cells outside any droplet keep a tag of zero.

    event droplets (t += 0.1)
    {
      scalar m[];
      foreach()
        m[] = f[] > 1e-3;
      int n = tag (m);

    Once each cell is tagged with a unique droplet index, we can easily compute the volume v and position b of each droplet. Note that we use foreach (serial) to avoid doing a parallel traversal when using OpenMP. This is because we don’t have reduction operations for the v and b arrays (yet).

      double v[n];
      coord b[n];
      for (int j = 0; j < n; j++)
        v[j] = b[j].x = b[j].y = b[j].z = 0.;
      foreach (serial)
        if (m[] > 0) {
          int j = m[] - 1;
          v[j] += dv()*f[];
          coord p = {x,y,z};
          foreach_dimension()
    	b[j].x += dv()*f[]*p.x;
        }

    When using MPI we need to perform a global reduction to get the volumes and positions of droplets which span multiple processes.

    #if _MPI
      MPI_Allreduce (MPI_IN_PLACE, v, n, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
      MPI_Allreduce (MPI_IN_PLACE, b, 3*n, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    #endif

    Finally we output the volume and position of each droplet to standard output.

      for (int j = 0; j < n; j++)
        fprintf (fout, "%d %g %d %g %g %g\n", i, t,
    	     j, v[j], b[j].x/v[j], b[j].y/v[j]);
      fflush (fout);
    }
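
    (In this output the second column is the time and the fourth the droplet volume; the droplet-size histogram script at the end of this page relies on this layout.)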

    Mesh adaptation

    We adapt the mesh according to the error on the volume fraction field and the velocity.

    event adapt (i++) {
      adapt_wavelet ({f,u}, (double[]){0.01,uemax,uemax,uemax}, maxlevel);
    }

    Running in parallel

    On Occigen

    To run in 3D on occigen, we first do, on the local machine,

    local% qcc -source -grid=octree -D_MPI=1 atomisation.c
    local% scp _atomisation.c occigen.cines.fr: 

    and on occigen (to run on 64*24 = 1536 cores, with 12 levels of refinement)

    occigen% sbatch --nodes=64 --time=5:00:00 run.sh

    with the following run.sh script

    #!/bin/bash
    #SBATCH -J basilisk
    #SBATCH --nodes=1
    #SBATCH --constraint=HSW24
    #SBATCH --ntasks-per-node=24
    #SBATCH --threads-per-core=1
    #SBATCH --time=10:00
    #SBATCH --output basilisk.output
    #SBATCH --exclusive
    
    LEVEL=12
    
    module purge
    module load openmpi
    module load intel
    
    NAME=atomisation
    mpicc -Wall -std=c99 -D_XOPEN_SOURCE=700 -O2 _$NAME.c -o $NAME \
          -L$HOME/gl -lglutils -lfb_tiny -lm
    srun --mpi=pmi2 -K1 --resv-ports -n $SLURM_NTASKS ./$NAME $LEVEL \
         2> log-$LEVEL-$SLURM_NTASKS > out-$LEVEL-$SLURM_NTASKS

    Note that this assumes that the gl libraries have been installed in $HOME/gl.
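
    If they are not, they can usually be built from the Basilisk source tree and copied over, along these lines (a sketch only; see the Basilisk View installation instructions for the details):

    local% cd $BASILISK/gl
    local% make libglutils.a libfb_tiny.a
    local% scp libglutils.a libfb_tiny.a occigen.cines.fr:gl/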

    On Mesu

    To run in 3D on mesu, we first do, on the local machine,

    local% qcc -source -grid=octree -D_MPI=1 atomisation.c
    local% scp _atomisation.c mesu.dsi.upmc.fr: 

    and on mesu (to run on 672 cores, with 12 levels of refinement)

    mesu% qstat -u popinet
    mesu% qsub run.sh

    with the following run.sh script

    #!/bin/bash 
    #PBS -l select=28:ncpus=24:mpiprocs=24
    #PBS -l walltime=12:00:00
    #PBS -N atomisation
    #PBS -j oe  
    # load modules 
    module load mpt
    mpicc -Wall -O2 -std=c99 -D_XOPEN_SOURCE=700 _atomisation.c -o atomisation \
            -L$HOME/gl -lglutils -lfb_tiny -lm
    # change to the directory where program job_script_file is located 
    cd $PBS_O_WORKDIR 
    # mpirun -np 672 !!!! does not work !!!!
    mpiexec_mpt -n 672 ./atomisation 12 2>> log >> out

    Note that this assumes that the gl libraries have been installed in $HOME/gl.

    Results

    Atomisation of a pulsed liquid jet. 4096³ equivalent resolution.

    The gain in number of grid points of the adaptive simulation (relative to a constant 4096³ Cartesian grid discretisation) is illustrated below, as well as the computational speed (in points.timesteps/sec). The simulation on Mesu is restarted at t = 2.6 (after 12 hours of runtime).

    set xlabel 'Time'
    set logscale y
    set key center right
    plot 'atomisation.occigen' u 1:8 w l t 'speed (occigen 1536 cores)', \
         'atomisation.mesu' u 1:8 w l t 'speed (mesu 672 cores)', \
         'atomisation.mesu' u 1:(4096.**3./$6) w l t 'compression ratio'
    Compression ratio and computational speed (script)

    reset
    set xlabel 'log10(Volume)'
    set ylabel 'Number of droplets'
    binstart = -100
    binwidth = 0.2
    set boxwidth binwidth
    set style fill solid 0.5
    Delta = 3./2**12 # minimum cell size
    minvol = log10(Delta**3) # minimum cell volume
    set arrow from minvol, graph 0 to minvol, graph 1 nohead
    set label "Minimum cell volume" at minvol - 0.2, graph 0.35 rotate by 90
    t = "3.7"
    plot "< awk '{if ($2==".t.") print $0;}' atomisation.out" u \
         (binwidth*(floor((log10($4)-binstart)/binwidth)+0.5)+binstart):(1.0) \
         smooth freq w boxes t ''
    Histogram of droplet volumes at t=3.7 (script)

    See also