From aca05a961405ddae330c7fbb01e8817e3d51e0d0 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 20 Aug 2019 18:51:54 +0100 Subject: [PATCH 001/428] Finite difference implementation of LaplaceXY The existing implementation of LaplaceXY uses a finite-volume discretisation. This commit introduces an implementation using the same finite-difference discretisation as the 'Laplace(Field2D)' and 'Grad(Field2D)*Grad(Field2D)' operators would use. Also, when using the finite-difference implementation, values for the boundary conditions are passed in the same way as for the 'Laplacian' solvers, i.e. as the value in the boundary cell, instead of the mean of the boundary and neighbouring grid cells. --- include/bout/invert/laplacexy.hxx | 3 + src/invert/laplacexy/laplacexy.cxx | 521 ++++++++++++++++++++--------- 2 files changed, 367 insertions(+), 157 deletions(-) diff --git a/include/bout/invert/laplacexy.hxx b/include/bout/invert/laplacexy.hxx index 705ad76351..96f3cfbd6d 100644 --- a/include/bout/invert/laplacexy.hxx +++ b/include/bout/invert/laplacexy.hxx @@ -124,6 +124,9 @@ private: Matrix acoef, bcoef, ccoef, xvals, bvals; std::unique_ptr> cr; ///< Tridiagonal solver + // Use finite volume or finite difference discretization + bool finite_volume{true}; + // Y derivatives bool include_y_derivs; // Include Y derivative terms? diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index 40af4949fc..bd1650d850 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -222,6 +223,9 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) // Configure Linear Solver + finite_volume = (*opt)["finite_volume"].doc( + "Use finite volume rather than finite difference discritisation." 
+ ).withDefault(true); bool direct = (*opt)["direct"].doc("Use a direct LU solver").withDefault(false); if(direct) { @@ -314,98 +318,198 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { Coordinates *coords = localmesh->getCoordinates(location); - ////////////////////////////////////////////////// - // Set Matrix elements - // - // (1/J) d/dx ( J * g11 d/dx ) + (1/J) d/dy ( J * g22 d/dy ) - - for(int x=localmesh->xstart; x <= localmesh->xend; x++) { - for(int y=localmesh->ystart;y<=localmesh->yend;y++) { - // stencil entries - PetscScalar c, xm, xp, ym, yp; - - // XX component - - // Metrics on x+1/2 boundary - BoutReal J = 0.5*(coords->J(x,y) + coords->J(x+1,y)); - BoutReal g11 = 0.5*(coords->g11(x,y) + coords->g11(x+1,y)); - BoutReal dx = 0.5*(coords->dx(x,y) + coords->dx(x+1,y)); - BoutReal Acoef = 0.5*(A(x,y) + A(x+1,y)); - - BoutReal val = Acoef * J * g11 / (coords->J(x,y) * dx * coords->dx(x,y)); - xp = val; - c = -val; - - // Metrics on x-1/2 boundary - J = 0.5*(coords->J(x,y) + coords->J(x-1,y)); - g11 = 0.5*(coords->g11(x,y) + coords->g11(x-1,y)); - dx = 0.5*(coords->dx(x,y) + coords->dx(x-1,y)); - Acoef = 0.5*(A(x,y) + A(x-1,y)); - - val = Acoef * J * g11 / (coords->J(x,y) * dx * coords->dx(x,y)); - xm = val; - c -= val; + if (finite_volume) { + ////////////////////////////////////////////////// + // Set Matrix elements + // + // (1/J) d/dx ( J * g11 d/dx ) + (1/J) d/dy ( J * g22 d/dy ) - c += B(x,y); - - // Put values into the preconditioner, X derivatives only - acoef(y - localmesh->ystart, x - xstart) = xm; - bcoef(y - localmesh->ystart, x - xstart) = c; - ccoef(y - localmesh->ystart, x - xstart) = xp; - - if( include_y_derivs ) { - // YY component - // Metrics at y+1/2 - J = 0.5*(coords->J(x,y) + coords->J(x,y+1)); - BoutReal g_22 = 0.5*(coords->g_22(x,y) + coords->g_22(x,y+1)); - BoutReal g23 = 0.5*(coords->g23(x,y) + coords->g23(x,y+1)); - BoutReal g_23 = 0.5*(coords->g_23(x,y) + coords->g_23(x,y+1)); - BoutReal dy = 0.5*(coords->dy(x,y) + coords->dy(x,y+1)); - Acoef = 0.5*(A(x,y+1) + A(x,y)); - - val = -Acoef * J * g23 * g_23 / (g_22 * coords->J(x,y) * dy * coords->dy(x,y)); - yp = val; - c -= val; - - // Metrics at y-1/2 - J = 0.5*(coords->J(x,y) + coords->J(x,y-1)); - g_22 = 0.5*(coords->g_22(x,y) + coords->g_22(x,y-1)); - g23 = 0.5*(coords->g23(x,y) + coords->g23(x,y-1)); - g_23 = 0.5*(coords->g_23(x,y) + coords->g_23(x,y-1)); - dy = 0.5*(coords->dy(x,y) + coords->dy(x,y-1)); - Acoef = 0.5*(A(x,y-1) + A(x,y)); - - val = -Acoef * J * g23 * g_23 / (g_22 * coords->J(x,y) * dy * coords->dy(x,y)); - ym = val; - c -= val; + for(int x=localmesh->xstart; x <= localmesh->xend; x++) { + for(int y=localmesh->ystart;y<=localmesh->yend;y++) { + // stencil entries + PetscScalar c, xm, xp, ym, yp; + + // XX component + + // Metrics on x+1/2 boundary + BoutReal J = 0.5*(coords->J(x,y) + coords->J(x+1,y)); + BoutReal g11 = 0.5*(coords->g11(x,y) + coords->g11(x+1,y)); + BoutReal dx = 0.5*(coords->dx(x,y) + coords->dx(x+1,y)); + BoutReal Acoef = 0.5*(A(x,y) + A(x+1,y)); + + BoutReal val = Acoef * J * g11 / (coords->J(x,y) * dx * coords->dx(x,y)); + xp = val; + c = -val; + + // Metrics on x-1/2 boundary + J = 0.5*(coords->J(x,y) + coords->J(x-1,y)); + g11 = 0.5*(coords->g11(x,y) + coords->g11(x-1,y)); + dx = 0.5*(coords->dx(x,y) + coords->dx(x-1,y)); + Acoef = 0.5*(A(x,y) + A(x-1,y)); + + val = Acoef * J * g11 / (coords->J(x,y) * dx * coords->dx(x,y)); + xm = val; + c -= val; + + c += B(x,y); + + // Put values into the preconditioner, X derivatives only + acoef(y - 
localmesh->ystart, x - xstart) = xm; + bcoef(y - localmesh->ystart, x - xstart) = c; + ccoef(y - localmesh->ystart, x - xstart) = xp; + + if( include_y_derivs ) { + // YY component + // Metrics at y+1/2 + J = 0.5*(coords->J(x,y) + coords->J(x,y+1)); + BoutReal g_22 = 0.5*(coords->g_22(x,y) + coords->g_22(x,y+1)); + BoutReal g23 = 0.5*(coords->g23(x,y) + coords->g23(x,y+1)); + BoutReal g_23 = 0.5*(coords->g_23(x,y) + coords->g_23(x,y+1)); + BoutReal dy = 0.5*(coords->dy(x,y) + coords->dy(x,y+1)); + Acoef = 0.5*(A(x,y+1) + A(x,y)); + + val = -Acoef * J * g23 * g_23 / (g_22 * coords->J(x,y) * dy * coords->dy(x,y)); + yp = val; + c -= val; + + // Metrics at y-1/2 + J = 0.5*(coords->J(x,y) + coords->J(x,y-1)); + g_22 = 0.5*(coords->g_22(x,y) + coords->g_22(x,y-1)); + g23 = 0.5*(coords->g23(x,y) + coords->g23(x,y-1)); + g_23 = 0.5*(coords->g_23(x,y) + coords->g_23(x,y-1)); + dy = 0.5*(coords->dy(x,y) + coords->dy(x,y-1)); + Acoef = 0.5*(A(x,y-1) + A(x,y)); + + val = -Acoef * J * g23 * g_23 / (g_22 * coords->J(x,y) * dy * coords->dy(x,y)); + ym = val; + c -= val; + } + + ///////////////////////////////////////////////// + // Now have a 5-point stencil for the Laplacian + + int row = globalIndex(x,y); + + // Set the centre (diagonal) + MatSetValues(MatA,1,&row,1,&row,&c,INSERT_VALUES); + + // X + 1 + int col = globalIndex(x+1, y); + MatSetValues(MatA,1,&row,1,&col,&xp,INSERT_VALUES); + + // X - 1 + col = globalIndex(x-1, y); + MatSetValues(MatA,1,&row,1,&col,&xm,INSERT_VALUES); + + if( include_y_derivs ) { + // Y + 1 + col = globalIndex(x, y+1); + MatSetValues(MatA,1,&row,1,&col,&yp,INSERT_VALUES); + + // Y - 1 + col = globalIndex(x, y-1); + MatSetValues(MatA,1,&row,1,&col,&ym,INSERT_VALUES); + } } - - ///////////////////////////////////////////////// - // Now have a 5-point stencil for the Laplacian - - int row = globalIndex(x,y); - - // Set the centre (diagonal) - MatSetValues(MatA,1,&row,1,&row,&c,INSERT_VALUES); - - // X + 1 - int col = globalIndex(x+1, y); - MatSetValues(MatA,1,&row,1,&col,&xp,INSERT_VALUES); - - // X - 1 - col = globalIndex(x-1, y); - MatSetValues(MatA,1,&row,1,&col,&xm,INSERT_VALUES); - - if( include_y_derivs ) { - // Y + 1 - col = globalIndex(x, y+1); - MatSetValues(MatA,1,&row,1,&col,&yp,INSERT_VALUES); - - // Y - 1 - col = globalIndex(x, y-1); - MatSetValues(MatA,1,&row,1,&col,&ym,INSERT_VALUES); + } + } else { + ////////////////////////////////////////////////// + // Set Matrix elements + // + // Div(A Grad(f)) + B f + // = A Laplace_perp(f) + Grad_perp(A).Grad_perp(f) + B f + // = A*(G1*dfdx + (G2-1/J*d/dy(J/g_22))*dfdy + // + g11*d2fdx2 + (g22-1/g_22)*d2fdy2 + 2*g12*d2fdxdy) + // + g11*dAdx*dfdx + (g22-1/g_22)*dAdy*dfdy + g12*(dAdx*dfdy + dAdy*dfdx) + // + B*f + + Field2D coef_dfdy = coords->G2 - DDY(coords->J/coords->g_22)/coords->J; + + for(int x=localmesh->xstart; x <= localmesh->xend; x++) { + for(int y=localmesh->ystart;y<=localmesh->yend;y++) { + // stencil entries + PetscScalar c, xm, xp, ym, yp; + + BoutReal dx = coords->dx(x,y); + + // A*G1*dfdx + BoutReal val = A(x,y)*coords->G1(x,y)/(2.*dx); + xp = val; + xm = -val; + + // A*g11*d2fdx2 + val = A(x,y)*coords->g11(x,y)/SQ(dx); + xp += val; + c = -2.*val; + xm += val; + + // g11*dAdx*dfdx + val = coords->g11(x, y)*(A(x+1, y) - A(x-1, y))/(4.*SQ(dx)); + xp += val; + xm -= val; + + // B*f + c += B(x,y); + + // Put values into the preconditioner, X derivatives only + acoef(y - localmesh->ystart, x - xstart) = xm; + bcoef(y - localmesh->ystart, x - xstart) = c; + ccoef(y - localmesh->ystart, x - xstart) = xp; + + 
if(include_y_derivs) { + BoutReal dy = coords->dy(x,y); + + // A*(G2-1/J*d/dy(J/g_22))*dfdy + val = A(x, y)*coef_dfdy(x, y)/(2.*dy); + yp = val; + ym = -val; + + // A*(g22-1/g_22)*d2fdy2 + val = A(x, y)*(coords->g22(x, y) - 1./coords->g_22(x,y))/SQ(dy); + yp += val; + c -= 2.*val; + ym += val; + + // 2*g12*d2dfdxdy + // This term would turn the 5-point stencil into a 9-point stencil, and the corner + // points are not well defined adjacent to an X-point (corner guard cells). So for + // now assume an orthogonal grid where g12=0 + + // g22*dAdy*dfdy + val = (coords->g22(x, y) - 1./coords->g_22(x,y))*dAdy/(2.*dy); + yp += val; + ym -= val; + + // g12*(dAdx*dfdy + dAdy*dfdx) + // Assume orthogonal grid with g12=0 again + } + + ///////////////////////////////////////////////// + // Now have a 5-point stencil for the Laplacian + + int row = globalIndex(x,y); + + // Set the centre (diagonal) + MatSetValues(MatA,1,&row,1,&row,&c,INSERT_VALUES); + + // X + 1 + int col = globalIndex(x+1, y); + MatSetValues(MatA,1,&row,1,&col,&xp,INSERT_VALUES); + + // X - 1 + col = globalIndex(x-1, y); + MatSetValues(MatA,1,&row,1,&col,&xm,INSERT_VALUES); + + if( include_y_derivs ) { + // Y + 1 + col = globalIndex(x, y+1); + MatSetValues(MatA,1,&row,1,&col,&yp,INSERT_VALUES); + + // Y - 1 + col = globalIndex(x, y-1); + MatSetValues(MatA,1,&row,1,&col,&ym,INSERT_VALUES); + } } - } } @@ -543,7 +647,7 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { ASSERT1(x0.getLocation() == location); // Load initial guess x0 into xs and rhs into bs - + for(int x=localmesh->xstart;x<= localmesh->xend;x++) { for(int y=localmesh->ystart;y<=localmesh->yend;y++) { int ind = globalIndex(x,y); @@ -556,84 +660,187 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { } } - if(localmesh->firstX()) { - if(x_inner_dirichlet) { + if (finite_volume) { + // Use original LaplaceXY implementation of passing boundary values for backward + // compatibility + if(localmesh->firstX()) { + if(x_inner_dirichlet) { + for(int y=localmesh->ystart;y<=localmesh->yend;y++) { + int ind = globalIndex(localmesh->xstart-1,y); + + PetscScalar val = x0(localmesh->xstart-1,y); + VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); + + val = 0.5*(x0(localmesh->xstart-1,y) + x0(localmesh->xstart,y)); + VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); + } + }else { + // Inner X boundary (Neumann) + for(int y=localmesh->ystart;y<=localmesh->yend;y++) { + int ind = globalIndex(localmesh->xstart-1,y); + + PetscScalar val = x0(localmesh->xstart-1,y); + VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); + + val = 0.0; //x0(localmesh->xstart-1,y) - x0(localmesh->xstart,y); + VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); + } + } + } + + // Outer X boundary (Dirichlet) + if(localmesh->lastX()) { for(int y=localmesh->ystart;y<=localmesh->yend;y++) { - int ind = globalIndex(localmesh->xstart-1,y); - - PetscScalar val = x0(localmesh->xstart-1,y); + int ind = globalIndex(localmesh->xend+1,y); + + PetscScalar val = x0(localmesh->xend+1,y); VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); - - val = 0.5*(x0(localmesh->xstart-1,y) + x0(localmesh->xstart,y)); + + val = 0.5*(x0(localmesh->xend,y) + x0(localmesh->xend+1,y)); VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); } - }else { - // Inner X boundary (Neumann) - for(int y=localmesh->ystart;y<=localmesh->yend;y++) { - int ind = globalIndex(localmesh->xstart-1,y); - - PetscScalar val = x0(localmesh->xstart-1,y); + } + + if(y_bndry_dirichlet) { + for(RangeIterator 
it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { + int ind = globalIndex(it.ind, localmesh->ystart-1); + + PetscScalar val = x0(it.ind,localmesh->ystart-1); VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); - - val = 0.0; //x0(localmesh->xstart-1,y) - x0(localmesh->xstart,y); + + val = 0.5*(x0(it.ind, localmesh->ystart-1) + x0(it.ind, localmesh->ystart)); VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); } - } - } - - // Outer X boundary (Dirichlet) - if(localmesh->lastX()) { - for(int y=localmesh->ystart;y<=localmesh->yend;y++) { - int ind = globalIndex(localmesh->xend+1,y); - - PetscScalar val = x0(localmesh->xend+1,y); - VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); - - val = 0.5*(x0(localmesh->xend,y) + x0(localmesh->xend+1,y)); - VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); - } - } - if(y_bndry_dirichlet) { - for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { - int ind = globalIndex(it.ind, localmesh->ystart-1); - - PetscScalar val = x0(it.ind,localmesh->ystart-1); - VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); - - val = 0.5*(x0(it.ind, localmesh->ystart-1) + x0(it.ind, localmesh->ystart)); - VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); - } - - for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { - int ind = globalIndex(it.ind, localmesh->yend+1); - - PetscScalar val = x0(it.ind,localmesh->yend+1); - VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); - - val = 0.5*(x0(it.ind, localmesh->yend+1) + x0(it.ind, localmesh->yend)); - VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); + for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { + int ind = globalIndex(it.ind, localmesh->yend+1); + + PetscScalar val = x0(it.ind,localmesh->yend+1); + VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); + + val = 0.5*(x0(it.ind, localmesh->yend+1) + x0(it.ind, localmesh->yend)); + VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); + } + } else { + // Y boundaries Neumann + for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { + int ind = globalIndex(it.ind, localmesh->ystart-1); + + PetscScalar val = x0(it.ind,localmesh->ystart-1); + VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); + + val = 0.0; + VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); + } + + for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { + int ind = globalIndex(it.ind, localmesh->yend+1); + + PetscScalar val = x0(it.ind,localmesh->yend+1); + VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); + + val = 0.0; + VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); + } } } else { - // Y boundaries Neumann - for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { - int ind = globalIndex(it.ind, localmesh->ystart-1); - - PetscScalar val = x0(it.ind,localmesh->ystart-1); - VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); - - val = 0.0; - VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); + // For finite-difference implementation pass boundary values in the same way as for + // the 'Laplacian' solvers - the value to use (for Dirichlet boundary conditions) on + // the boundary (which is half way between grid cell and boundary cell) is passed as + // the value in the first boundary cell. 
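      // (Editor's illustration, assuming the convention described in the comment
      // above; 'f_boundary' and 'guard_guess' are hypothetical names, not part of
      // the patch.) A Dirichlet value passed in the first boundary cell of x0
      // corresponds to the following guard-cell entry for the initial guess, since
      // the boundary sits half way between the last grid cell and the boundary cell:
      //
      //   BoutReal f_boundary  = x0(localmesh->xstart - 1, y);
      //   BoutReal guard_guess = 2. * f_boundary - x0(localmesh->xstart, y);
      //   // so that 0.5*(guard_guess + x0(localmesh->xstart, y)) == f_boundary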
+ if (localmesh->firstX()) { + if (x_inner_dirichlet) { + for (int y=localmesh->ystart; y<=localmesh->yend; y++) { + int ind = globalIndex(localmesh->xstart-1, y); + + // Use the value that would be set by applying the boundary condition to the + // initial guess + PetscScalar val = 2.*x0(localmesh->xstart-1, y) - x0(localmesh->xstart, y); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = x0(localmesh->xstart-1, y); + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + } else { + // Inner X boundary (Neumann) + for(int y=localmesh->ystart; y<=localmesh->yend; y++) { + int ind = globalIndex(localmesh->xstart-1, y); + + // Use the value that would be set by applying the boundary condition to the + // initial guess + PetscScalar val = x0(localmesh->xstart, y); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + } } - - for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { - int ind = globalIndex(it.ind, localmesh->yend+1); - - PetscScalar val = x0(it.ind,localmesh->yend+1); - VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); - - val = 0.0; - VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); + + // Outer X boundary (Dirichlet) + if (localmesh->lastX()) { + for (int y=localmesh->ystart; y<=localmesh->yend; y++) { + int ind = globalIndex(localmesh->xend+1, y); + + // Use the value that would be set by applying the boundary condition to the + // initial guess + PetscScalar val = 2.*x0(localmesh->xend+1, y) - x0(localmesh->xend, y); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = x0(localmesh->xend+1, y); + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + } + + if(y_bndry_dirichlet) { + for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { + int ind = globalIndex(it.ind, localmesh->ystart-1); + + // Use the value that would be set by applying the boundary condition to the + // initial guess + PetscScalar val = 2.*x0(it.ind,localmesh->ystart-1) - x0(it.ind,localmesh->ystart); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = x0(it.ind, localmesh->ystart-1); + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + + for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { + int ind = globalIndex(it.ind, localmesh->yend+1); + + // Use the value that would be set by applying the boundary condition to the + // initial guess + PetscScalar val = 2.*x0(it.ind,localmesh->yend+1) - x0(it.ind,localmesh->yend); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = x0(it.ind, localmesh->yend+1); + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + } else { + // Y boundaries Neumann + for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { + int ind = globalIndex(it.ind, localmesh->ystart-1); + + // Use the value that would be set by applying the boundary condition to the + // initial guess + PetscScalar val = x0(it.ind,localmesh->ystart); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + + for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { + int ind = globalIndex(it.ind, localmesh->yend+1); + + // Use the value that would be set by applying the boundary condition to the + // initial guess + PetscScalar val = x0(it.ind,localmesh->yend); + VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); + + val = 0.0; + VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); + } } } From ae4486db4dfd2ce51fe7757dcba1d442ddba8193 Mon Sep 17 
00:00:00 2001 From: John Omotani Date: Wed, 21 Aug 2019 16:29:37 +0100 Subject: [PATCH 002/428] Implement g12 terms Set up corner cells in LaplaceXY, to allow a 9-point stencil to be used. Implement the g12 terms in LaplaceXY, with mixed x-y derivatives. --- src/invert/laplacexy/laplacexy.cxx | 518 +++++++++++++++++++++++------ 1 file changed, 425 insertions(+), 93 deletions(-) diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index bd1650d850..eb5fbd6a9c 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -38,6 +38,10 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) opt = &(Options::root()["laplacexy"]); } + finite_volume = (*opt)["finite_volume"].doc( + "Use finite volume rather than finite difference discritisation." + ).withDefault(true); + // Get MPI communicator MPI_Comm comm = BoutComm::get(); @@ -67,9 +71,27 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { indexXY(it.ind, localmesh->ystart-1) = ind++; } + if ((not finite_volume) and localmesh->hasBndryLowerY()) { + // Corner boundary cells + if (localmesh->firstX()) { + indexXY(localmesh->xstart-1, localmesh->ystart-1) = ind++; + } + if (localmesh->lastX()) { + indexXY(localmesh->xend+1, localmesh->ystart-1) = ind++; + } + } for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { indexXY(it.ind, localmesh->yend+1) = ind++; } + if ((not finite_volume) and localmesh->hasBndryUpperY()) { + // Corner boundary cells + if (localmesh->firstX()) { + indexXY(localmesh->xstart-1, localmesh->yend+1) = ind++; + } + if (localmesh->lastX()) { + indexXY(localmesh->xend+1, localmesh->yend+1) = ind++; + } + } xstart = localmesh->xstart; if(localmesh->firstX()) @@ -106,96 +128,217 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) PetscMalloc( (localN)*sizeof(PetscInt), &d_nnz ); PetscMalloc( (localN)*sizeof(PetscInt), &o_nnz ); - for(int i=0;ifirstX()) { - // Lower X boundary - for(int y=localmesh->ystart;y<=localmesh->yend;y++) { - const int localIndex = globalIndex(localmesh->xstart - 1, y); - ASSERT1( (localIndex >= 0) && (localIndex < localN) ); - - d_nnz[localIndex] = 2; // Diagonal sub-matrix - o_nnz[localIndex] = 0; // Off-diagonal sub-matrix + if (finite_volume) { + // This discretisation uses a 5-point stencil + for(int i=0;iystart;y<=localmesh->yend;y++) { - const int localIndex = globalIndex(localmesh->xstart, y); - ASSERT1( (localIndex >= 0) && (localIndex < localN) ); - d_nnz[localIndex] -= 1; - o_nnz[localIndex] += 1; + + // X boundaries + if(localmesh->firstX()) { + // Lower X boundary + for(int y=localmesh->ystart;y<=localmesh->yend;y++) { + const int localIndex = globalIndex(localmesh->xstart - 1, y); + ASSERT1( (localIndex >= 0) && (localIndex < localN) ); + + d_nnz[localIndex] = 2; // Diagonal sub-matrix + o_nnz[localIndex] = 0; // Off-diagonal sub-matrix + } + }else { + // On another processor + for(int y=localmesh->ystart;y<=localmesh->yend;y++) { + const int localIndex = globalIndex(localmesh->xstart, y); + ASSERT1( (localIndex >= 0) && (localIndex < localN) ); + d_nnz[localIndex] -= 1; + o_nnz[localIndex] += 1; + } } - } - if(localmesh->lastX()) { - // Upper X boundary - for(int y=localmesh->ystart;y<=localmesh->yend;y++) { - const int localIndex = globalIndex(localmesh->xend + 1, y); - ASSERT1( (localIndex >= 0) && (localIndex < localN) ); - d_nnz[localIndex] = 2; // Diagonal sub-matrix - o_nnz[localIndex] 
= 0; // Off-diagonal sub-matrix + if(localmesh->lastX()) { + // Upper X boundary + for(int y=localmesh->ystart;y<=localmesh->yend;y++) { + const int localIndex = globalIndex(localmesh->xend + 1, y); + ASSERT1( (localIndex >= 0) && (localIndex < localN) ); + d_nnz[localIndex] = 2; // Diagonal sub-matrix + o_nnz[localIndex] = 0; // Off-diagonal sub-matrix + } + }else { + // On another processor + for(int y=localmesh->ystart;y<=localmesh->yend;y++) { + const int localIndex = globalIndex(localmesh->xend, y); + ASSERT1( (localIndex >= 0) && (localIndex < localN) ); + d_nnz[localIndex] -= 1; + o_nnz[localIndex] += 1; + } } - }else { - // On another processor - for(int y=localmesh->ystart;y<=localmesh->yend;y++) { - const int localIndex = globalIndex(localmesh->xend, y); - ASSERT1( (localIndex >= 0) && (localIndex < localN) ); - d_nnz[localIndex] -= 1; - o_nnz[localIndex] += 1; + // Y boundaries + + for(int x=localmesh->xstart; x <=localmesh->xend; x++) { + // Default to no boundary + // NOTE: This assumes that communications in Y are to other + // processors. If Y is communicated with this processor (e.g. NYPE=1) + // then this will result in PETSc warnings about out of range allocations + { + const int localIndex = globalIndex(x, localmesh->ystart); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + // d_nnz[localIndex] -= 1; // Note: Slightly inefficient + o_nnz[localIndex] += 1; + } + { + const int localIndex = globalIndex(x, localmesh->yend); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + // d_nnz[localIndex] -= 1; // Note: Slightly inefficient + o_nnz[localIndex] += 1; + } } - } - // Y boundaries - - for(int x=localmesh->xstart; x <=localmesh->xend; x++) { - // Default to no boundary - // NOTE: This assumes that communications in Y are to other - // processors. If Y is communicated with this processor (e.g. 
NYPE=1) - // then this will result in PETSc warnings about out of range allocations - { - const int localIndex = globalIndex(x, localmesh->ystart); - ASSERT1((localIndex >= 0) && (localIndex < localN)); - // d_nnz[localIndex] -= 1; // Note: Slightly inefficient - o_nnz[localIndex] += 1; + + for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { + { + const int localIndex = globalIndex(it.ind, localmesh->ystart - 1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = 2; // Diagonal sub-matrix + o_nnz[localIndex] = 0; // Off-diagonal sub-matrix + } + { + const int localIndex = globalIndex(it.ind, localmesh->ystart); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] += 1; + o_nnz[localIndex] -= 1; + } } - { - const int localIndex = globalIndex(x, localmesh->yend); - ASSERT1((localIndex >= 0) && (localIndex < localN)); - // d_nnz[localIndex] -= 1; // Note: Slightly inefficient - o_nnz[localIndex] += 1; + for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { + { + const int localIndex = globalIndex(it.ind, localmesh->yend + 1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = 2; // Diagonal sub-matrix + o_nnz[localIndex] = 0; // Off-diagonal sub-matrix + } + { + const int localIndex = globalIndex(it.ind, localmesh->yend); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] += 1; + o_nnz[localIndex] -= 1; + } } - } - - for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { - { - const int localIndex = globalIndex(it.ind, localmesh->ystart - 1); - ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] = 2; // Diagonal sub-matrix - o_nnz[localIndex] = 0; // Off-diagonal sub-matrix + } else { + // This discretisation uses a 9-point stencil + for(int i=0;ifirstX()) { + // Lower X boundary + for(int y=localmesh->ystart;y<=localmesh->yend;y++) { + const int localIndex = globalIndex(localmesh->xstart - 1, y); + ASSERT1( (localIndex >= 0) && (localIndex < localN) ); + + d_nnz[localIndex] = 2; // Diagonal sub-matrix + o_nnz[localIndex] = 0; // Off-diagonal sub-matrix + } + }else { + // On another processor + for(int y=localmesh->ystart;y<=localmesh->yend;y++) { + const int localIndex = globalIndex(localmesh->xstart, y); + ASSERT1( (localIndex >= 0) && (localIndex < localN) ); + d_nnz[localIndex] -= 3; + o_nnz[localIndex] += 3; + } + } + if(localmesh->lastX()) { + // Upper X boundary + for(int y=localmesh->ystart;y<=localmesh->yend;y++) { + const int localIndex = globalIndex(localmesh->xend + 1, y); + ASSERT1( (localIndex >= 0) && (localIndex < localN) ); + d_nnz[localIndex] = 2; // Diagonal sub-matrix + o_nnz[localIndex] = 0; // Off-diagonal sub-matrix + } + }else { + // On another processor + for(int y=localmesh->ystart;y<=localmesh->yend;y++) { + const int localIndex = globalIndex(localmesh->xend, y); + ASSERT1( (localIndex >= 0) && (localIndex < localN) ); + d_nnz[localIndex] -= 3; + o_nnz[localIndex] += 3; + } } - { - const int localIndex = globalIndex(it.ind, localmesh->ystart); - ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] += 1; - o_nnz[localIndex] -= 1; + // Y boundaries + + for(int x=localmesh->xstart; x <=localmesh->xend; x++) { + // Default to no boundary + // NOTE: This assumes that communications in Y are to other + // processors. If Y is communicated with this processor (e.g. 
NYPE=1) + // then this will result in PETSc warnings about out of range allocations + { + const int localIndex = globalIndex(x, localmesh->ystart); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + // d_nnz[localIndex] -= 3; // Note: Slightly inefficient + o_nnz[localIndex] += 3; + } + { + const int localIndex = globalIndex(x, localmesh->yend); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + // d_nnz[localIndex] -= 3; // Note: Slightly inefficient + o_nnz[localIndex] += 3; + } } - } - for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { - { - const int localIndex = globalIndex(it.ind, localmesh->yend + 1); - ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] = 2; // Diagonal sub-matrix - o_nnz[localIndex] = 0; // Off-diagonal sub-matrix + + for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { + { + const int localIndex = globalIndex(it.ind, localmesh->ystart - 1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = 2; // Diagonal sub-matrix + o_nnz[localIndex] = 0; // Off-diagonal sub-matrix + } + { + const int localIndex = globalIndex(it.ind, localmesh->ystart); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + //d_nnz[localIndex] += 3; + o_nnz[localIndex] -= 3; + } } - { - const int localIndex = globalIndex(it.ind, localmesh->yend); - ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] += 1; - o_nnz[localIndex] -= 1; + if (localmesh->hasBndryLowerY()) { + if (localmesh->firstX()) { + const int localIndex = globalIndex(localmesh->xstart-1, localmesh->ystart-1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = 2; + } + if (localmesh->lastX()) { + const int localIndex = globalIndex(localmesh->xend+1, localmesh->ystart-1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = 2; + } + } + for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { + { + const int localIndex = globalIndex(it.ind, localmesh->yend + 1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = 2; // Diagonal sub-matrix + o_nnz[localIndex] = 0; // Off-diagonal sub-matrix + } + { + const int localIndex = globalIndex(it.ind, localmesh->yend); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] += 1; + o_nnz[localIndex] -= 1; + } + } + if (localmesh->hasBndryUpperY()) { + if (localmesh->firstX()) { + const int localIndex = globalIndex(localmesh->xstart-1, localmesh->yend+1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = 2; + } + if (localmesh->lastX()) { + const int localIndex = globalIndex(localmesh->xend+1, localmesh->yend+1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = 2; + } } } // Pre-allocate @@ -215,6 +358,67 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) // Now communicate to fill guard cells localmesh->communicate(indexXY); + // Fill corner guard cells by getting the y-guard cells from the x-neighbours. + // This is an arbitrary choice, which makes a difference at the X-point, where taking + // the x-guard cells from the y-neighbours would give points in different regions. 
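  // (Editor's note, a sketch of the tag matching in the point-to-point exchanges
  // below; 'p' is illustrative shorthand for this processor's X index.) For the
  // "lower, inner corner" exchange, the processor at X index p posts a receive
  // with tag tag0 + p, while its inner neighbour at X index p-1 sends with tag
  // tag0 + ((p-1) + 1) == tag0 + p, so each message pairs up unambiguously within
  // one row of Y processors (tag0 depends only on the Y processor index).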
+ auto xcomm = localmesh->getXcomm(); + int proc_xind = localmesh->getXProcIndex(); + int tag0 = localmesh->getYProcIndex()*localmesh->getNXPE(); + MPI_Request requests[] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL}; + MPI_Status statuses[2]; + + // Get lower, inner corner + if (not localmesh->firstX()) { + // Receive from inner + MPI_Irecv(&indexXY(localmesh->xstart-1, localmesh->ystart-1), 1, MPI_DOUBLE, + proc_xind-1, tag0 + 1*proc_xind, xcomm, &requests[0]); + } + if (not localmesh->lastX()) { + // Need to send lower outer point out + MPI_Isend(&indexXY(localmesh->xend, localmesh->ystart-1), 1, MPI_DOUBLE, proc_xind+1, + tag0 + 1*(proc_xind + 1), xcomm, &requests[1]); + } + MPI_Waitall(2, requests, statuses); + + // Get upper, inner corner + if (not localmesh->firstX()) { + // Receive from inner + MPI_Irecv(&indexXY(localmesh->xstart-1, localmesh->yend+1), 1, MPI_DOUBLE, + proc_xind-1, tag0 + 1*proc_xind, xcomm, &requests[0]); + } + if (not localmesh->lastX()) { + // Need to send upper outer point out + MPI_Isend(&indexXY(localmesh->xend, localmesh->yend+1), 1, MPI_DOUBLE, proc_xind+1, + tag0 + 1*(proc_xind + 1), xcomm, &requests[1]); + } + MPI_Waitall(2, requests, statuses); + + // Get lower, outer corner + if (not localmesh->lastX()) { + // Receive from outer + MPI_Irecv(&indexXY(localmesh->xend+1, localmesh->ystart-1), 1, MPI_DOUBLE, + proc_xind+1, tag0 + proc_xind, xcomm, &requests[0]); + } + if (not localmesh->firstX()) { + // Need to send lower inner point in + MPI_Isend(&indexXY(localmesh->xstart, localmesh->ystart-1), 1, MPI_DOUBLE, + proc_xind-1, tag0 + (proc_xind - 1), xcomm, &requests[1]); + } + MPI_Waitall(2, requests, statuses); + + // Get upper, outer corner + if (not localmesh->lastX()) { + // Receive from outer + MPI_Irecv(&indexXY(localmesh->xend+1, localmesh->yend+1), 1, MPI_DOUBLE, + proc_xind+1, tag0 + proc_xind, xcomm, &requests[0]); + } + if (not localmesh->firstX()) { + // Need to send upper inner point in + MPI_Isend(&indexXY(localmesh->xstart, localmesh->yend+1), 1, MPI_DOUBLE, + proc_xind-1, tag0 + (proc_xind - 1), xcomm, &requests[1]); + } + MPI_Waitall(2, requests, statuses); + ////////////////////////////////////////////////// // Set up KSP @@ -223,9 +427,6 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) // Configure Linear Solver - finite_volume = (*opt)["finite_volume"].doc( - "Use finite volume rather than finite difference discritisation." 
- ).withDefault(true); bool direct = (*opt)["direct"].doc("Use a direct LU solver").withDefault(false); if(direct) { @@ -425,10 +626,10 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { Field2D coef_dfdy = coords->G2 - DDY(coords->J/coords->g_22)/coords->J; - for(int x=localmesh->xstart; x <= localmesh->xend; x++) { - for(int y=localmesh->ystart;y<=localmesh->yend;y++) { + for(int x = localmesh->xstart; x <= localmesh->xend; x++) { + for(int y = localmesh->ystart; y <= localmesh->yend; y++) { // stencil entries - PetscScalar c, xm, xp, ym, yp; + PetscScalar c, xm, xp, ym, yp, xpyp, xpym, xmyp, xmym; BoutReal dx = coords->dx(x,y); @@ -458,6 +659,8 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { if(include_y_derivs) { BoutReal dy = coords->dy(x,y); + BoutReal dAdx = (A(x+1, y) - A(x-1, y))/(2.*dx); + BoutReal dAdy = (A(x, y+1) - A(x, y-1))/(2.*dy); // A*(G2-1/J*d/dy(J/g_22))*dfdy val = A(x, y)*coef_dfdy(x, y)/(2.*dy); @@ -470,10 +673,12 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { c -= 2.*val; ym += val; - // 2*g12*d2dfdxdy - // This term would turn the 5-point stencil into a 9-point stencil, and the corner - // points are not well defined adjacent to an X-point (corner guard cells). So for - // now assume an orthogonal grid where g12=0 + // 2*A*g12*d2dfdxdy + val = A(x, y)*coords->g12(x, y)/(2.*dx*dy); + xpyp = val; + xpym = -val; + xmyp = -val; + xmym = val; // g22*dAdy*dfdy val = (coords->g22(x, y) - 1./coords->g_22(x,y))*dAdy/(2.*dy); @@ -481,11 +686,16 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { ym -= val; // g12*(dAdx*dfdy + dAdy*dfdx) - // Assume orthogonal grid with g12=0 again + val = coords->g12(x, y)*dAdx/(2.*dy); + yp += val; + ym -= val; + val = coords->g12(x, y)*dAdy/(2.*dx); + xp += val; + xm -= val; } ///////////////////////////////////////////////// - // Now have a 5-point stencil for the Laplacian + // Now have a 9-point stencil for the Laplacian int row = globalIndex(x,y); @@ -508,6 +718,22 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { // Y - 1 col = globalIndex(x, y-1); MatSetValues(MatA,1,&row,1,&col,&ym,INSERT_VALUES); + + // X + 1, Y + 1 + col = globalIndex(x+1, y+1); + MatSetValues(MatA,1,&row,1,&col,&xpyp,INSERT_VALUES); + + // X + 1, Y - 1 + col = globalIndex(x+1, y-1); + MatSetValues(MatA,1,&row,1,&col,&xpym,INSERT_VALUES); + + // X - 1, Y + 1 + col = globalIndex(x-1, y+1); + MatSetValues(MatA,1,&row,1,&col,&xmyp,INSERT_VALUES); + + // X - 1, Y - 1 + col = globalIndex(x-1, y-1); + MatSetValues(MatA,1,&row,1,&col,&xmym,INSERT_VALUES); } } } @@ -608,6 +834,51 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); } } + + if (not finite_volume) { + // Handle corner boundary cells in case we need to include D2DXDY + // Apply the y-boundary-condition to the cells in the x-boundary - this is an + // arbitrary choice, cf. 
connections around the X-point + + if (localmesh->hasBndryLowerY()) { + if (localmesh->firstX()) { + if (y_bndry_dirichlet) { + // Both Dirichlet + throw BoutException("Dirichlet y-boundary-condition not supported for mixed " + "second derivatives."); + } else { + // Neumann y-bc + // f(xs-1,ys-1) = f(xs-1,ys) + PetscScalar val = 1.0; + int row = globalIndex(localmesh->xstart-1, localmesh->ystart-1); + MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); + + val = -1.0; + int col = globalIndex(localmesh->xstart-1, localmesh->ystart); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } + } + } + if (localmesh->hasBndryUpperY()) { + if (localmesh->firstX()) { + if (y_bndry_dirichlet) { + // Both Dirichlet + throw BoutException("Dirichlet y-boundary-condition not supported for mixed " + "second derivatives."); + } else { + // Neumann y-bc + // f(xs-1,ys-1) = f(xs-1,ys) + PetscScalar val = 1.0; + int row = globalIndex(localmesh->xstart-1, localmesh->yend+1); + MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); + + val = -1.0; + int col = globalIndex(localmesh->xstart-1, localmesh->yend); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } + } + } + } // Assemble Matrix MatAssemblyBegin( MatA, MAT_FINAL_ASSEMBLY ); @@ -816,6 +1087,11 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { val = x0(it.ind, localmesh->yend+1); VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); } + + if ((localmesh->hasBndryLowerY() or localmesh->hasBndryUpperY()) + and (localmesh->firstX() or localmesh->lastX())) { + throw BoutException("Dirichlet not implemented for mixed-derivatives."); + } } else { // Y boundaries Neumann for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { @@ -829,6 +1105,26 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { val = 0.0; VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); } + if (localmesh->hasBndryLowerY()) { + if (localmesh->firstX()) { + int ind = globalIndex(localmesh->xstart-1, localmesh->ystart-1); + + PetscScalar val = x0(localmesh->xstart-1, localmesh->ystart); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + if (localmesh->lastX()) { + int ind = globalIndex(localmesh->xend+1, localmesh->ystart-1); + + PetscScalar val = x0(localmesh->xend+1, localmesh->ystart); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + } for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { int ind = globalIndex(it.ind, localmesh->yend+1); @@ -841,6 +1137,26 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { val = 0.0; VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); } + if (localmesh->hasBndryUpperY()) { + if (localmesh->firstX()) { + int ind = globalIndex(localmesh->xstart-1, localmesh->yend+1); + + PetscScalar val = x0(localmesh->xstart-1, localmesh->yend); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + if (localmesh->lastX()) { + int ind = globalIndex(localmesh->xend+1, localmesh->yend+1); + + PetscScalar val = x0(localmesh->xend+1, localmesh->yend); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + } } } @@ -1009,9 +1325,25 @@ int LaplaceXY::localSize() { for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { n++; } + if ((not finite_volume) and 
localmesh->hasBndryLowerY()) { + if (localmesh->firstX()) { + n++; + } + if (localmesh->lastX()) { + n++; + } + } for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { n++; } + if ((not finite_volume) and localmesh->hasBndryUpperY()) { + if (localmesh->firstX()) { + n++; + } + if (localmesh->lastX()) { + n++; + } + } return n; } From bb601a94446e1cbd89b9ec00d74ba06e9384d5be Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 21 Aug 2019 23:47:03 +0100 Subject: [PATCH 003/428] Field2D/Vector2D version of Grad_perp --- include/vecops.hxx | 2 ++ src/field/vecops.cxx | 21 +++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/include/vecops.hxx b/include/vecops.hxx index c8a815181d..2f06a51fca 100644 --- a/include/vecops.hxx +++ b/include/vecops.hxx @@ -66,6 +66,8 @@ const Vector3D Grad_perp(const Field3D& f, CELL_LOC outloc = CELL_DEFAULT, inline const Vector3D Grad_perp(const Field3D& f, CELL_LOC outloc, DIFF_METHOD method) { return Grad_perp(f, outloc, toString(method)); } +const Vector2D Grad_perp(const Field2D& f, CELL_LOC outloc = CELL_DEFAULT, + const std::string& method = "DEFAULT"); /// Divergence of a vector \p v, returning a scalar /// diff --git a/src/field/vecops.cxx b/src/field/vecops.cxx index 991eacabb5..ec71d09293 100644 --- a/src/field/vecops.cxx +++ b/src/field/vecops.cxx @@ -118,6 +118,27 @@ const Vector3D Grad_perp(const Field3D &f, CELL_LOC outloc, const std::string& m return result; } +const Vector2D Grad_perp(const Field2D &f, CELL_LOC outloc, const std::string& method) { + AUTO_TRACE(); + SCOREP0(); + ASSERT1(outloc == CELL_DEFAULT || outloc == f.getLocation()); + + Coordinates *metric = f.getCoordinates(outloc); + + Vector2D result(f.getMesh()); + + result.x = DDX(f, outloc, method) + - metric->g_12 * DDY(f, outloc, method) / SQ(metric->J * metric->Bxy); + result.y = 0.0; + result.z = - metric->g_23 * DDY(f, outloc, method) / SQ(metric->J * metric->Bxy); + + result.setLocation(result.x.getLocation()); + + result.covariant = true; + + return result; +} + /************************************************************************** * Divergence operators **************************************************************************/ From 70fe131a53589760ffef4ff171c44f18c2853031 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 21 Aug 2019 23:47:46 +0100 Subject: [PATCH 004/428] Integrated test for LaplaceXY --- tests/integrated/test-laplacexy/data/BOUT.inp | 53 ++++++++++++ tests/integrated/test-laplacexy/makefile | 5 ++ tests/integrated/test-laplacexy/runtest | 76 +++++++++++++++++ .../test-laplacexy/test-laplacexy.cxx | 82 +++++++++++++++++++ 4 files changed, 216 insertions(+) create mode 100644 tests/integrated/test-laplacexy/data/BOUT.inp create mode 100644 tests/integrated/test-laplacexy/makefile create mode 100755 tests/integrated/test-laplacexy/runtest create mode 100644 tests/integrated/test-laplacexy/test-laplacexy.cxx diff --git a/tests/integrated/test-laplacexy/data/BOUT.inp b/tests/integrated/test-laplacexy/data/BOUT.inp new file mode 100644 index 0000000000..2541197765 --- /dev/null +++ b/tests/integrated/test-laplacexy/data/BOUT.inp @@ -0,0 +1,53 @@ +mz = 1 + +[mesh] +nx = 132 +ny = 128 + +dx = 1./nx +dy = 40./ny + +g11 = 1. +g22 = 1. +g33 = 1. +g12 = 0.3 +g13 = 0. 
+g23 = 0.5 + +jyseps1_1 = 15 +jyseps2_1 = 47 +ny_inner = 64 +jyseps1_2 = 79 +jyseps2_2 = 111 + +ixseps1 = 64 +ixseps2 = 64 + +[laplacexy] +pctype = shell # Supply a second solver as a preconditioner +#pctype = hypre # Algebraic multigrid preconditioner using hypre library +finite_volume = false +rtol = 1.e-14 + +core_bndry_dirichlet = true +pf_bndry_dirichlet = true +y_bndry_dirichlet = false + +[f] +# make an input: +# - compatible with both Dirichlet and Neumann boundary conditions in either x- +# or y-directions +# - y-boundaries at -pi/2, 3pi/2, pi/2 and 5pi/2 +# - periodic in y 0->2pi +function = sin(2.*pi*x)^2 * sin(y - pi/2.)^2 + +bndry_xin = dirichlet +bndry_xout = dirichlet +bndry_yup = neumann +bndry_ydown = neumann + +[a] +function = 1. + .1*sin(x + .1)*sin(y/pi + .1) + +[b] +function = 0. diff --git a/tests/integrated/test-laplacexy/makefile b/tests/integrated/test-laplacexy/makefile new file mode 100644 index 0000000000..e7789b2203 --- /dev/null +++ b/tests/integrated/test-laplacexy/makefile @@ -0,0 +1,5 @@ +BOUT_TOP = ../../.. + +SOURCEC = test-laplacexy.cxx + +include $(BOUT_TOP)/make.config diff --git a/tests/integrated/test-laplacexy/runtest b/tests/integrated/test-laplacexy/runtest new file mode 100755 index 0000000000..791d9ce1f0 --- /dev/null +++ b/tests/integrated/test-laplacexy/runtest @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 + +# +# Run the test, compare results against the benchmark +# + +#requires: petsc + +from boututils.run_wrapper import shell, shell_safe, launch_safe +from boutdata.collect import collect +from sys import exit + +tol_orth = 5.e-8 + +# Note accuracy of test is limited when g12!=0 by inconsistency between the way boundary +# conditions are applied in LaplaceXY and the way they are applied in the D2DXDY() +# operator called by Laplace_perp(). In D2DXDY(f) 'free_o3' boundary conditions are +# applied to dfdy before calculating DDX(dfdy). 
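# (Editor's note) i.e. when g12 != 0 the LaplaceXY matrix and the Laplace_perp()
# operator used to build the test's rhs treat the y-boundary slightly differently,
# so the non-orthogonal cases are only checked to the looser tolerance below,
# while the orthogonal (mesh:g12=0) cases use tol_orth.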
+tol_nonorth = 1.e-5 + +argslist = [#'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry_dirichlet=true', + 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry_dirichlet=false ' + 'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', + #'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry_dirichlet=true ' + #'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=dirichlet f:bndry_ydown=dirichlet', + #'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry_dirichlet=false ' + #'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', + #'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry_dirichlet=true ' + #'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=dirichlet f:bndry_ydown=dirichlet', + #'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry_dirichlet=false ' + #'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', + #'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry_dirichlet=true ' + #'f:bndry_xin=neumann f:bndry_xout=neumann f:bndry_yup=dirichlet f:bndry_ydown=dirichlet', + 'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry_dirichlet=false ' + 'f:bndry_xin=neumann f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', + ] + +print('Making LaplaceXY inversion test') +shell_safe('make > make.log') + +print('Running LaplaceXY inversion test') +success = True + +for nproc in [8]: + print(' %d processors....' % nproc) + for nonorth, tol in [(False, tol_orth), (True, tol_nonorth)]: + for args in argslist: + if not nonorth: + args += ' mesh:g12=0.' + + cmd = './test-laplacexy ' + args + ' -ksp_gmres_restart 500' + + shell('rm data/BOUT.dmp.*.nc > /dev/null 2>&1') + + s, out = launch_safe(cmd, nproc=nproc, pipe=True,verbose=True) + f = open('run.log.'+str(nproc), 'w') + f.write(out) + f.close() + + # Collect output data + error = collect('max_error', path='data', info=False) + if error <= 0: + print('Convergence error') + success = False + elif error > tol: + print('Fail, maximum error is = '+str(error)) + success = False + else: + print('Pass') + +if success: + print(' => All LaplaceXY inversion tests passed') + exit(0) +else: + print(' => Some failed tests') + exit(1) diff --git a/tests/integrated/test-laplacexy/test-laplacexy.cxx b/tests/integrated/test-laplacexy/test-laplacexy.cxx new file mode 100644 index 0000000000..c3c52451db --- /dev/null +++ b/tests/integrated/test-laplacexy/test-laplacexy.cxx @@ -0,0 +1,82 @@ +/************************************************************************** + * Testing Perpendicular Laplacian inversion using PETSc solvers + * + ************************************************************************** + * Copyright 2019 J.T. Omotani, B.D. Dudson + * + * Contact: Ben Dudson, bd512@york.ac.uk + * + * This file is part of BOUT++. + * + * BOUT++ is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * BOUT++ is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with BOUT++. If not, see . + * + **************************************************************************/ + +#include +#include +#include +#include +#include + +int main(int argc, char** argv) { + + BoutInitialise(argc, argv); + + LaplaceXY laplacexy; + + // Solving equations of the form + // Div(A Grad_perp(f)) + B*f = rhs + // A*Laplace_perp(f) + Grad_perp(A).Grad_perp(f) + B*f = rhs + Field2D f, a, b, sol; + Field2D error, absolute_error; //Absolute value of relative error: abs((f - sol)/f) + BoutReal max_error; //Output of test + + initial_profile("f", f); + initial_profile("a", a); + initial_profile("b", b); + + // Apply boundary conditions to f, so the boundary cells match the way boundary + // conditions will be applied to sol + f.setBoundary("f"); + f.applyBoundary(); + + //////////////////////////////////////////////////////////////////////////////////////// + + Field2D rhs = a*Laplace_perp(f) + Grad_perp(a)*Grad_perp(f) + b*f; + + laplacexy.setCoefs(a, b); + + sol = laplacexy.solve(rhs, 0.); + error = (f - sol)/f; + absolute_error = f - sol; + max_error = max(abs(absolute_error), true); + + output<<"Magnitude of maximum absolute error is "< Date: Thu, 22 Aug 2019 17:46:46 +0100 Subject: [PATCH 005/428] LaplaceXY tests with B coefficient non-zero --- tests/integrated/test-laplacexy/runtest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integrated/test-laplacexy/runtest b/tests/integrated/test-laplacexy/runtest index 791d9ce1f0..a22c4b8091 100755 --- a/tests/integrated/test-laplacexy/runtest +++ b/tests/integrated/test-laplacexy/runtest @@ -33,6 +33,10 @@ argslist = [#'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=t #'f:bndry_xin=neumann f:bndry_xout=neumann f:bndry_yup=dirichlet f:bndry_ydown=dirichlet', 'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry_dirichlet=false ' 'f:bndry_xin=neumann f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', + 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry_dirichlet=false ' + 'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann b:function=.1', + 'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry_dirichlet=false ' + 'f:bndry_xin=neumann f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann b:function=.1', ] print('Making LaplaceXY inversion test') From 2645eaa859f90349f520b772086b91112efa2d5f Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 22 Aug 2019 17:48:05 +0100 Subject: [PATCH 006/428] LaplaceXY test is slow, so require all_tests Especially when using 'pctype = shell' rather than 'pctype = hypre', test-laplacexy is very slow. 
--- tests/integrated/test-laplacexy/runtest | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integrated/test-laplacexy/runtest b/tests/integrated/test-laplacexy/runtest index a22c4b8091..c06788efc2 100755 --- a/tests/integrated/test-laplacexy/runtest +++ b/tests/integrated/test-laplacexy/runtest @@ -5,6 +5,7 @@ # #requires: petsc +#requires: all_tests from boututils.run_wrapper import shell, shell_safe, launch_safe from boutdata.collect import collect From 340b8b5669f18aa8cd3d7b761f1f485fb5a06e03 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 22 Aug 2019 18:55:17 +0100 Subject: [PATCH 007/428] Note in manual on finite_volume=false option for LaplaceXY --- manual/sphinx/user_docs/laplacian.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/manual/sphinx/user_docs/laplacian.rst b/manual/sphinx/user_docs/laplacian.rst index c41a958135..58c028bc3e 100644 --- a/manual/sphinx/user_docs/laplacian.rst +++ b/manual/sphinx/user_docs/laplacian.rst @@ -889,6 +889,17 @@ Notes: - The ShiftXderivs option must be true for this to work, since it assumes that :math:`g^{xz} = 0` +- Setting the option ``pctype = hypre`` seems to work well, if PETSc has been + compiled with the algebraic multigrid library hypre; this can be included by + passing the option ``--download-hypre`` to PETSc's ``configure`` script. + +An alternative discretization is available if the option ``finite_volume = +false`` is set. Then a finite-difference discretization very close to the one used when +calling ``A*Laplace_perp(f) + Grad_perp(A)*Grad_perp(f) + B*f`` is used. This also +supports non-orthogonal grids with :math:`g^{xy} \neq 0`. The difference is that when +:math:`g^{xy} \neq 0`, ``Laplace_perp`` calls ``D2DXDY(f)`` which applies a boundary +condition to ``dfdy = DDY(f)`` before calculating ``DDX(dfdy)`` with a slightly different +result than the way boundary conditions are applied in ``LaplaceXY``. .. 
_sec-LaplaceXZ: From 074cdec7a3dc3cbafc92f8981cf43fc7136c87bd Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 25 Aug 2019 18:43:54 +0200 Subject: [PATCH 008/428] LaplaceXY: fix pre-allocation at upper y-boundary --- src/invert/laplacexy/laplacexy.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index eb5fbd6a9c..ee062e9bed 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -324,8 +324,8 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) { const int localIndex = globalIndex(it.ind, localmesh->yend); ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] += 1; - o_nnz[localIndex] -= 1; + //d_nnz[localIndex] += 3; + o_nnz[localIndex] -= 3; } } if (localmesh->hasBndryUpperY()) { From e64fc14a75f387b62e6283a1a93bc838bca00c42 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 25 Aug 2019 18:55:30 +0200 Subject: [PATCH 009/428] Add outer-X corner boundary conditions --- src/invert/laplacexy/laplacexy.cxx | 34 ++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index ee062e9bed..5493f7ab4a 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -858,6 +858,23 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); } } + if (localmesh->lastX()) { + if (y_bndry_dirichlet) { + // Both Dirichlet + throw BoutException("Dirichlet y-boundary-condition not supported for mixed " + "second derivatives."); + } else { + // Neumann y-bc + // f(xe+1,ys-1) = f(xe+1,ys) + PetscScalar val = 1.0; + int row = globalIndex(localmesh->xend+1, localmesh->ystart-1); + MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); + + val = -1.0; + int col = globalIndex(localmesh->xend+1, localmesh->ystart); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } + } } if (localmesh->hasBndryUpperY()) { if (localmesh->firstX()) { @@ -877,6 +894,23 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); } } + if (localmesh->lastX()) { + if (y_bndry_dirichlet) { + // Both Dirichlet + throw BoutException("Dirichlet y-boundary-condition not supported for mixed " + "second derivatives."); + } else { + // Neumann y-bc + // f(xe+1,ys-1) = f(xe+1,ys) + PetscScalar val = 1.0; + int row = globalIndex(localmesh->xend+1, localmesh->yend+1); + MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); + + val = -1.0; + int col = globalIndex(localmesh->xend+1, localmesh->yend); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } + } } } From b97c0e23805de395edf30b530e4b5e17180d8b44 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 25 Aug 2019 18:51:41 +0200 Subject: [PATCH 010/428] Option for 'free_o3' y-boundary condition in LaplaceXY --- include/bout/invert/laplacexy.hxx | 2 +- src/invert/laplacexy/laplacexy.cxx | 217 ++++++++++++++++++++++++----- 2 files changed, 184 insertions(+), 35 deletions(-) diff --git a/include/bout/invert/laplacexy.hxx b/include/bout/invert/laplacexy.hxx index 96f3cfbd6d..82839992a4 100644 --- a/include/bout/invert/laplacexy.hxx +++ b/include/bout/invert/laplacexy.hxx @@ -133,7 +133,7 @@ private: // Boundary conditions bool x_inner_dirichlet; // Dirichlet on inner X boundary? bool x_outer_dirichlet; // Dirichlet on outer X boundary? 
- bool y_bndry_dirichlet; // Dirichlet on Y boundary? + std::string y_bndry{"neumann"}; // Bonudary condition for y-boundary // Location of the rhs and solution CELL_LOC location; diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index 5493f7ab4a..c58e454106 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -42,6 +42,37 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) "Use finite volume rather than finite difference discritisation." ).withDefault(true); + /////////////////////////////////////////////////// + // Boundary condititions options + if (localmesh->periodicY(localmesh->xstart)) { + // Periodic in Y, so in the core + opt->get("core_bndry_dirichlet", x_inner_dirichlet, false); + } else { + // Non-periodic, so in the PF region + opt->get("pf_bndry_dirichlet", x_inner_dirichlet, true); + } + if ((*opt)["y_bndry_dirichlet"].isSet()) { + bool y_bndry_dirichlet; + opt->get("y_bndry_dirichlet", y_bndry_dirichlet, false); + if (y_bndry_dirichlet) { + y_bndry = "dirichlet"; + } else { + y_bndry = "neumann"; + } + } else { + opt->get("y_bndry", y_bndry, "neumann"); + } + + // Check value of y_bndry is a supported option + if (not( + y_bndry == "dirichlet" + or y_bndry == "neumann" + or y_bndry == "free_o3")) { + + throw BoutException("Unrecognized option '%s' for laplacexy:ybndry", + y_bndry.c_str()); + } + // Get MPI communicator MPI_Comm comm = BoutComm::get(); @@ -268,6 +299,10 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) } } // Y boundaries + int y_bndry_stencil_size = 2; + if (y_bndry == "free_o3") { + y_bndry_stencil_size = 4; + } for(int x=localmesh->xstart; x <=localmesh->xend; x++) { // Default to no boundary @@ -292,7 +327,7 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) { const int localIndex = globalIndex(it.ind, localmesh->ystart - 1); ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] = 2; // Diagonal sub-matrix + d_nnz[localIndex] = y_bndry_stencil_size; // Diagonal sub-matrix o_nnz[localIndex] = 0; // Off-diagonal sub-matrix } { @@ -306,19 +341,19 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) if (localmesh->firstX()) { const int localIndex = globalIndex(localmesh->xstart-1, localmesh->ystart-1); ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] = 2; + d_nnz[localIndex] = y_bndry_stencil_size; } if (localmesh->lastX()) { const int localIndex = globalIndex(localmesh->xend+1, localmesh->ystart-1); ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] = 2; + d_nnz[localIndex] = y_bndry_stencil_size; } } for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { { const int localIndex = globalIndex(it.ind, localmesh->yend + 1); ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] = 2; // Diagonal sub-matrix + d_nnz[localIndex] = y_bndry_stencil_size; // Diagonal sub-matrix o_nnz[localIndex] = 0; // Off-diagonal sub-matrix } { @@ -332,12 +367,12 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) if (localmesh->firstX()) { const int localIndex = globalIndex(localmesh->xstart-1, localmesh->yend+1); ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] = 2; + d_nnz[localIndex] = y_bndry_stencil_size; } if (localmesh->lastX()) { const int localIndex = globalIndex(localmesh->xend+1, localmesh->yend+1); ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] = 2; + d_nnz[localIndex] 
= y_bndry_stencil_size; } } } @@ -482,17 +517,6 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) KSPSetFromOptions( ksp ); - /////////////////////////////////////////////////// - // Decide boundary condititions - if (localmesh->periodicY(localmesh->xstart)) { - // Periodic in Y, so in the core - opt->get("core_bndry_dirichlet", x_inner_dirichlet, false); - } else { - // Non-periodic, so in the PF region - opt->get("pf_bndry_dirichlet", x_inner_dirichlet, true); - } - opt->get("y_bndry_dirichlet", y_bndry_dirichlet, false); - /////////////////////////////////////////////////// // Including Y derivatives? @@ -792,7 +816,7 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { } } - if(y_bndry_dirichlet) { + if (y_bndry == "dirichlet") { // Dirichlet on Y boundaries for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { int row = globalIndex(it.ind, localmesh->ystart-1); @@ -811,16 +835,16 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { int col = globalIndex(it.ind, localmesh->yend); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); } - }else { + } else if (y_bndry == "neumann") { // Neumann on Y boundaries for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { int row = globalIndex(it.ind, localmesh->ystart-1); PetscScalar val = 1.0; MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); - + val = -1.0; int col = globalIndex(it.ind, localmesh->ystart); - + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); } @@ -828,11 +852,50 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { int row = globalIndex(it.ind, localmesh->yend+1); PetscScalar val = 1.0; MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); - + val = -1.0; int col = globalIndex(it.ind, localmesh->yend); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); } + } else if (y_bndry == "free_o3") { + // 'free_o3' extrapolating boundary condition on Y boundaries + for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { + int row = globalIndex(it.ind, localmesh->ystart-1); + PetscScalar val = -1.0; + MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); + + val = 3.0; + int col = globalIndex(it.ind, localmesh->ystart); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + + val = -3.0; + col = globalIndex(it.ind, localmesh->ystart+1); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + + val = 1.0; + col = globalIndex(it.ind, localmesh->ystart+2); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } + + for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { + int row = globalIndex(it.ind, localmesh->yend+1); + PetscScalar val = -1.0; + MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); + + val = 3.0; + int col = globalIndex(it.ind, localmesh->yend); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + + val = -3.0; + col = globalIndex(it.ind, localmesh->yend-1); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + + val = 1.0; + col = globalIndex(it.ind, localmesh->yend-2); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } + } else { + throw BoutException("Unsupported option for y_bndry"); } if (not finite_volume) { @@ -842,11 +905,11 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { if (localmesh->hasBndryLowerY()) { if (localmesh->firstX()) { - if (y_bndry_dirichlet) { + if (y_bndry == "dirichlet") { // Both Dirichlet throw BoutException("Dirichlet y-boundary-condition not supported for mixed " "second derivatives."); - } else { + } else if 
(y_bndry == "neumann") { // Neumann y-bc // f(xs-1,ys-1) = f(xs-1,ys) PetscScalar val = 1.0; @@ -856,14 +919,34 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { val = -1.0; int col = globalIndex(localmesh->xstart-1, localmesh->ystart); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } else if (y_bndry == "free_o3") { + // 'free_o3' extrapolating boundary condition on Y boundaries + // f(xs-1,ys-1) = 3*f(xs-1,ys) - 3*f(xs-1,ys+1) + f(xs-1,ys+2) + PetscScalar val = -1.0; + int row = globalIndex(localmesh->xstart-1, localmesh->ystart-1); + MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); + + val = 3.0; + int col = globalIndex(localmesh->xstart-1, localmesh->ystart); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + + val = -3.0; + col = globalIndex(localmesh->xstart-1, localmesh->ystart+1); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + + val = 1.0; + col = globalIndex(localmesh->xstart-1, localmesh->ystart+2); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } else { + throw BoutException("Unsupported option for y_bndry"); } } if (localmesh->lastX()) { - if (y_bndry_dirichlet) { + if (y_bndry == "dirichlet") { // Both Dirichlet throw BoutException("Dirichlet y-boundary-condition not supported for mixed " "second derivatives."); - } else { + } else if (y_bndry == "neumann") { // Neumann y-bc // f(xe+1,ys-1) = f(xe+1,ys) PetscScalar val = 1.0; @@ -873,16 +956,37 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { val = -1.0; int col = globalIndex(localmesh->xend+1, localmesh->ystart); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } else if (y_bndry == "free_o3") { + // 'free_o3' extrapolating boundary condition on Y boundaries + // f(xe+1,ys-1) = 3*f(xe+1,ys) - 3*f(xe+1,ys+1) + f(xe+1,ys+2) + PetscScalar val = -1.0; + int row = globalIndex(localmesh->xend+1, localmesh->ystart-1); + MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); + + val = 3.0; + int col = globalIndex(localmesh->xend+1, localmesh->ystart); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + + val = -3.0; + col = globalIndex(localmesh->xend+1, localmesh->ystart+1); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + + val = 1.0; + col = globalIndex(localmesh->xend+1, localmesh->ystart+2); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } else { + throw BoutException("Unsupported option for y_bndry"); } + } } if (localmesh->hasBndryUpperY()) { if (localmesh->firstX()) { - if (y_bndry_dirichlet) { + if (y_bndry == "dirichlet") { // Both Dirichlet throw BoutException("Dirichlet y-boundary-condition not supported for mixed " "second derivatives."); - } else { + } else if (y_bndry == "neumann") { // Neumann y-bc // f(xs-1,ys-1) = f(xs-1,ys) PetscScalar val = 1.0; @@ -892,14 +996,34 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { val = -1.0; int col = globalIndex(localmesh->xstart-1, localmesh->yend); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } else if (y_bndry == "free_o3") { + // 'free_o3' extrapolating boundary condition on Y boundaries + // f(xs-1,ys-1) = 3*f(xs-1,ys) - 3*f(xs-1,ys+1) + f(xs-1,ys+2) + PetscScalar val = -1.0; + int row = globalIndex(localmesh->xstart-1, localmesh->yend+1); + MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); + + val = 3.0; + int col = globalIndex(localmesh->xstart-1, localmesh->yend); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + + val = -3.0; + col = globalIndex(localmesh->xstart-1, localmesh->yend-1); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); 
+ + val = 3.0; + col = globalIndex(localmesh->xstart-1, localmesh->yend-2); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } else { + throw BoutException("Unsupported option for y_bndry"); } } if (localmesh->lastX()) { - if (y_bndry_dirichlet) { + if (y_bndry == "dirichlet") { // Both Dirichlet throw BoutException("Dirichlet y-boundary-condition not supported for mixed " "second derivatives."); - } else { + } else if (y_bndry == "neumann") { // Neumann y-bc // f(xe+1,ys-1) = f(xe+1,ys) PetscScalar val = 1.0; @@ -909,7 +1033,28 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { val = -1.0; int col = globalIndex(localmesh->xend+1, localmesh->yend); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } else if (y_bndry == "free_o3") { + // 'free_o3' extrapolating boundary condition on Y boundaries + // f(xe+1,ys-1) = 3*f(xe+1,ys) - 3*f(xe+1,ys+1) + f(xe+1,ys+2) + PetscScalar val = -1.0; + int row = globalIndex(localmesh->xend+1, localmesh->yend+1); + MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); + + val = 3.0; + int col = globalIndex(localmesh->xend+1, localmesh->yend); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + + val = -3.0; + col = globalIndex(localmesh->xend+1, localmesh->yend-1); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + + val = 1.0; + col = globalIndex(localmesh->xend+1, localmesh->yend-2); + MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); + } else { + throw BoutException("Unsupported option for y_bndry"); } + } } } @@ -1006,7 +1151,7 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { } } - if(y_bndry_dirichlet) { + if(y_bndry == "dirichlet") { for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { int ind = globalIndex(it.ind, localmesh->ystart-1); @@ -1026,7 +1171,7 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { val = 0.5*(x0(it.ind, localmesh->yend+1) + x0(it.ind, localmesh->yend)); VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); } - } else { + } else if (y_bndry == "neumann" or y_bndry == "free_o3") { // Y boundaries Neumann for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { int ind = globalIndex(it.ind, localmesh->ystart-1); @@ -1047,6 +1192,8 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { val = 0.0; VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); } + } else { + throw BoutException("Unsupported option for y_bndry"); } } else { // For finite-difference implementation pass boundary values in the same way as for @@ -1097,7 +1244,7 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { } } - if(y_bndry_dirichlet) { + if(y_bndry == "dirichlet") { for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { int ind = globalIndex(it.ind, localmesh->ystart-1); @@ -1126,7 +1273,7 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { and (localmesh->firstX() or localmesh->lastX())) { throw BoutException("Dirichlet not implemented for mixed-derivatives."); } - } else { + } else if (y_bndry == "neumann" or y_bndry == "free_o3") { // Y boundaries Neumann for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { int ind = globalIndex(it.ind, localmesh->ystart-1); @@ -1191,6 +1338,8 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); } } + } else { + throw BoutException("Unsupported option for y_bndry"); } } From 234ac721ec9298a78b5b581157a85f49335ba624 Mon 
Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 25 Aug 2019 23:03:11 +0200 Subject: [PATCH 011/428] Adding y_bndry=free_o3 to test-laplacexy --- tests/integrated/test-laplacexy/data/BOUT.inp | 2 +- tests/integrated/test-laplacexy/runtest | 48 +++++++++++++------ 2 files changed, 34 insertions(+), 16 deletions(-) diff --git a/tests/integrated/test-laplacexy/data/BOUT.inp b/tests/integrated/test-laplacexy/data/BOUT.inp index 2541197765..b9cd89b78c 100644 --- a/tests/integrated/test-laplacexy/data/BOUT.inp +++ b/tests/integrated/test-laplacexy/data/BOUT.inp @@ -31,7 +31,7 @@ rtol = 1.e-14 core_bndry_dirichlet = true pf_bndry_dirichlet = true -y_bndry_dirichlet = false +y_bndry = neumann [f] # make an input: diff --git a/tests/integrated/test-laplacexy/runtest b/tests/integrated/test-laplacexy/runtest index c06788efc2..f86a74bd74 100755 --- a/tests/integrated/test-laplacexy/runtest +++ b/tests/integrated/test-laplacexy/runtest @@ -7,7 +7,7 @@ #requires: petsc #requires: all_tests -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, shell_safe, launch, launch_safe from boutdata.collect import collect from sys import exit @@ -17,27 +17,35 @@ tol_orth = 5.e-8 # conditions are applied in LaplaceXY and the way they are applied in the D2DXDY() # operator called by Laplace_perp(). In D2DXDY(f) 'free_o3' boundary conditions are # applied to dfdy before calculating DDX(dfdy). -tol_nonorth = 1.e-5 +tol_nonorth = 2.e-5 -argslist = [#'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry_dirichlet=true', - 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry_dirichlet=false ' +argslist = [#'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=dirichlet', + 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=neumann ' 'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', - #'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry_dirichlet=true ' + 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=free_o3 ' + 'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=free_o3 f:bndry_ydown=free_o3', + #'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=dirichlet ' #'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=dirichlet f:bndry_ydown=dirichlet', - #'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry_dirichlet=false ' + #'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=neumann ' #'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', - #'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry_dirichlet=true ' + #'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=dirichlet ' #'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=dirichlet f:bndry_ydown=dirichlet', - #'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry_dirichlet=false ' + #'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=neumann ' #'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', - #'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry_dirichlet=true ' 
+ #'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=dirichlet ' #'f:bndry_xin=neumann f:bndry_xout=neumann f:bndry_yup=dirichlet f:bndry_ydown=dirichlet', - 'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry_dirichlet=false ' - 'f:bndry_xin=neumann f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', - 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry_dirichlet=false ' + 'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=neumann ' + 'f:bndry_xin=neumann f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann laplacexy:pctype=hypre', + 'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=free_o3 ' + 'f:bndry_xin=neumann f:bndry_xout=dirichlet f:bndry_yup=free_o3 f:bndry_ydown=free_o3 laplacexy:pctype=hypre', + 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=neumann ' 'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann b:function=.1', - 'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry_dirichlet=false ' - 'f:bndry_xin=neumann f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann b:function=.1', + 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=free_o3 ' + 'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=free_o3 f:bndry_ydown=free_o3 b:function=.1', + 'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=neumann ' + 'f:bndry_xin=neumann f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann b:function=.1 laplacexy:pctype=hypre', + 'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=free_o3 ' + 'f:bndry_xin=neumann f:bndry_xout=dirichlet f:bndry_yup=free_o3 f:bndry_ydown=free_o3 b:function=.1 laplacexy:pctype=hypre', ] print('Making LaplaceXY inversion test') @@ -57,7 +65,17 @@ for nproc in [8]: shell('rm data/BOUT.dmp.*.nc > /dev/null 2>&1') - s, out = launch_safe(cmd, nproc=nproc, pipe=True,verbose=True) + if 'hypre' in args: + s, out = launch(cmd, nproc=nproc, pipe=True,verbose=True) + if s == 134: + # PETSc did not recognise pctype option, probably means it + # was not compiled with hypre, so skip tests that need + # hypre to converge + print('hypre not available as pre-conditioner in PETSc. 
Skipping...') + continue + else: + s, out = launch_safe(cmd, nproc=nproc, pipe=True,verbose=True) + f = open('run.log.'+str(nproc), 'w') f.write(out) f.close() From 3639f590d17f1fae8739eb0b488f2f10e2461242 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 28 Aug 2019 17:08:00 +0200 Subject: [PATCH 012/428] LaplaceXY: fix free_o3 y-boundary condition at inner, upper corner --- src/invert/laplacexy/laplacexy.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index c58e454106..c857cdc000 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -1011,7 +1011,7 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { col = globalIndex(localmesh->xstart-1, localmesh->yend-1); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - val = 3.0; + val = 1.0; col = globalIndex(localmesh->xstart-1, localmesh->yend-2); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); } else { From 891f3b8d6e7337f3e353c402978dcb2946cb41e1 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 29 Aug 2019 13:30:58 +0100 Subject: [PATCH 013/428] LaplaceXY: non-uniform grid spacing correction when finite_volume=false --- src/invert/laplacexy/laplacexy.cxx | 13 +++++++++++-- tests/integrated/test-laplacexy/data/BOUT.inp | 4 ++-- tests/integrated/test-laplacexy/test-laplacexy.cxx | 14 +++++++++++++- 3 files changed, 26 insertions(+), 5 deletions(-) diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index c857cdc000..cd9a16a607 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -658,15 +658,19 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { BoutReal dx = coords->dx(x,y); // A*G1*dfdx - BoutReal val = A(x,y)*coords->G1(x,y)/(2.*dx); + BoutReal val = A(x, y)*coords->G1(x, y)/(2.*dx); xp = val; xm = -val; // A*g11*d2fdx2 - val = A(x,y)*coords->g11(x,y)/SQ(dx); + val = A(x, y)*coords->g11(x, y)/SQ(dx); xp += val; c = -2.*val; xm += val; + // Non-uniform grid correction + val = A(x, y)*coords->g11(x, y)*coords->d1_dx(x, y)/(2.*dx); + xp += val; + xm -= val; // g11*dAdx*dfdx val = coords->g11(x, y)*(A(x+1, y) - A(x-1, y))/(4.*SQ(dx)); @@ -696,6 +700,11 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { yp += val; c -= 2.*val; ym += val; + // Non-uniform mesh correction + val = A(x, y)*(coords->g22(x, y) - 1./coords->g_22(x,y)) + *coords->d1_dy(x, y)/(2.*dy); + yp += val; + ym -= val; // 2*A*g12*d2dfdxdy val = A(x, y)*coords->g12(x, y)/(2.*dx*dy); diff --git a/tests/integrated/test-laplacexy/data/BOUT.inp b/tests/integrated/test-laplacexy/data/BOUT.inp index b9cd89b78c..1065fe3cdf 100644 --- a/tests/integrated/test-laplacexy/data/BOUT.inp +++ b/tests/integrated/test-laplacexy/data/BOUT.inp @@ -4,8 +4,8 @@ mz = 1 nx = 132 ny = 128 -dx = 1./nx -dy = 40./ny +dx = (1.+.1*cos(pi*x))/nx +dy = 40.*(1.+.1*sin(y))/ny g11 = 1. g22 = 1. 
diff --git a/tests/integrated/test-laplacexy/test-laplacexy.cxx b/tests/integrated/test-laplacexy/test-laplacexy.cxx index c3c52451db..a2a1a1424c 100644 --- a/tests/integrated/test-laplacexy/test-laplacexy.cxx +++ b/tests/integrated/test-laplacexy/test-laplacexy.cxx @@ -26,15 +26,22 @@ #include #include #include +#include #include #include int main(int argc, char** argv) { BoutInitialise(argc, argv); + + auto coords = mesh->getCoordinates(); + + auto& opt = Options::root(); LaplaceXY laplacexy; + bool include_y_derivs = opt["laplacexy"]["include_y_derivs"]; + // Solving equations of the form // Div(A Grad_perp(f)) + B*f = rhs // A*Laplace_perp(f) + Grad_perp(A).Grad_perp(f) + B*f = rhs @@ -53,7 +60,12 @@ int main(int argc, char** argv) { //////////////////////////////////////////////////////////////////////////////////////// - Field2D rhs = a*Laplace_perp(f) + Grad_perp(a)*Grad_perp(f) + b*f; + Field2D rhs; + if (include_y_derivs) { + rhs = a*Laplace_perp(f) + Grad_perp(a)*Grad_perp(f) + b*f; + } else { + rhs = a*Delp2(f, CELL_DEFAULT, false) + coords->g11*DDX(a)*DDX(f) + b*f; + } laplacexy.setCoefs(a, b); From b05646087f6e6cca30bd35c42c37a0f1a510acbf Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 14 Sep 2019 22:04:27 +0100 Subject: [PATCH 014/428] Save input and recalculated rhs in test-laplacexy Also add a script to plot fields and errors in 2d. --- tests/integrated/test-laplacexy/plotcheck.py | 56 +++++++++++++++++++ .../test-laplacexy/test-laplacexy.cxx | 11 +++- 2 files changed, 66 insertions(+), 1 deletion(-) create mode 100755 tests/integrated/test-laplacexy/plotcheck.py diff --git a/tests/integrated/test-laplacexy/plotcheck.py b/tests/integrated/test-laplacexy/plotcheck.py new file mode 100755 index 0000000000..419612fdfa --- /dev/null +++ b/tests/integrated/test-laplacexy/plotcheck.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 + +from boutdata import collect +from matplotlib import pyplot +from sys import exit + +f = collect('f', path='data', yguards=True, info=False)[1:-1,1:-1] +sol = collect('sol', path='data', yguards=True, info=False)[1:-1,1:-1] +error = collect('error', path='data', yguards=True, info=False)[1:-1,1:-1] +absolute_error = collect('absolute_error', path='data', yguards=True, info=False)[1:-1,1:-1] + +# Note, cells closest to x-boundary in rhs and rhs_check may be slightly different because +# of different way x-boundary cells for D2DXDY are set: in LaplaceXY corner guard cells +# are set so that a 9-point stencil can be used; in the D2DXDY function (used in +# Laplace_perp) first dfdy=DDY(f) is calculated, communicated and has free_o3 x-boundary +# conditions applied, then DDX(dfdy) is returned. +# Therefore here exclude cells closest to the x-boundary so that the difference plotted +# should be small (as controlled by rtol, atol). 
+rhs = collect('rhs', path='data', yguards=True, info=False)[3:-3,2:-2] +rhs_check = collect('rhs_check', path='data', yguards=True, info=False)[3:-3,2:-2] + +pyplot.figure() + +pyplot.subplot(231) +pyplot.pcolormesh(f) +pyplot.title('f') +pyplot.colorbar() + +pyplot.subplot(232) +pyplot.pcolormesh(sol) +pyplot.title('sol') +pyplot.colorbar() + +pyplot.subplot(233) +pyplot.pcolormesh(error) +pyplot.title('error') +pyplot.colorbar() + +pyplot.subplot(234) +pyplot.pcolormesh(absolute_error) +pyplot.title('absolute_error') +pyplot.colorbar() + +pyplot.subplot(235) +pyplot.pcolormesh(rhs) +pyplot.title('rhs') +pyplot.colorbar() + +pyplot.subplot(236) +pyplot.pcolormesh(rhs - rhs_check) +pyplot.title('rhs diff') +pyplot.colorbar() + +pyplot.show() + +exit(0) diff --git a/tests/integrated/test-laplacexy/test-laplacexy.cxx b/tests/integrated/test-laplacexy/test-laplacexy.cxx index a2a1a1424c..4e85b48fef 100644 --- a/tests/integrated/test-laplacexy/test-laplacexy.cxx +++ b/tests/integrated/test-laplacexy/test-laplacexy.cxx @@ -60,7 +60,7 @@ int main(int argc, char** argv) { //////////////////////////////////////////////////////////////////////////////////////// - Field2D rhs; + Field2D rhs, rhs_check; if (include_y_derivs) { rhs = a*Laplace_perp(f) + Grad_perp(a)*Grad_perp(f) + b*f; } else { @@ -76,6 +76,13 @@ int main(int argc, char** argv) { output<<"Magnitude of maximum absolute error is "<communicate(sol); + if (include_y_derivs) { + rhs_check = a*Laplace_perp(sol) + Grad_perp(a)*Grad_perp(sol) + b*sol; + } else { + rhs_check = a*Delp2(sol, CELL_DEFAULT, false) + coords->g11*DDX(a)*DDX(sol) + b*sol; + } + dump.add(a, "a"); dump.add(b, "b"); dump.add(f, "f"); @@ -83,6 +90,8 @@ int main(int argc, char** argv) { dump.add(error, "error"); dump.add(absolute_error, "absolute_error"); dump.add(max_error, "max_error"); + dump.add(rhs, "rhs"); + dump.add(rhs_check, "rhs_check"); dump.write(); dump.close(); From 684da10d2ee392c355915b36529319e5ecf23f7d Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 15 Sep 2019 18:51:42 +0100 Subject: [PATCH 015/428] Use non-constant metric coefficients in test-laplacexy Ensures G1 and G2 are non-zero, so more terms in the matrix are tested. --- tests/integrated/test-laplacexy/data/BOUT.inp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integrated/test-laplacexy/data/BOUT.inp b/tests/integrated/test-laplacexy/data/BOUT.inp index 1065fe3cdf..66bf4b785a 100644 --- a/tests/integrated/test-laplacexy/data/BOUT.inp +++ b/tests/integrated/test-laplacexy/data/BOUT.inp @@ -7,12 +7,12 @@ ny = 128 dx = (1.+.1*cos(pi*x))/nx dy = 40.*(1.+.1*sin(y))/ny -g11 = 1. -g22 = 1. -g33 = 1. -g12 = 0.3 +g11 = 1. + .1*sin(2.*pi*x)*cos(y) +g22 = 1. + .05872*sin(2.*pi*x)*cos(y) +g33 = 1. + .115832*sin(2.*pi*x)*cos(y) +g12 = 0.3 + .08363*sin(2.*pi*x)*cos(y) g13 = 0. -g23 = 0.5 +g23 = 0.5 + .04672*sin(2.*pi*x)*cos(y) jyseps1_1 = 15 jyseps2_1 = 47 From a83b9a0e5850d5b36e8da97b8f89c9e28a6cb904 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 16 Sep 2019 19:52:49 +0200 Subject: [PATCH 016/428] Flip signs of coefficients for free_o3 boundary condition in LaplaceXY Solution should be identical since the rhs for the rows applying the boundary condition is 0, but convergence seems to be much better with a TORPEX X-point grid file this way, even though the test in test-laplacexy was OK before. Possibly having a positive diagonal matrix entry is a good thing? 
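For reference, with a zero right-hand side both sign conventions impose the same
third-order extrapolation. At the lower Y boundary the matrix row changes from

    -f(ystart-1) + 3*f(ystart) - 3*f(ystart+1) + f(ystart+2) = 0

to

    f(ystart-1) - 3*f(ystart) + 3*f(ystart+1) - f(ystart+2) = 0

i.e. f(ystart-1) = 3*f(ystart) - 3*f(ystart+1) + f(ystart+2) either way, but the
second form puts +1 rather than -1 on the diagonal.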
--- src/invert/laplacexy/laplacexy.cxx | 48 +++++++++++++++--------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index cd9a16a607..e4d1f044f6 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -870,36 +870,36 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { // 'free_o3' extrapolating boundary condition on Y boundaries for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { int row = globalIndex(it.ind, localmesh->ystart-1); - PetscScalar val = -1.0; + PetscScalar val = 1.0; MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); - val = 3.0; + val = -3.0; int col = globalIndex(it.ind, localmesh->ystart); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - val = -3.0; + val = 3.0; col = globalIndex(it.ind, localmesh->ystart+1); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - val = 1.0; + val = -1.0; col = globalIndex(it.ind, localmesh->ystart+2); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); } for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { int row = globalIndex(it.ind, localmesh->yend+1); - PetscScalar val = -1.0; + PetscScalar val = 1.0; MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); - val = 3.0; + val = -3.0; int col = globalIndex(it.ind, localmesh->yend); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - val = -3.0; + val = 3.0; col = globalIndex(it.ind, localmesh->yend-1); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - val = 1.0; + val = -1.0; col = globalIndex(it.ind, localmesh->yend-2); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); } @@ -931,19 +931,19 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { } else if (y_bndry == "free_o3") { // 'free_o3' extrapolating boundary condition on Y boundaries // f(xs-1,ys-1) = 3*f(xs-1,ys) - 3*f(xs-1,ys+1) + f(xs-1,ys+2) - PetscScalar val = -1.0; + PetscScalar val = 1.0; int row = globalIndex(localmesh->xstart-1, localmesh->ystart-1); MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); - val = 3.0; + val = -3.0; int col = globalIndex(localmesh->xstart-1, localmesh->ystart); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - val = -3.0; + val = 3.0; col = globalIndex(localmesh->xstart-1, localmesh->ystart+1); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - val = 1.0; + val = -1.0; col = globalIndex(localmesh->xstart-1, localmesh->ystart+2); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); } else { @@ -968,19 +968,19 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { } else if (y_bndry == "free_o3") { // 'free_o3' extrapolating boundary condition on Y boundaries // f(xe+1,ys-1) = 3*f(xe+1,ys) - 3*f(xe+1,ys+1) + f(xe+1,ys+2) - PetscScalar val = -1.0; + PetscScalar val = 1.0; int row = globalIndex(localmesh->xend+1, localmesh->ystart-1); MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); - val = 3.0; + val = -3.0; int col = globalIndex(localmesh->xend+1, localmesh->ystart); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - val = -3.0; + val = 3.0; col = globalIndex(localmesh->xend+1, localmesh->ystart+1); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - val = 1.0; + val = -1.0; col = globalIndex(localmesh->xend+1, localmesh->ystart+2); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); } else { @@ -1008,19 +1008,19 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { } else if (y_bndry == "free_o3") { // 'free_o3' extrapolating boundary 
condition on Y boundaries // f(xs-1,ys-1) = 3*f(xs-1,ys) - 3*f(xs-1,ys+1) + f(xs-1,ys+2) - PetscScalar val = -1.0; + PetscScalar val = 1.0; int row = globalIndex(localmesh->xstart-1, localmesh->yend+1); MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); - val = 3.0; + val = -3.0; int col = globalIndex(localmesh->xstart-1, localmesh->yend); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - val = -3.0; + val = 3.0; col = globalIndex(localmesh->xstart-1, localmesh->yend-1); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - val = 1.0; + val = -1.0; col = globalIndex(localmesh->xstart-1, localmesh->yend-2); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); } else { @@ -1045,19 +1045,19 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { } else if (y_bndry == "free_o3") { // 'free_o3' extrapolating boundary condition on Y boundaries // f(xe+1,ys-1) = 3*f(xe+1,ys) - 3*f(xe+1,ys+1) + f(xe+1,ys+2) - PetscScalar val = -1.0; + PetscScalar val = 1.0; int row = globalIndex(localmesh->xend+1, localmesh->yend+1); MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); - val = 3.0; + val = -3.0; int col = globalIndex(localmesh->xend+1, localmesh->yend); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - val = -3.0; + val = 3.0; col = globalIndex(localmesh->xend+1, localmesh->yend-1); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - val = 1.0; + val = -1.0; col = globalIndex(localmesh->xend+1, localmesh->yend-2); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); } else { From fc1b3b81ee5d9ff383f46f06e3932c0195f218c7 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 23 Sep 2019 18:37:35 +0100 Subject: [PATCH 017/428] Better initial guesses for free_o3 in LaplaceXY Previously used the same initial guess in the y-boundary points for 'free_o3' or 'neumann' boundary conditions. Now calculate initial guess for 'free_o3' by applying 'free_o3' boundary condition to the initial guess. 
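Concretely (matching the code below), the guess in the lower boundary cell becomes

    x0(x, ystart-1) = 3*x0(x, ystart) - 3*x0(x, ystart+1) + x0(x, ystart+2)

with the mirrored expression used at the upper boundary and in the corner cells.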
--- src/invert/laplacexy/laplacexy.cxx | 77 +++++++++++++++++++++++++++++- 1 file changed, 76 insertions(+), 1 deletion(-) diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index e4d1f044f6..81c6a0d36e 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -1282,7 +1282,7 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { and (localmesh->firstX() or localmesh->lastX())) { throw BoutException("Dirichlet not implemented for mixed-derivatives."); } - } else if (y_bndry == "neumann" or y_bndry == "free_o3") { + } else if (y_bndry == "neumann") { // Y boundaries Neumann for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { int ind = globalIndex(it.ind, localmesh->ystart-1); @@ -1343,6 +1343,81 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { PetscScalar val = x0(localmesh->xend+1, localmesh->yend); VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + } + } else if (y_bndry == "free_o3") { + // Y boundaries Neumann + for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { + int ind = globalIndex(it.ind, localmesh->ystart-1); + + // Use the value that would be set by applying the boundary condition to the + // initial guess + PetscScalar val = 3.*x0(it.ind, localmesh->ystart) + - 3.*x0(it.ind, localmesh->ystart+1) + x0(it.ind, localmesh->ystart+2); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + if (localmesh->hasBndryLowerY()) { + if (localmesh->firstX()) { + int ind = globalIndex(localmesh->xstart-1, localmesh->ystart-1); + + PetscScalar val = 3.*x0(localmesh->xstart-1, localmesh->ystart) + - 3.*x0(localmesh->xstart-1, localmesh->ystart+1) + + x0(localmesh->xstart-1, localmesh->ystart+2); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + if (localmesh->lastX()) { + int ind = globalIndex(localmesh->xend+1, localmesh->ystart-1); + + PetscScalar val = 3.*x0(localmesh->xend+1, localmesh->ystart) + - 3.*x0(localmesh->xend+1, localmesh->ystart+1) + + x0(localmesh->xend+1, localmesh->ystart+2); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + } + + for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { + int ind = globalIndex(it.ind, localmesh->yend+1); + + // Use the value that would be set by applying the boundary condition to the + // initial guess + PetscScalar val = 3.*x0(it.ind, localmesh->yend) + - 3.*x0(it.ind, localmesh->yend-1) + x0(it.ind, localmesh->yend-2); + VecSetValues( xs, 1, &ind, &val, INSERT_VALUES ); + + val = 0.0; + VecSetValues( bs, 1, &ind, &val, INSERT_VALUES ); + } + if (localmesh->hasBndryUpperY()) { + if (localmesh->firstX()) { + int ind = globalIndex(localmesh->xstart-1, localmesh->yend+1); + + PetscScalar val = 3.*x0(localmesh->xstart-1, localmesh->yend) + - 3.*x0(localmesh->xstart-1, localmesh->yend-1) + + x0(localmesh->xstart-1, localmesh->yend-2); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + if (localmesh->lastX()) { + int ind = globalIndex(localmesh->xend+1, localmesh->yend+1); + + PetscScalar val = 3.*x0(localmesh->xend+1, localmesh->yend) + - 3.*x0(localmesh->xend+1, localmesh->yend-1) + + x0(localmesh->xend+1, localmesh->yend-2); + 
VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + val = 0.0; VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); } From ad5ef8bf34410fd84d537d4db8657e338c71dc73 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 2 Oct 2019 21:22:38 +0200 Subject: [PATCH 018/428] LaplaceXY: use free_o3 y-boundary for corner cells of Dirichlet When using Dirichlet y-boundary conditions, the corner boundary cells need to be set, but there isn't a sensible value to use. Therefore use a free_o3 y-boundary condition at xstart-1 and xend+1 just to define the corner boundary cells. --- include/bout/invert/laplacexy.hxx | 2 +- src/invert/laplacexy/laplacexy.cxx | 163 +++++++++++++++++------- tests/integrated/test-laplacexy/runtest | 3 +- 3 files changed, 120 insertions(+), 48 deletions(-) diff --git a/include/bout/invert/laplacexy.hxx b/include/bout/invert/laplacexy.hxx index 82839992a4..8db4234b1b 100644 --- a/include/bout/invert/laplacexy.hxx +++ b/include/bout/invert/laplacexy.hxx @@ -133,7 +133,7 @@ private: // Boundary conditions bool x_inner_dirichlet; // Dirichlet on inner X boundary? bool x_outer_dirichlet; // Dirichlet on outer X boundary? - std::string y_bndry{"neumann"}; // Bonudary condition for y-boundary + std::string y_bndry{"neumann"}; // Boundary condition for y-boundary // Location of the rhs and solution CELL_LOC location; diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index 81c6a0d36e..532f6e5ffe 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -338,15 +338,30 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) } } if (localmesh->hasBndryLowerY()) { - if (localmesh->firstX()) { - const int localIndex = globalIndex(localmesh->xstart-1, localmesh->ystart-1); - ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] = y_bndry_stencil_size; - } - if (localmesh->lastX()) { - const int localIndex = globalIndex(localmesh->xend+1, localmesh->ystart-1); - ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] = y_bndry_stencil_size; + if (y_bndry == "dirichlet") { + // special handling for the corners, since we use a free_o3 y-boundary + // condition just in the corners when y_bndry=="dirichlet" + if (localmesh->firstX()) { + const int localIndex = globalIndex(localmesh->xstart-1, localmesh->ystart-1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = 4; + } + if (localmesh->lastX()) { + const int localIndex = globalIndex(localmesh->xend+1, localmesh->ystart-1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = 4; + } + } else { + if (localmesh->firstX()) { + const int localIndex = globalIndex(localmesh->xstart-1, localmesh->ystart-1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = y_bndry_stencil_size; + } + if (localmesh->lastX()) { + const int localIndex = globalIndex(localmesh->xend+1, localmesh->ystart-1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = y_bndry_stencil_size; + } } } for(RangeIterator it=localmesh->iterateBndryUpperY(); !it.isDone(); it++) { @@ -364,15 +379,30 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) } } if (localmesh->hasBndryUpperY()) { - if (localmesh->firstX()) { - const int localIndex = globalIndex(localmesh->xstart-1, localmesh->yend+1); - ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] = y_bndry_stencil_size; - } - if (localmesh->lastX()) { - const int localIndex = 
globalIndex(localmesh->xend+1, localmesh->yend+1); - ASSERT1((localIndex >= 0) && (localIndex < localN)); - d_nnz[localIndex] = y_bndry_stencil_size; + if (y_bndry == "dirichlet") { + // special handling for the corners, since we use a free_o3 y-boundary + // condition just in the corners when y_bndry=="dirichlet" + if (localmesh->firstX()) { + const int localIndex = globalIndex(localmesh->xstart-1, localmesh->yend+1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = 4; + } + if (localmesh->lastX()) { + const int localIndex = globalIndex(localmesh->xend+1, localmesh->yend+1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = 4; + } + } else { + if (localmesh->firstX()) { + const int localIndex = globalIndex(localmesh->xstart-1, localmesh->yend+1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = y_bndry_stencil_size; + } + if (localmesh->lastX()) { + const int localIndex = globalIndex(localmesh->xend+1, localmesh->yend+1); + ASSERT1((localIndex >= 0) && (localIndex < localN)); + d_nnz[localIndex] = y_bndry_stencil_size; + } } } } @@ -914,11 +944,7 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { if (localmesh->hasBndryLowerY()) { if (localmesh->firstX()) { - if (y_bndry == "dirichlet") { - // Both Dirichlet - throw BoutException("Dirichlet y-boundary-condition not supported for mixed " - "second derivatives."); - } else if (y_bndry == "neumann") { + if (y_bndry == "neumann") { // Neumann y-bc // f(xs-1,ys-1) = f(xs-1,ys) PetscScalar val = 1.0; @@ -928,9 +954,12 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { val = -1.0; int col = globalIndex(localmesh->xstart-1, localmesh->ystart); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - } else if (y_bndry == "free_o3") { + } else if (y_bndry == "free_o3" or y_bndry == "dirichlet") { // 'free_o3' extrapolating boundary condition on Y boundaries // f(xs-1,ys-1) = 3*f(xs-1,ys) - 3*f(xs-1,ys+1) + f(xs-1,ys+2) + // + // Use free_o3 at the corners for Dirichlet y-boundaries because we don't know + // what value to pass for the corner PetscScalar val = 1.0; int row = globalIndex(localmesh->xstart-1, localmesh->ystart-1); MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); @@ -951,11 +980,7 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { } } if (localmesh->lastX()) { - if (y_bndry == "dirichlet") { - // Both Dirichlet - throw BoutException("Dirichlet y-boundary-condition not supported for mixed " - "second derivatives."); - } else if (y_bndry == "neumann") { + if (y_bndry == "neumann") { // Neumann y-bc // f(xe+1,ys-1) = f(xe+1,ys) PetscScalar val = 1.0; @@ -965,9 +990,12 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { val = -1.0; int col = globalIndex(localmesh->xend+1, localmesh->ystart); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - } else if (y_bndry == "free_o3") { + } else if (y_bndry == "free_o3" or y_bndry == "dirichlet") { // 'free_o3' extrapolating boundary condition on Y boundaries // f(xe+1,ys-1) = 3*f(xe+1,ys) - 3*f(xe+1,ys+1) + f(xe+1,ys+2) + // + // Use free_o3 at the corners for Dirichlet y-boundaries because we don't know + // what value to pass for the corner PetscScalar val = 1.0; int row = globalIndex(localmesh->xend+1, localmesh->ystart-1); MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); @@ -991,11 +1019,7 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { } if (localmesh->hasBndryUpperY()) { if (localmesh->firstX()) { - if (y_bndry == 
"dirichlet") { - // Both Dirichlet - throw BoutException("Dirichlet y-boundary-condition not supported for mixed " - "second derivatives."); - } else if (y_bndry == "neumann") { + if (y_bndry == "neumann") { // Neumann y-bc // f(xs-1,ys-1) = f(xs-1,ys) PetscScalar val = 1.0; @@ -1005,9 +1029,12 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { val = -1.0; int col = globalIndex(localmesh->xstart-1, localmesh->yend); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - } else if (y_bndry == "free_o3") { + } else if (y_bndry == "free_o3" or y_bndry == "dirichlet") { // 'free_o3' extrapolating boundary condition on Y boundaries // f(xs-1,ys-1) = 3*f(xs-1,ys) - 3*f(xs-1,ys+1) + f(xs-1,ys+2) + // + // Use free_o3 at the corners for Dirichlet y-boundaries because we don't know + // what value to pass for the corner PetscScalar val = 1.0; int row = globalIndex(localmesh->xstart-1, localmesh->yend+1); MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); @@ -1028,11 +1055,7 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { } } if (localmesh->lastX()) { - if (y_bndry == "dirichlet") { - // Both Dirichlet - throw BoutException("Dirichlet y-boundary-condition not supported for mixed " - "second derivatives."); - } else if (y_bndry == "neumann") { + if (y_bndry == "neumann") { // Neumann y-bc // f(xe+1,ys-1) = f(xe+1,ys) PetscScalar val = 1.0; @@ -1042,9 +1065,12 @@ void LaplaceXY::setCoefs(const Field2D &A, const Field2D &B) { val = -1.0; int col = globalIndex(localmesh->xend+1, localmesh->yend); MatSetValues(MatA,1,&row,1,&col,&val,INSERT_VALUES); - } else if (y_bndry == "free_o3") { + } else if (y_bndry == "free_o3" or y_bndry == "dirichlet") { // 'free_o3' extrapolating boundary condition on Y boundaries // f(xe+1,ys-1) = 3*f(xe+1,ys) - 3*f(xe+1,ys+1) + f(xe+1,ys+2) + // + // Use free_o3 at the corners for Dirichlet y-boundaries because we don't know + // what value to pass for the corner PetscScalar val = 1.0; int row = globalIndex(localmesh->xend+1, localmesh->yend+1); MatSetValues(MatA,1,&row,1,&row,&val,INSERT_VALUES); @@ -1278,9 +1304,54 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); } - if ((localmesh->hasBndryLowerY() or localmesh->hasBndryUpperY()) - and (localmesh->firstX() or localmesh->lastX())) { - throw BoutException("Dirichlet not implemented for mixed-derivatives."); + // Use free_o3 for the corner boundary cells + if (localmesh->hasBndryLowerY()) { + if (localmesh->firstX()) { + int ind = globalIndex(localmesh->xstart-1, localmesh->ystart-1); + + PetscScalar val = 3.*x0(localmesh->xstart-1, localmesh->ystart) + - 3.*x0(localmesh->xstart-1, localmesh->ystart+1) + + x0(localmesh->xstart-1, localmesh->ystart+2); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + if (localmesh->lastX()) { + int ind = globalIndex(localmesh->xend+1, localmesh->ystart-1); + + PetscScalar val = 3.*x0(localmesh->xend+1, localmesh->ystart) + - 3.*x0(localmesh->xend+1, localmesh->ystart+1) + + x0(localmesh->xend+1, localmesh->ystart+2); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + } + if (localmesh->hasBndryUpperY()) { + if (localmesh->firstX()) { + int ind = globalIndex(localmesh->xstart-1, localmesh->yend+1); + + PetscScalar val = 3.*x0(localmesh->xstart-1, localmesh->yend) + - 3.*x0(localmesh->xstart-1, localmesh->yend-1) + + x0(localmesh->xstart-1, 
localmesh->yend-2); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } + if (localmesh->lastX()) { + int ind = globalIndex(localmesh->xend+1, localmesh->yend+1); + + PetscScalar val = 3.*x0(localmesh->xend+1, localmesh->yend) + - 3.*x0(localmesh->xend+1, localmesh->yend-1) + + x0(localmesh->xend+1, localmesh->yend-2); + VecSetValues(xs, 1, &ind, &val, INSERT_VALUES); + + val = 0.0; + VecSetValues(bs, 1, &ind, &val, INSERT_VALUES); + } } } else if (y_bndry == "neumann") { // Y boundaries Neumann @@ -1348,7 +1419,7 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { } } } else if (y_bndry == "free_o3") { - // Y boundaries Neumann + // Y boundaries free_o3 for(RangeIterator it=localmesh->iterateBndryLowerY(); !it.isDone(); it++) { int ind = globalIndex(it.ind, localmesh->ystart-1); diff --git a/tests/integrated/test-laplacexy/runtest b/tests/integrated/test-laplacexy/runtest index f86a74bd74..2cfda89df4 100755 --- a/tests/integrated/test-laplacexy/runtest +++ b/tests/integrated/test-laplacexy/runtest @@ -19,7 +19,8 @@ tol_orth = 5.e-8 # applied to dfdy before calculating DDX(dfdy). tol_nonorth = 2.e-5 -argslist = [#'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=dirichlet', +argslist = ['laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=dirichlet ' + 'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=dirichlet f:bndry_ydown=dirichlet', 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=neumann ' 'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=free_o3 ' From 2e5e15185e998ba61a4e57df0ffbee15199df6a7 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 17 Oct 2019 11:43:42 +0200 Subject: [PATCH 019/428] LaplaceXY: optionally save performance monitoring Calculate and save the average number of iterations taken by the KSP solver for each output timestep, if the savePerformance() method has been called. --- include/bout/invert/laplacexy.hxx | 52 ++++++++++++++++++++++++++++++ src/invert/laplacexy/laplacexy.cxx | 37 ++++++++++++++++++++- 2 files changed, 88 insertions(+), 1 deletion(-) diff --git a/include/bout/invert/laplacexy.hxx b/include/bout/invert/laplacexy.hxx index 8db4234b1b..b04591a763 100644 --- a/include/bout/invert/laplacexy.hxx +++ b/include/bout/invert/laplacexy.hxx @@ -39,6 +39,8 @@ #warning LaplaceXY requires PETSc. No LaplaceXY available #include +#include "bout/solver.hxx" +#include "datafile.hxx" #include #include @@ -57,12 +59,17 @@ public: const Field2D solve(const Field2D& UNUSED(rhs), const Field2D& UNUSED(x0)) { throw BoutException("LaplaceXY requires PETSc. No LaplaceXY available"); } + void savePerformance(Datafile&, Solver&, std::string) { + throw BoutException("LaplaceXY requires PETSc. No LaplaceXY available"); + } }; #else // BOUT_HAS_PETSC #include #include +#include "bout/solver.hxx" +#include "datafile.hxx" #include #include "utils.hxx" @@ -108,6 +115,12 @@ public: * and should not be called by external users */ int precon(Vec x, Vec y); + + /*! 
+ * If this method is called, save some performance monitoring information + */ + void savePerformance(Datafile& outputfile, Solver& solver, + std::string name = ""); private: PetscLib lib; ///< Requires PETSc library @@ -117,6 +130,9 @@ private: PC pc; ///< Preconditioner Mesh *localmesh; ///< The mesh this operates on, provides metrics and communication + + static int instance_count; + int my_id = 0; // Preconditioner int xstart, xend; @@ -160,6 +176,42 @@ private: */ int globalIndex(int x, int y); Field2D indexXY; ///< Global index (integer stored as BoutReal) + + // Save performance information? + bool save_performance = false; + + // Running average of number of iterations taken for solve in each output timestep + BoutReal average_iterations = 0.; + + // Variable to store the final result of average_iterations, since output is + // written after all other monitors have been called, and average_iterations + // must be reset in the monitor + BoutReal output_average_iterations = 0.; + + // Running total of number of calls to the solver in each output timestep + int n_calls = 0; + + // Monitor class used to reset performance-monitoring variables for a new + // output timestep + friend class LaplaceXYMonitor; + class LaplaceXYMonitor : public Monitor { + public: + LaplaceXYMonitor(LaplaceXY& owner) : laplacexy(owner) {} + + int call(Solver*, BoutReal, int, int) { + laplacexy.output_average_iterations = laplacexy.average_iterations; + + laplacexy.n_calls = 0; + laplacexy.average_iterations = 0.; + + return 0; + } + private: + // LaplaceXY object that this monitor belongs to + LaplaceXY& laplacexy; + }; + + LaplaceXYMonitor monitor; }; #endif // BOUT_HAS_PETSC diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index 532f6e5ffe..561853bae9 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -29,10 +29,15 @@ static PetscErrorCode laplacePCapply(PC pc,Vec x,Vec y) { PetscFunctionReturn(s->precon(x, y)); } +int LaplaceXY::instance_count = 0; + LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) - : localmesh(m==nullptr ? bout::globals::mesh : m), location(loc) { + : localmesh(m==nullptr ? bout::globals::mesh : m), location(loc), monitor(*this) { Timer timer("invert"); + instance_count++; + my_id = instance_count; + if (opt == nullptr) { // If no options supplied, use default opt = &(Options::root()["laplacexy"]); @@ -1515,6 +1520,17 @@ const Field2D LaplaceXY::solve(const Field2D &rhs, const Field2D &x0) { if(reason <= 0) { throw BoutException("LaplaceXY failed to converge. 
Reason %d", reason); } + + if (save_performance) { + // Update performance monitoring information + n_calls++; + + int iterations = 0; + KSPGetIterationNumber(ksp, &iterations); + + average_iterations = BoutReal(n_calls - 1)/BoutReal(n_calls)*average_iterations + + BoutReal(iterations)/BoutReal(n_calls); + } ////////////////////////// // Copy data into result @@ -1694,4 +1710,23 @@ int LaplaceXY::globalIndex(int x, int y) { // Get the index from a Field2D, round to integer return static_cast(std::round(indexXY(x, y))); } + +void LaplaceXY::savePerformance(Datafile& output_file, Solver& solver, + std::string name) { + // set flag so that performance monitoring values are calculated + save_performance = true; + + // add values to be saved to the output + if (name == "") { + name = "laplacexy"; + if (my_id > 1) { + name += std::to_string(my_id); + } + } + output_file.addRepeat(output_average_iterations, name + "_average_iterations"); + + // add monitor to reset counters/averages for new output timestep + // monitor added to back of queue, so that values are reset after being saved + solver.addMonitor(&monitor, Solver::BACK); +} #endif // BOUT_HAS_PETSC From 11afbbc8e171800463c9ad2dcbc28c6313ff87f0 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 20 Jul 2019 23:31:40 +0100 Subject: [PATCH 020/428] Options::overrideDefault() method Allows user to set a custom default for an option, as long as the method is called before the option is fetched using withDefault(). Will support a macro to allow users to override any library defaults. --- include/options.hxx | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/include/options.hxx b/include/options.hxx index 2d9936e616..d77e7dd8f2 100644 --- a/include/options.hxx +++ b/include/options.hxx @@ -473,6 +473,28 @@ public: return val; } + /// Allow the user to override defaults set later, also used by the + /// BOUT_OVERRIDE_DEFAULT_OPTION. + template T overrideDefault(T def) { + + // Set the type + attributes["type"] = bout::utils::typeName(); + + if (!is_value) { + // Option not found + assign(def, "user_default"); + value_used = true; // Mark the option as used + is_value = true; // Prevent this default being replaced by setDefault() + + output_info << _("\tOption ") << full_name << " = " << def << " (" << "user_default" + << ")" << std::endl; + return def; + } + + // Return value of this option as type 'T' + return as(def); + } + /// Get the parent Options object Options &parent() { if (parent_instance == nullptr) { From 6b30c5fb20cdf0d2d0b85689fac7f96634757fc3 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 21 Jul 2019 12:52:41 +0100 Subject: [PATCH 021/428] Do not output message from Options::overrideDefault The value of the option will be output where it is used, so output from overrideDefault is just duplication. 
--- include/options.hxx | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/options.hxx b/include/options.hxx index d77e7dd8f2..1fb46dfef7 100644 --- a/include/options.hxx +++ b/include/options.hxx @@ -486,8 +486,6 @@ public: value_used = true; // Mark the option as used is_value = true; // Prevent this default being replaced by setDefault() - output_info << _("\tOption ") << full_name << " = " << def << " (" << "user_default" - << ")" << std::endl; return def; } From 89e9f9b51020ca6fa36aea0abd1255ae2986229e Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 21 Jul 2019 12:55:15 +0100 Subject: [PATCH 022/428] Do not return value from Options::overrideDefault The overrideDefault method should not be used to get the value of an option - it does not make sense to do so because there is already the withDefault method. So there is no need to return a value from the method - removing the return reduces the number of lines in the method and avoids potential confusion about where it should be used. Also do not set value_used=true in overrideDefault, since it is possible that the value might never be used (unlike in withDefault). --- include/options.hxx | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/include/options.hxx b/include/options.hxx index 1fb46dfef7..18aadf8d94 100644 --- a/include/options.hxx +++ b/include/options.hxx @@ -475,7 +475,7 @@ public: /// Allow the user to override defaults set later, also used by the /// BOUT_OVERRIDE_DEFAULT_OPTION. - template T overrideDefault(T def) { + template void overrideDefault(T def) { // Set the type attributes["type"] = bout::utils::typeName(); @@ -483,14 +483,8 @@ public: if (!is_value) { // Option not found assign(def, "user_default"); - value_used = true; // Mark the option as used is_value = true; // Prevent this default being replaced by setDefault() - - return def; } - - // Return value of this option as type 'T' - return as(def); } /// Get the parent Options object From 8c47a1a95b2ef8cbabe0f830b670d25565b0840e Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 1 Nov 2019 16:45:01 +0000 Subject: [PATCH 023/428] Allow compound option names when getting options Allow sections to be given in the string, separated with ":", e.g. Options::root()["section:subsection:myopt"] --- manual/sphinx/user_docs/bout_options.rst | 4 ++++ src/sys/options.cxx | 12 ++++++++++++ tests/unit/sys/test_options.cxx | 22 ++++++++++++++++++++++ 3 files changed, 38 insertions(+) diff --git a/manual/sphinx/user_docs/bout_options.rst b/manual/sphinx/user_docs/bout_options.rst index 500b1ccc46..c07ea096c5 100644 --- a/manual/sphinx/user_docs/bout_options.rst +++ b/manual/sphinx/user_docs/bout_options.rst @@ -489,6 +489,10 @@ or just:: options["mysection"]["myswitch"] = true; +Names including sections, subsections, etc. can be specified using ``":"`` as a +separator, e.g.:: + options["mysection:mysubsection:myswitch"] = true; + To get options, they can be assigned to a variable:: int nout = options["nout"]; diff --git a/src/sys/options.cxx b/src/sys/options.cxx index 0683cd2f17..fb5a89500e 100644 --- a/src/sys/options.cxx +++ b/src/sys/options.cxx @@ -47,6 +47,12 @@ Options &Options::operator[](const std::string &name) { return *this; } + // If name is compound, e.g. 
"section:subsection", then split the name + auto subsection_split = name.find(":"); + if (subsection_split != std::string::npos) { + return (*this)[name.substr(0, subsection_split)][name.substr(subsection_split+1)]; + } + // Find and return if already exists auto it = children.find(lowercase(name)); if (it != children.end()) { @@ -76,6 +82,12 @@ const Options &Options::operator[](const std::string &name) const { return *this; } + // If name is compound, e.g. "section:subsection", then split the name + auto subsection_split = name.find(":"); + if (subsection_split != std::string::npos) { + return (*this)[name.substr(0, subsection_split)][name.substr(subsection_split+1)]; + } + // Find and return if already exists auto it = children.find(lowercase(name)); if (it == children.end()) { diff --git a/tests/unit/sys/test_options.cxx b/tests/unit/sys/test_options.cxx index 8c8fae0e00..a27b4a77bb 100644 --- a/tests/unit/sys/test_options.cxx +++ b/tests/unit/sys/test_options.cxx @@ -65,6 +65,28 @@ TEST_F(OptionsTest, IsSectionNotCaseSensitive) { ASSERT_TRUE(options.isSection("Subsection")); } +TEST_F(OptionsTest, CompoundName) { + Options options; + + // make sure options is initialized as a section + options["testkey"] = 1.; + + ASSERT_TRUE(options.isSection()); + ASSERT_FALSE(options["testkey"].isSection()); + ASSERT_TRUE(options.isSection("")); + ASSERT_FALSE(options.isSection("subsection")); + + options["subsection:testkey"] = 1.; + + ASSERT_TRUE(options.isSection("subsection")); + + BoutReal value = options["subsection"]["testkey"]; + EXPECT_EQ(value, 1.); + + BoutReal value2 = options["subsection:testkey"]; + EXPECT_EQ(value2, 1.); +} + TEST_F(OptionsTest, SetGetInt) { Options options; options.set("int_key", 42, "code"); From 7ac2295461f56176543116f9d1e9f733efed9093 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 1 Nov 2019 16:58:27 +0000 Subject: [PATCH 024/428] Return a value from Options::overrideDefault() Allows global variable to be created so that overrideDefault() can be called before main(), allowing the user to change library default options before they are used. --- include/options.hxx | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/include/options.hxx b/include/options.hxx index 18aadf8d94..4a174a9620 100644 --- a/include/options.hxx +++ b/include/options.hxx @@ -475,7 +475,7 @@ public: /// Allow the user to override defaults set later, also used by the /// BOUT_OVERRIDE_DEFAULT_OPTION. - template void overrideDefault(T def) { + template T overrideDefault(T def) { // Set the type attributes["type"] = bout::utils::typeName(); @@ -484,7 +484,10 @@ public: // Option not found assign(def, "user_default"); is_value = true; // Prevent this default being replaced by setDefault() + return def; } + + return as(); } /// Get the parent Options object From 8305e3d406a4d8be37a4aab09471acfa61a18956 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 1 Nov 2019 17:05:16 +0000 Subject: [PATCH 025/428] Define macro to allow user to override library defaults for options BOUT_OVERRIDE_DEFAULT_OPTION(name, value) can be called in global namespace to override the default for the option "name". 
--- include/options.hxx | 7 +++++++ manual/sphinx/user_docs/bout_options.rst | 10 ++++++++++ 2 files changed, 17 insertions(+) diff --git a/include/options.hxx b/include/options.hxx index 4a174a9620..42b6a5f455 100644 --- a/include/options.hxx +++ b/include/options.hxx @@ -741,4 +741,11 @@ template <> Field3D Options::as(const Field3D& similar_to) const; Options::getRoot()->getSection("all")->get(#var, var, def); \ }} \ +/// Define for over-riding library defaults for options, should be called in global +/// namespace so that the new default is set before main() is called. +#define BOUT_OVERRIDE_DEFAULT_OPTION(name, value) \ + namespace { \ + const auto user_default##__FILE__##__LINE__ = \ + Options::root()[name].overrideDefault(value); } \ + #endif // __OPTIONS_H__ diff --git a/manual/sphinx/user_docs/bout_options.rst b/manual/sphinx/user_docs/bout_options.rst index c07ea096c5..b1c9bde6b5 100644 --- a/manual/sphinx/user_docs/bout_options.rst +++ b/manual/sphinx/user_docs/bout_options.rst @@ -570,6 +570,16 @@ This string is stored in the attributes of the option:: std::string docstring = options["value"].attributes["doc"]; +Overriding library defaults +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +BOUT++ sets defaults for options controlling the mesh, etc. A physics model (or +other user code) can override these defaults by using the convenience macro +BOUT_OVERRIDE_DEFAULT_OPTION, for example if you want to change the default +value of ``mesh::staggergrids`` from false to true, put (outside any +class/function body):: + + BOUT_OVERRIDE_DEFAULT_OPTION("mesh:staggergrids", true); Older interface ~~~~~~~~~~~~~~~ From a19ea60672aeef2a126af117a2824ad726998147 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 2 Nov 2019 21:48:32 +0000 Subject: [PATCH 026/428] Avoid re-using option names/values in unit test --- tests/unit/sys/test_options.cxx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/unit/sys/test_options.cxx b/tests/unit/sys/test_options.cxx index a27b4a77bb..331ae37901 100644 --- a/tests/unit/sys/test_options.cxx +++ b/tests/unit/sys/test_options.cxx @@ -69,22 +69,22 @@ TEST_F(OptionsTest, CompoundName) { Options options; // make sure options is initialized as a section - options["testkey"] = 1.; + options["compoundkey"] = 321.; ASSERT_TRUE(options.isSection()); - ASSERT_FALSE(options["testkey"].isSection()); + ASSERT_FALSE(options["compoundkey"].isSection()); ASSERT_TRUE(options.isSection("")); - ASSERT_FALSE(options.isSection("subsection")); + ASSERT_FALSE(options.isSection("compoundsubsection")); - options["subsection:testkey"] = 1.; + options["compoundsubsection:compoundkey"] = 321.; - ASSERT_TRUE(options.isSection("subsection")); + ASSERT_TRUE(options.isSection("compoundsubsection")); - BoutReal value = options["subsection"]["testkey"]; - EXPECT_EQ(value, 1.); + BoutReal value = options["compoundsubsection"]["compoundkey"]; + EXPECT_EQ(value, 321.); - BoutReal value2 = options["subsection:testkey"]; - EXPECT_EQ(value2, 1.); + BoutReal value2 = options["compoundsubsection:compoundkey"]; + EXPECT_EQ(value2, 321.); } TEST_F(OptionsTest, SetGetInt) { From 830f29424414d656c8c677a9167bc242543af1ce Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 2 Nov 2019 22:03:17 +0000 Subject: [PATCH 027/428] Unit test for Options::overrideDefault() --- tests/unit/sys/test_options.cxx | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/unit/sys/test_options.cxx b/tests/unit/sys/test_options.cxx index 331ae37901..2f1550cb17 100644 --- 
a/tests/unit/sys/test_options.cxx +++ b/tests/unit/sys/test_options.cxx @@ -389,6 +389,16 @@ TEST_F(OptionsTest, InconsistentDefaultValueOptions) { EXPECT_EQ(value, 0); } +TEST_F(OptionsTest, OverrideDefaultValueOptions) { + Options options; + + options["override_key"].overrideDefault("override_value"); + + std::string value = options["override_key"].withDefault("default_value"); + + EXPECT_EQ(value, "override_value"); +} + TEST_F(OptionsTest, SingletonTest) { Options *root = Options::getRoot(); Options *second = Options::getRoot(); From bb1c95b09f8efa3a042731b1add6495c81a1d955 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 2 Nov 2019 22:03:46 +0000 Subject: [PATCH 028/428] const char* overload for Options::overrideDefault() Prevents errors from calls like options["somekey"].overrideDefault("string_value"); --- include/options.hxx | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/options.hxx b/include/options.hxx index 42b6a5f455..54de2d8e72 100644 --- a/include/options.hxx +++ b/include/options.hxx @@ -490,6 +490,12 @@ public: return as(); } + /// Overloaded version for const char* + /// Note: Different from template since return type is different to input + std::string overrideDefault(const char* def) { + return overrideDefault(std::string(def)); + } + /// Get the parent Options object Options &parent() { if (parent_instance == nullptr) { From c36351a37c562803ec79345432b8ba18fd8cdd0d Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 13 Nov 2019 23:10:57 +0000 Subject: [PATCH 029/428] BOUT_ENUM_CLASS macro Allows creating an 'enum class' along with toString and FromString functions and an Options::as() overload for setting an instance of the enum class from options. Also includes operator<< overload so that Options::assign() and Options::withDefault() work. --- include/bout/bout_enum_class.hxx | 105 +++++++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 include/bout/bout_enum_class.hxx diff --git a/include/bout/bout_enum_class.hxx b/include/bout/bout_enum_class.hxx new file mode 100644 index 0000000000..743099ebc4 --- /dev/null +++ b/include/bout/bout_enum_class.hxx @@ -0,0 +1,105 @@ +/************************************************************************** + * Copyright 2019 B.D.Dudson, J.T.Omotani, P.Hill + * + * Contact Ben Dudson, bd512@york.ac.uk + * + * This file is part of BOUT++. + * + * BOUT++ is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * BOUT++ is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with BOUT++. If not, see . 
+ **************************************************************************/ + +#ifndef __BOUT_ENUM_CLASS_H__ +#define __BOUT_ENUM_CLASS_H__ + +#include "bout/macro_for_each.hxx" +#include "boutexception.hxx" +#include "msg_stack.hxx" +#include "options.hxx" + +#include +#include + +/// Create some macro magic similar to bout/macro_for_each.hxx, but allowing for the enum +/// class name to be passed through to each _call +/// _ec_expand_x set of macros expand a number of arguments without ';' between them +#define _ec_expand_1(_call, enumname, x) _call(enumname, x) +#define _ec_expand_2(_call, enumname, x, ...) \ + _call(enumname, x) _ec_expand_1(_call, enumname, __VA_ARGS__) +#define _ec_expand_3(_call, enumname, x, ...) \ + _call(enumname, x) _ec_expand_2(_call, enumname, __VA_ARGS__) +#define _ec_expand_4(_call, enumname, x, ...) \ + _call(enumname, x) _ec_expand_3(_call, enumname, __VA_ARGS__) +#define _ec_expand_5(_call, enumname, x, ...) \ + _call(enumname, x) _ec_expand_4(_call, enumname, __VA_ARGS__) +#define _ec_expand_6(_call, enumname, x, ...) \ + _call(enumname, x) _ec_expand_5(_call, enumname, __VA_ARGS__) +#define _ec_expand_7(_call, enumname, x, ...) \ + _call(enumname, x) _ec_expand_6(_call, enumname, __VA_ARGS__) +#define _ec_expand_8(_call, enumname, x, ...) \ + _call(enumname, x) _ec_expand_7(_call, enumname, __VA_ARGS__) +#define _ec_expand_9(_call, enumname, x, ...) \ + _call(enumname, x) _ec_expand_8(_call, enumname, __VA_ARGS__) +#define _ec_expand_10(_call, enumname, x, ...) \ + _call(enumname, x) _ec_expand_9(_call, enumname, __VA_ARGS__) + +#define _BOUT_ENUM_CLASS_MAP_ARGS(mac, enumname, ...) \ + _GET_FOR_EACH_EXPANSION(__VA_ARGS__, \ + _ec_expand_10, _ec_expand_9, _ec_expand_8, _ec_expand_7, \ + _ec_expand_6, _ec_expand_5, _ec_expand_4, _ec_expand_3, \ + _ec_expand_2, _ec_expand_1) \ + (mac, enumname, __VA_ARGS__) + +#define _ENUM_CLASS_STR(enumname, val) {enumname::val, lowercase(#val)}, +#define _STR_ENUM_CLASS(enumname, val) {lowercase(#val), enumname::val}, + +#define _MAKE_FROMSTRING_NAME(enumname) enumname ## FromString + +/// Create an enum class with toString and FromString functions, and an +/// Options::as overload to read the enum +#define BOUT_ENUM_CLASS(enumname, ...) 
\ +enum class enumname { __VA_ARGS__ }; \ + \ +inline std::string toString(enumname e) { \ + AUTO_TRACE(); \ + const static std::map toString_map = { \ + _BOUT_ENUM_CLASS_MAP_ARGS(_ENUM_CLASS_STR, enumname, __VA_ARGS__) \ + }; \ + auto found = toString_map.find(e); \ + if (found == toString_map.end()) { \ + throw BoutException("Did not find enum %d", static_cast(e)); \ + } \ + return found->second; \ +} \ + \ +inline enumname _MAKE_FROMSTRING_NAME(enumname)(std::string s) { \ + AUTO_TRACE(); \ + const static std::map fromString_map = { \ + _BOUT_ENUM_CLASS_MAP_ARGS(_STR_ENUM_CLASS, enumname, __VA_ARGS__) \ + }; \ + auto found = fromString_map.find(s); \ + if (found == fromString_map.end()) { \ + throw BoutException("Did not find enum %s", s.c_str()); \ + } \ + return found->second; \ +} \ + \ +template <> inline enumname Options::as(const enumname&) const { \ + return _MAKE_FROMSTRING_NAME(enumname)(this->as()); \ +} \ + \ +inline std::ostream& operator<<(std::ostream& out, const enumname& e) { \ + return out << toString(e); \ +} + +#endif // __BOUT_ENUM_CLASS_H__ From 113000d7ed15d698c64547d299f1621e30b49e80 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 30 Oct 2019 09:34:29 +0000 Subject: [PATCH 030/428] Fix version major/minor/patch decomposition in configure.ac Enable use of version tags --- configure | 7 ++++--- configure.ac | 7 ++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/configure b/configure index c1cef9668c..d3e5ef6d31 100755 --- a/configure +++ b/configure @@ -6119,9 +6119,10 @@ $as_echo "$as_me: Signaling floating point exceptions disabled" >&6;} fi BOUT_VERSION=$PACKAGE_VERSION -BOUT_VERSION_MAJOR=${PACKAGE_VERSION%%.*} -BOUT_VERSION_MINOR=${PACKAGE_VERSION#*.} -BOUT_VERSION_DOUBLE=${BOUT_VERSION_MAJOR}.$(echo ${BOUT_VERSION_MINOR}|tr -d .) +BOUT_VERSION_MAJOR=$(echo $PACKAGE_VERSION | cut -d. -f1) +BOUT_VERSION_MINOR=$(echo $PACKAGE_VERSION | cut -d. -f2) +BOUT_VERSION_PATCH=$(echo ${PACKAGE_VERSION%-alpha} | cut -d. -f3) +BOUT_VERSION_DOUBLE=${BOUT_VERSION_MAJOR}.${BOUT_VERSION_MINOR}${BOUT_VERSION_PATCH} CXXFLAGS="$CXXFLAGS -DBOUT_VERSION_STRING=\"\\\"$BOUT_VERSION\\\"\" -DBOUT_VERSION_DOUBLE=$BOUT_VERSION_DOUBLE " ############################################################# # Enable Backtrace if possible diff --git a/configure.ac b/configure.ac index 2821644220..3729adec21 100644 --- a/configure.ac +++ b/configure.ac @@ -346,9 +346,10 @@ AS_IF([test "x$enable_sigfpe" = "xyes"], [ ]) BOUT_VERSION=$PACKAGE_VERSION -BOUT_VERSION_MAJOR=${PACKAGE_VERSION%%.*} -BOUT_VERSION_MINOR=${PACKAGE_VERSION#*.} -BOUT_VERSION_DOUBLE=${BOUT_VERSION_MAJOR}.$(echo ${BOUT_VERSION_MINOR}|tr -d .) +BOUT_VERSION_MAJOR=$(echo $PACKAGE_VERSION | cut -d. -f1) +BOUT_VERSION_MINOR=$(echo $PACKAGE_VERSION | cut -d. -f2) +BOUT_VERSION_PATCH=$(echo ${PACKAGE_VERSION%-alpha} | cut -d. 
-f3) +BOUT_VERSION_DOUBLE=${BOUT_VERSION_MAJOR}.${BOUT_VERSION_MINOR}${BOUT_VERSION_PATCH} CXXFLAGS="$CXXFLAGS -DBOUT_VERSION_STRING=\"\\\"$BOUT_VERSION\\\"\" -DBOUT_VERSION_DOUBLE=$BOUT_VERSION_DOUBLE " ############################################################# # Enable Backtrace if possible From a13fe937411fa26ec77b8927e6c865bfe3321f06 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 30 Oct 2019 09:36:05 +0000 Subject: [PATCH 031/428] Fix version major/minor/patch decomposition in CMakeLists.txt Enable use of version tags --- CMakeLists.txt | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b909ab6482..863b1bfc10 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,9 +6,14 @@ else() cmake_policy(VERSION 3.12) endif() +# CMake currently doesn't support proper semver +# Set the version here, strip the prerelease identifier to use in `project` +set(BOUT_FULL_VERSION 4.2.2) +string(REPLACE "-alpha" "" BOUT_CMAKE_ACCEPTABLE_VERSION ${BOUT_FULL_VERSION}) + project(BOUT++ DESCRIPTION "Fluid PDE solver framework" - VERSION 4.2.2 + VERSION ${BOUT_CMAKE_ACCEPTABLE_VERSION} LANGUAGES CXX) # This might not be entirely sensible, but helps CMake to find the @@ -305,7 +310,7 @@ target_include_directories(bout++ PUBLIC ) target_compile_definitions(bout++ - PUBLIC "BOUT_VERSION_STRING=\"${PROJECT_VERSION}\"" + PUBLIC "BOUT_VERSION_STRING=\"${BOUT_FULL_VERSION}\"" PUBLIC "BOUT_VERSION_DOUBLE=${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}${PROJECT_VERSION_PATCH}" ) From a4f5d5c12e1fb1b9a013df1fc1c00e51275d9f75 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 22 Nov 2019 17:02:40 +0000 Subject: [PATCH 032/428] Bump version to 4.4.0-alpha --- CMakeLists.txt | 2 +- configure | 22 +++++++++++----------- configure.ac | 2 +- manual/doxygen/Doxyfile | 2 +- manual/doxygen/Doxyfile_readthedocs | 2 +- manual/sphinx/conf.py | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 863b1bfc10..6fceace276 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,7 +8,7 @@ endif() # CMake currently doesn't support proper semver # Set the version here, strip the prerelease identifier to use in `project` -set(BOUT_FULL_VERSION 4.2.2) +set(BOUT_FULL_VERSION 4.4.0-alpha) string(REPLACE "-alpha" "" BOUT_CMAKE_ACCEPTABLE_VERSION ${BOUT_FULL_VERSION}) project(BOUT++ diff --git a/configure b/configure index d3e5ef6d31..7362375b31 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for BOUT++ 4.3.0. +# Generated by GNU Autoconf 2.69 for BOUT++ 4.4.0-alpha. # # Report bugs to . # @@ -580,8 +580,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='BOUT++' PACKAGE_TARNAME='bout--' -PACKAGE_VERSION='4.3.0' -PACKAGE_STRING='BOUT++ 4.3.0' +PACKAGE_VERSION='4.4.0-alpha' +PACKAGE_STRING='BOUT++ 4.4.0-alpha' PACKAGE_BUGREPORT='bd512@york.ac.uk' PACKAGE_URL='' @@ -1382,7 +1382,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures BOUT++ 4.3.0 to adapt to many kinds of systems. +\`configure' configures BOUT++ 4.4.0-alpha to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... 
@@ -1447,7 +1447,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of BOUT++ 4.3.0:";; + short | recursive ) echo "Configuration of BOUT++ 4.4.0-alpha:";; esac cat <<\_ACEOF @@ -1583,7 +1583,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -BOUT++ configure 4.3.0 +BOUT++ configure 4.4.0-alpha generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -2164,7 +2164,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by BOUT++ $as_me 4.3.0, which was +It was created by BOUT++ $as_me 4.4.0-alpha, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -14943,7 +14943,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by BOUT++ $as_me 4.3.0, which was +This file was extended by BOUT++ $as_me 4.4.0-alpha, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -15000,7 +15000,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -BOUT++ config.status 4.3.0 +BOUT++ config.status 4.4.0-alpha configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" @@ -16312,7 +16312,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by BOUT++ $as_me 4.3.0, which was +This file was extended by BOUT++ $as_me 4.4.0-alpha, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -16369,7 +16369,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -BOUT++ config.status 4.3.0 +BOUT++ config.status 4.4.0-alpha configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index 3729adec21..f2af283aa5 100644 --- a/configure.ac +++ b/configure.ac @@ -32,7 +32,7 @@ # AC_PREREQ([2.69]) -AC_INIT([BOUT++],[4.3.0],[bd512@york.ac.uk]) +AC_INIT([BOUT++],[4.4.0-alpha],[bd512@york.ac.uk]) AC_CONFIG_AUX_DIR([build-aux]) AC_CONFIG_MACRO_DIR([m4]) diff --git a/manual/doxygen/Doxyfile b/manual/doxygen/Doxyfile index 213e3b1251..1fe9e8b923 100644 --- a/manual/doxygen/Doxyfile +++ b/manual/doxygen/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = BOUT++ # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 4.3.0 +PROJECT_NUMBER = 4.4.0-alpha # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/manual/doxygen/Doxyfile_readthedocs b/manual/doxygen/Doxyfile_readthedocs index 1a48ee7a47..5ec9606d3e 100644 --- a/manual/doxygen/Doxyfile_readthedocs +++ b/manual/doxygen/Doxyfile_readthedocs @@ -38,7 +38,7 @@ PROJECT_NAME = BOUT++ # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER = 4.3.0 +PROJECT_NUMBER = 4.4.0-alpha # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/manual/sphinx/conf.py b/manual/sphinx/conf.py index 31de82f900..ce69f3ab6f 100755 --- a/manual/sphinx/conf.py +++ b/manual/sphinx/conf.py @@ -131,9 +131,9 @@ def __getattr__(cls, name): # built documents. # # The short X.Y version. -version = '4.3' +version = '4.4' # The full version, including alpha/beta/rc tags. -release = '4.3.0' +release = '4.4.0-alpha' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From 66bcf25add053835305f9d020a16c5bed5a8c721 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 31 Oct 2019 12:02:55 +0000 Subject: [PATCH 033/428] CMake: Use a regex to strip any tags from version --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6fceace276..3767c9b0c3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -9,7 +9,7 @@ endif() # CMake currently doesn't support proper semver # Set the version here, strip the prerelease identifier to use in `project` set(BOUT_FULL_VERSION 4.4.0-alpha) -string(REPLACE "-alpha" "" BOUT_CMAKE_ACCEPTABLE_VERSION ${BOUT_FULL_VERSION}) +string(REGEX REPLACE "^([0-9]+\.[0-9]+\.[0-9]+)-.*" "\\1" BOUT_CMAKE_ACCEPTABLE_VERSION ${BOUT_FULL_VERSION}) project(BOUT++ DESCRIPTION "Fluid PDE solver framework" From 21276e995d3223b89bc73c36cc72559415dea15f Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 22 Nov 2019 17:06:19 +0000 Subject: [PATCH 034/428] Correct spelling in docstring --- src/invert/laplacexy/laplacexy.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index 561853bae9..73c769f02e 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -44,7 +44,7 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) } finite_volume = (*opt)["finite_volume"].doc( - "Use finite volume rather than finite difference discritisation." + "Use finite volume rather than finite difference discretisation." ).withDefault(true); /////////////////////////////////////////////////// From ba5d3545dd70869971223c02dbf19cbba5dd926d Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 22 Nov 2019 17:08:59 +0000 Subject: [PATCH 035/428] LaplaceXY: combine MPI_Waitall for all 4 corner cells Previously was a separate MPI_Waitall for each corner. 
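The underlying pattern is to post every non-blocking send and receive first, then complete them all with a single call. A minimal sketch of that pattern (not the LaplaceXY code itself; the ranks, tags, flags and buffers are placeholders for illustration):

    #include <mpi.h>

    // Requests left as MPI_REQUEST_NULL are ignored by MPI_Waitall, so slots
    // for messages that are not needed on this processor are harmless.
    void exchange_with_neighbours(MPI_Comm comm, bool has_inner, bool has_outer,
                                  int inner_rank, int outer_rank) {
      MPI_Request requests[4] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL,
                                 MPI_REQUEST_NULL, MPI_REQUEST_NULL};
      MPI_Status statuses[4];
      double recv_inner = 0.0, recv_outer = 0.0;
      double send_inner = 1.0, send_outer = 2.0;

      if (has_inner) {
        MPI_Irecv(&recv_inner, 1, MPI_DOUBLE, inner_rank, 0, comm, &requests[0]);
        MPI_Isend(&send_inner, 1, MPI_DOUBLE, inner_rank, 1, comm, &requests[1]);
      }
      if (has_outer) {
        MPI_Irecv(&recv_outer, 1, MPI_DOUBLE, outer_rank, 1, comm, &requests[2]);
        MPI_Isend(&send_outer, 1, MPI_DOUBLE, outer_rank, 0, comm, &requests[3]);
      }

      // One MPI_Waitall completes every outstanding request, rather than a
      // separate wait after each pair of messages.
      MPI_Waitall(4, requests, statuses);
    }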
--- src/invert/laplacexy/laplacexy.cxx | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index 73c769f02e..e19fd2368b 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -434,8 +434,10 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) auto xcomm = localmesh->getXcomm(); int proc_xind = localmesh->getXProcIndex(); int tag0 = localmesh->getYProcIndex()*localmesh->getNXPE(); - MPI_Request requests[] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL}; - MPI_Status statuses[2]; + MPI_Request requests[] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL, MPI_REQUEST_NULL, + MPI_REQUEST_NULL, MPI_REQUEST_NULL, MPI_REQUEST_NULL, + MPI_REQUEST_NULL, MPI_REQUEST_NULL}; + MPI_Status statuses[8]; // Get lower, inner corner if (not localmesh->firstX()) { @@ -448,46 +450,43 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) MPI_Isend(&indexXY(localmesh->xend, localmesh->ystart-1), 1, MPI_DOUBLE, proc_xind+1, tag0 + 1*(proc_xind + 1), xcomm, &requests[1]); } - MPI_Waitall(2, requests, statuses); // Get upper, inner corner if (not localmesh->firstX()) { // Receive from inner MPI_Irecv(&indexXY(localmesh->xstart-1, localmesh->yend+1), 1, MPI_DOUBLE, - proc_xind-1, tag0 + 1*proc_xind, xcomm, &requests[0]); + proc_xind-1, tag0 + 1*proc_xind, xcomm, &requests[2]); } if (not localmesh->lastX()) { // Need to send upper outer point out MPI_Isend(&indexXY(localmesh->xend, localmesh->yend+1), 1, MPI_DOUBLE, proc_xind+1, - tag0 + 1*(proc_xind + 1), xcomm, &requests[1]); + tag0 + 1*(proc_xind + 1), xcomm, &requests[3]); } - MPI_Waitall(2, requests, statuses); // Get lower, outer corner if (not localmesh->lastX()) { // Receive from outer MPI_Irecv(&indexXY(localmesh->xend+1, localmesh->ystart-1), 1, MPI_DOUBLE, - proc_xind+1, tag0 + proc_xind, xcomm, &requests[0]); + proc_xind+1, tag0 + proc_xind, xcomm, &requests[4]); } if (not localmesh->firstX()) { // Need to send lower inner point in MPI_Isend(&indexXY(localmesh->xstart, localmesh->ystart-1), 1, MPI_DOUBLE, - proc_xind-1, tag0 + (proc_xind - 1), xcomm, &requests[1]); + proc_xind-1, tag0 + (proc_xind - 1), xcomm, &requests[5]); } - MPI_Waitall(2, requests, statuses); // Get upper, outer corner if (not localmesh->lastX()) { // Receive from outer MPI_Irecv(&indexXY(localmesh->xend+1, localmesh->yend+1), 1, MPI_DOUBLE, - proc_xind+1, tag0 + proc_xind, xcomm, &requests[0]); + proc_xind+1, tag0 + proc_xind, xcomm, &requests[6]); } if (not localmesh->firstX()) { // Need to send upper inner point in MPI_Isend(&indexXY(localmesh->xstart, localmesh->yend+1), 1, MPI_DOUBLE, - proc_xind-1, tag0 + (proc_xind - 1), xcomm, &requests[1]); + proc_xind-1, tag0 + (proc_xind - 1), xcomm, &requests[7]); } - MPI_Waitall(2, requests, statuses); + MPI_Waitall(8, requests, statuses); ////////////////////////////////////////////////// // Set up KSP From 92e666088cdee39d55f93d2a2a62ab82a84d308b Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 22 Nov 2019 18:22:37 +0000 Subject: [PATCH 036/428] Correct note in LaplaceXY section of manual --- manual/sphinx/user_docs/laplacian.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/manual/sphinx/user_docs/laplacian.rst b/manual/sphinx/user_docs/laplacian.rst index 58c028bc3e..51ce64f918 100644 --- a/manual/sphinx/user_docs/laplacian.rst +++ b/manual/sphinx/user_docs/laplacian.rst @@ -887,8 +887,10 @@ cell faces. 
Notes: -- The ShiftXderivs option must be true for this to work, since it - assumes that :math:`g^{xz} = 0` +- The ``ShiftedMetric`` or ``FCITransform`` ParallelTransform must be used + (i.e. ``mesh:paralleltransform = shifted`` or + ``mesh:paralleltransform = fci``) for this to work, since it assumes that + :math:`g^{xz} = 0` - Setting the option ``pctype = hypre`` seems to work well, if PETSc has been compiled with the algebraic multigrid library hypre; this can be included by passing the option ``--download-hypre`` to PETSc's ``configure`` script. From 5f588ff091ba57aff08a87d4f4e19e69ba21a10e Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 26 Nov 2019 11:14:20 +0000 Subject: [PATCH 037/428] Update manual on passing boundary values for LaplaceXY --- manual/sphinx/user_docs/laplacian.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/manual/sphinx/user_docs/laplacian.rst b/manual/sphinx/user_docs/laplacian.rst index 51ce64f918..bc287d78ea 100644 --- a/manual/sphinx/user_docs/laplacian.rst +++ b/manual/sphinx/user_docs/laplacian.rst @@ -894,6 +894,11 @@ Notes: - Setting the option ``pctype = hypre`` seems to work well, if PETSc has been compiled with the algebraic multigrid library hypre; this can be included by passing the option ``--download-hypre`` to PETSc's ``configure`` script. +- ``LaplaceXY`` (with the default finite-volume discretisation) has a slightly + different convention for passing non-zero boundary values than the + ``Laplacian`` solvers. ``LaplaceXY`` uses the average of the last grid cell + and first boundary cell of the initial guess (second argument to + ``solve()``) as the value to impose for the boundary condition. An alternative discretization is available if the option ``finite_volume = false`` is set. Then a finite-difference discretization very close to the one used when @@ -903,6 +908,13 @@ supports non-orthogonal grids with :math:`g^{xy} \neq 0`. The difference is that condition to ``dfdy = DDY(f)`` before calculating ``DDX(dfdy)`` with a slightly different result than the way boundary conditions are applied in ``LaplaceXY``. +- The finite difference implementation of ``LaplaceXY`` passes non-zero values + for the boundary conditions in the same way as the ``Laplacian`` solvers. + The value in the first boundary cell of the initial guess (second argument + to ``solve()``) is used as the boundary value. (Note that this value is + imposed as a boundary condition on the returned solution at a location half + way between the last grid cell and first boundary cell.) + .. _sec-LaplaceXZ: LaplaceXZ From b0f7bb543a8a94d461e6730f5fee2ebd5c60611c Mon Sep 17 00:00:00 2001 From: johnomotani Date: Thu, 5 Dec 2019 12:20:23 +0000 Subject: [PATCH 038/428] Remove 3-element list indexers for collect() Previously there was an undocumented option to pass a 3-element list `[start, end, step]` to the `tind,xind,yind,zind` arguments of `collect()`. The 3-element list used an exclusive `end`, while the 2-element form uses an inclusive `end`, which is inconsistent and potentially confusing. `step` functionality is available by passing a Python `slice` to collect. Removing the 3-element list form is a safe solution: no existing scripts will silently change their behaviour, and the provided functionality is consistent. 
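In practice a script that relied on the removed form should pass a Python slice instead; a brief sketch (the variable name, data path and indices are illustrative only):

    from boutdata.collect import collect

    # Old, removed form: collect("n", tind=[0, 10, 2], path="data")
    # New form: an explicit slice, with the usual exclusive Python end point
    n = collect("n", tind=slice(0, 10, 2), path="data")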
--- tools/pylib/boutdata/collect.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tools/pylib/boutdata/collect.py b/tools/pylib/boutdata/collect.py index 930cf41bdb..7d01357967 100644 --- a/tools/pylib/boutdata/collect.py +++ b/tools/pylib/boutdata/collect.py @@ -115,9 +115,6 @@ def _convert_to_nice_slice(r, N, name="range"): .format(name, *r2)) # Lists uses inclusive end, we need exclusive end temp_slice = slice(r2[0], r2[1] + 1) - elif len(r) == 3: - # Convert 3 element list to slice object - temp_slice = slice(r[0],r[1],r[2]) else: raise ValueError("Couldn't convert {} ('{}') to slice".format(name, r)) From 7aad2e94415ebc1ec3074eb116eeaf11a80a96a3 Mon Sep 17 00:00:00 2001 From: johnomotani Date: Thu, 5 Dec 2019 14:49:47 +0000 Subject: [PATCH 039/428] More informative exception message for collect indexers In case a user tries to pass a 3-element list to an indexer argument of `collect()` (as was previously supported), update the exception message to suggest passing a `slice` to get that functionality. --- tools/pylib/boutdata/collect.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/pylib/boutdata/collect.py b/tools/pylib/boutdata/collect.py index 7d01357967..69cf607479 100644 --- a/tools/pylib/boutdata/collect.py +++ b/tools/pylib/boutdata/collect.py @@ -116,7 +116,9 @@ def _convert_to_nice_slice(r, N, name="range"): # Lists uses inclusive end, we need exclusive end temp_slice = slice(r2[0], r2[1] + 1) else: - raise ValueError("Couldn't convert {} ('{}') to slice".format(name, r)) + raise ValueError("Couldn't convert {} ('{}') to slice. Please pass a " + "slice(start, stop, step) if you need to set a step." + .format(name, r)) # slice.indices converts None to actual values return slice(*temp_slice.indices(N)) From 6612bb1370faae21b214768ed58204b0bbc1572b Mon Sep 17 00:00:00 2001 From: Sajidah Ahmed Date: Tue, 24 Sep 2019 15:06:20 +0100 Subject: [PATCH 040/428] Implement toFieldAligned and fromFieldAligned for Vector3D --- include/vector3d.hxx | 3 +++ src/field/vector3d.cxx | 28 ++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/include/vector3d.hxx b/include/vector3d.hxx index 29e307c448..7ff01252dd 100644 --- a/include/vector3d.hxx +++ b/include/vector3d.hxx @@ -225,6 +225,9 @@ inline const Field3D abs(const Vector3D& v, REGION region) { return abs(v, toString(region)); } +const Vector3D toFieldAligned(const Vector3D& v, const std::string& region = "RGN_ALL"); +const Vector3D fromFieldAligned(const Vector3D& v, const std::string& region = "RGN_ALL"); + /*! 
* @brief Time derivative of 3D vector field */ diff --git a/src/field/vector3d.cxx b/src/field/vector3d.cxx index eb4ef000da..3c2f724a94 100644 --- a/src/field/vector3d.cxx +++ b/src/field/vector3d.cxx @@ -603,6 +603,34 @@ const Field3D abs(const Vector3D &v, const std::string& region) { return sqrt(v*v, region); } +const Vector3D toFieldAligned(const Vector3D& v, const std::string& region) { + Vector3D result; + result.setLocation(v.getLocation()); + if (not v.covariant) { + result.toContravariant(); + } + + result.x = toFieldAligned(v.x, region); + result.y = toFieldAligned(v.y, region); + result.z = toFieldAligned(v.z, region); + + return result; +} + +const Vector3D fromFieldAligned(const Vector3D& v, const std::string& region) { + Vector3D result; + result.setLocation(v.getLocation()); + if (not v.covariant) { + result.toContravariant(); + } + + result.x = fromFieldAligned(v.x, region); + result.y = fromFieldAligned(v.y, region); + result.z = fromFieldAligned(v.z, region); + + return result; +} + /*************************************************************** * FieldData VIRTUAL FUNCTIONS ***************************************************************/ From 71686137ae641b8576d29d6020f00d0482809df7 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 10 Jan 2020 15:06:56 +0000 Subject: [PATCH 041/428] Drop const from return type of to/fromFieldAligned(Vector3D) --- include/vector3d.hxx | 4 ++-- src/field/vector3d.cxx | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/vector3d.hxx b/include/vector3d.hxx index 7ff01252dd..f4874754a4 100644 --- a/include/vector3d.hxx +++ b/include/vector3d.hxx @@ -225,8 +225,8 @@ inline const Field3D abs(const Vector3D& v, REGION region) { return abs(v, toString(region)); } -const Vector3D toFieldAligned(const Vector3D& v, const std::string& region = "RGN_ALL"); -const Vector3D fromFieldAligned(const Vector3D& v, const std::string& region = "RGN_ALL"); +Vector3D toFieldAligned(const Vector3D& v, const std::string& region = "RGN_ALL"); +Vector3D fromFieldAligned(const Vector3D& v, const std::string& region = "RGN_ALL"); /*! * @brief Time derivative of 3D vector field diff --git a/src/field/vector3d.cxx b/src/field/vector3d.cxx index 3c2f724a94..e3f74db28b 100644 --- a/src/field/vector3d.cxx +++ b/src/field/vector3d.cxx @@ -603,7 +603,7 @@ const Field3D abs(const Vector3D &v, const std::string& region) { return sqrt(v*v, region); } -const Vector3D toFieldAligned(const Vector3D& v, const std::string& region) { +Vector3D toFieldAligned(const Vector3D& v, const std::string& region) { Vector3D result; result.setLocation(v.getLocation()); if (not v.covariant) { @@ -617,7 +617,7 @@ const Vector3D toFieldAligned(const Vector3D& v, const std::string& region) { return result; } -const Vector3D fromFieldAligned(const Vector3D& v, const std::string& region) { +Vector3D fromFieldAligned(const Vector3D& v, const std::string& region) { Vector3D result; result.setLocation(v.getLocation()); if (not v.covariant) { From a332d1fd8c7fd61c1bc56edd526784aba7692256 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 10 Jan 2020 15:03:06 +0000 Subject: [PATCH 042/428] toFieldAligned and fromFieldAligned for Vector2D These are null operations for the Field2D components of Vector2D, so they just return a copy. 
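A rough sketch of the resulting interface (the vector names are invented and an initialised mesh is assumed, so this is a fragment rather than a complete program):

    // v3 has Field3D components, v2 has Field2D components
    Vector3D v3_aligned = toFieldAligned(v3);    // shifts each component
    Vector3D v3_back = fromFieldAligned(v3_aligned);
    Vector2D v2_aligned = toFieldAligned(v2);    // null operation: returns a copy of v2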
--- include/vector2d.hxx | 12 ++++++++++++ include/vector3d.hxx | 1 + 2 files changed, 13 insertions(+) diff --git a/include/vector2d.hxx b/include/vector2d.hxx index a92b5c984b..9afabc7a2d 100644 --- a/include/vector2d.hxx +++ b/include/vector2d.hxx @@ -181,6 +181,18 @@ inline const Field2D abs(const Vector2D &v, REGION region) { return abs(v, toString(region)); } +/// Transform to and from field-aligned coordinates +inline Vector2D toFieldAligned(const Vector2D v, const std::string& UNUSED(region) = "RGN_ALL") { + // toFieldAligned is a null operation for the Field2D components of v, so return a copy + // of the argument (hence pass-by-value instead of pass-by-reference) + return v; +} +inline Vector2D fromFieldAligned(const Vector2D v, const std::string& UNUSED(region) = "RGN_ALL") { + // fromFieldAligned is a null operation for the Field2D components of v, so return a copy + // of the argument (hence pass-by-value instead of pass-by-reference) + return v; +} + /*! * @brief Time derivative of 2D vector field */ diff --git a/include/vector3d.hxx b/include/vector3d.hxx index f4874754a4..111b7cb92d 100644 --- a/include/vector3d.hxx +++ b/include/vector3d.hxx @@ -225,6 +225,7 @@ inline const Field3D abs(const Vector3D& v, REGION region) { return abs(v, toString(region)); } +/// Transform to and from field-aligned coordinates Vector3D toFieldAligned(const Vector3D& v, const std::string& region = "RGN_ALL"); Vector3D fromFieldAligned(const Vector3D& v, const std::string& region = "RGN_ALL"); From 3e92d7d3e5568127247b88943bc27ae7839915b3 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 10 Jan 2020 15:29:50 +0000 Subject: [PATCH 043/428] emptyFrom and zeroFrom for Vector2D and Vector3D --- include/vector2d.hxx | 24 ++++++++++++++++++++++++ include/vector3d.hxx | 23 +++++++++++++++++++++++ src/field/vector2d.cxx | 6 ++++++ src/field/vector3d.cxx | 6 ++++++ 4 files changed, 59 insertions(+) diff --git a/include/vector2d.hxx b/include/vector2d.hxx index 9afabc7a2d..f9ef911c3b 100644 --- a/include/vector2d.hxx +++ b/include/vector2d.hxx @@ -49,6 +49,10 @@ class Vector2D : public FieldData { public: Vector2D(Mesh * fieldmesh = nullptr); Vector2D(const Vector2D &f); + + /// Many-argument constructor for fully specifying the initialisation of a Vector3D + Vector2D(Mesh* localmesh, bool covariant, CELL_LOC location); + ~Vector2D() override; Field2D x, y, z; ///< components @@ -193,6 +197,26 @@ inline Vector2D fromFieldAligned(const Vector2D v, const std::string& UNUSED(reg return v; } +/// Create new Vector2D with same attributes as the argument, but uninitialised components +inline Vector2D emptyFrom(const Vector2D v) { + auto result = Vector2D(v.x.getMesh(), v.covariant, v.getLocation()); + result.x = emptyFrom(v.x); + result.y = emptyFrom(v.y); + result.z = emptyFrom(v.z); + + return result; +} + +/// Create new Vector2D with same attributes as the argument, and zero-initialised components +inline Vector2D zeroFrom(const Vector2D v) { + auto result = Vector2D(v.x.getMesh(), v.covariant, v.getLocation()); + result.x = zeroFrom(v.x); + result.y = zeroFrom(v.y); + result.z = zeroFrom(v.z); + + return result; +} + /*! 
* @brief Time derivative of 2D vector field */ diff --git a/include/vector3d.hxx b/include/vector3d.hxx index 111b7cb92d..430c6d1604 100644 --- a/include/vector3d.hxx +++ b/include/vector3d.hxx @@ -66,6 +66,9 @@ class Vector3D : public FieldData { */ Vector3D(const Vector3D &f); + /// Many-argument constructor for fully specifying the initialisation of a Vector3D + Vector3D(Mesh* localmesh, bool covariant, CELL_LOC location); + /*! * Destructor. If the time derivative has been * used, then some book-keeping is needed to ensure @@ -229,6 +232,26 @@ inline const Field3D abs(const Vector3D& v, REGION region) { Vector3D toFieldAligned(const Vector3D& v, const std::string& region = "RGN_ALL"); Vector3D fromFieldAligned(const Vector3D& v, const std::string& region = "RGN_ALL"); +/// Create new Vector3D with same attributes as the argument, but uninitialised components +inline Vector3D emptyFrom(const Vector3D v) { + auto result = Vector3D(v.x.getMesh(), v.covariant, v.getLocation()); + result.x = emptyFrom(v.x); + result.y = emptyFrom(v.y); + result.z = emptyFrom(v.z); + + return result; +} + +/// Create new Vector3D with same attributes as the argument, and zero-initialised components +inline Vector3D zeroFrom(const Vector3D v) { + auto result = Vector3D(v.x.getMesh(), v.covariant, v.getLocation()); + result.x = zeroFrom(v.x); + result.y = zeroFrom(v.y); + result.z = zeroFrom(v.z); + + return result; +} + /*! * @brief Time derivative of 3D vector field */ diff --git a/src/field/vector2d.cxx b/src/field/vector2d.cxx index 6925279364..87a2344015 100644 --- a/src/field/vector2d.cxx +++ b/src/field/vector2d.cxx @@ -42,6 +42,12 @@ Vector2D::Vector2D(const Vector2D &f) : x(f.x), y(f.y), z(f.z), covariant(f.covariant), deriv(nullptr), location(f.getLocation()) {} +Vector2D::Vector2D(Mesh* localmesh, bool covariant, CELL_LOC location) + : x(localmesh), y(localmesh), z(localmesh), covariant(covariant) { + + setLocation(location); + } + Vector2D::~Vector2D() { if (deriv != nullptr) { // The ddt of the components (x.ddt) point to the same place as ddt.x diff --git a/src/field/vector3d.cxx b/src/field/vector3d.cxx index e3f74db28b..bf58079573 100644 --- a/src/field/vector3d.cxx +++ b/src/field/vector3d.cxx @@ -43,6 +43,12 @@ Vector3D::Vector3D(const Vector3D &f) : x(f.x), y(f.y), z(f.z), covariant(f.covariant), deriv(nullptr), location(f.getLocation()) {} +Vector3D::Vector3D(Mesh* localmesh, bool covariant, CELL_LOC location) + : x(localmesh), y(localmesh), z(localmesh), covariant(covariant) { + + setLocation(location); + } + Vector3D::~Vector3D() { if (deriv != nullptr) { // The ddt of the components (x.ddt) point to the same place as ddt.x From 20a98a9ebc973ac59dc043b1fdc3d0e71bf60502 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 10 Jan 2020 15:37:26 +0000 Subject: [PATCH 044/428] Use emptyFrom in to/fromFieldAligned(Vector3D) --- src/field/vector3d.cxx | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/src/field/vector3d.cxx b/src/field/vector3d.cxx index bf58079573..29c9963293 100644 --- a/src/field/vector3d.cxx +++ b/src/field/vector3d.cxx @@ -610,11 +610,7 @@ const Field3D abs(const Vector3D &v, const std::string& region) { } Vector3D toFieldAligned(const Vector3D& v, const std::string& region) { - Vector3D result; - result.setLocation(v.getLocation()); - if (not v.covariant) { - result.toContravariant(); - } + Vector3D result{emptyFrom(v)}; result.x = toFieldAligned(v.x, region); result.y = toFieldAligned(v.y, region); @@ -624,11 +620,7 @@ Vector3D 
toFieldAligned(const Vector3D& v, const std::string& region) { } Vector3D fromFieldAligned(const Vector3D& v, const std::string& region) { - Vector3D result; - result.setLocation(v.getLocation()); - if (not v.covariant) { - result.toContravariant(); - } + Vector3D result{emptyFrom(v)}; result.x = fromFieldAligned(v.x, region); result.y = fromFieldAligned(v.y, region); From 85ef73fa0fef9146943ce066a4a6c6377813f4a6 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 10 Jan 2020 15:49:17 +0000 Subject: [PATCH 045/428] Pass argument by reference to vector emptyFrom and zeroFrom --- include/vector2d.hxx | 4 ++-- include/vector3d.hxx | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/vector2d.hxx b/include/vector2d.hxx index f9ef911c3b..7ee61a92a6 100644 --- a/include/vector2d.hxx +++ b/include/vector2d.hxx @@ -198,7 +198,7 @@ inline Vector2D fromFieldAligned(const Vector2D v, const std::string& UNUSED(reg } /// Create new Vector2D with same attributes as the argument, but uninitialised components -inline Vector2D emptyFrom(const Vector2D v) { +inline Vector2D emptyFrom(const Vector2D& v) { auto result = Vector2D(v.x.getMesh(), v.covariant, v.getLocation()); result.x = emptyFrom(v.x); result.y = emptyFrom(v.y); @@ -208,7 +208,7 @@ inline Vector2D emptyFrom(const Vector2D v) { } /// Create new Vector2D with same attributes as the argument, and zero-initialised components -inline Vector2D zeroFrom(const Vector2D v) { +inline Vector2D zeroFrom(const Vector2D& v) { auto result = Vector2D(v.x.getMesh(), v.covariant, v.getLocation()); result.x = zeroFrom(v.x); result.y = zeroFrom(v.y); diff --git a/include/vector3d.hxx b/include/vector3d.hxx index 430c6d1604..8e6d02e0dc 100644 --- a/include/vector3d.hxx +++ b/include/vector3d.hxx @@ -233,7 +233,7 @@ Vector3D toFieldAligned(const Vector3D& v, const std::string& region = "RGN_ALL" Vector3D fromFieldAligned(const Vector3D& v, const std::string& region = "RGN_ALL"); /// Create new Vector3D with same attributes as the argument, but uninitialised components -inline Vector3D emptyFrom(const Vector3D v) { +inline Vector3D emptyFrom(const Vector3D& v) { auto result = Vector3D(v.x.getMesh(), v.covariant, v.getLocation()); result.x = emptyFrom(v.x); result.y = emptyFrom(v.y); @@ -243,7 +243,7 @@ inline Vector3D emptyFrom(const Vector3D v) { } /// Create new Vector3D with same attributes as the argument, and zero-initialised components -inline Vector3D zeroFrom(const Vector3D v) { +inline Vector3D zeroFrom(const Vector3D& v) { auto result = Vector3D(v.x.getMesh(), v.covariant, v.getLocation()); result.x = zeroFrom(v.x); result.y = zeroFrom(v.y); From a0f8033da55bf1d67834cc20b5a3ddcbdddd4b87 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 10 Jan 2020 15:49:38 +0000 Subject: [PATCH 046/428] Don't use const on pass-by-value argument --- include/vector2d.hxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/vector2d.hxx b/include/vector2d.hxx index 7ee61a92a6..0c95314ccb 100644 --- a/include/vector2d.hxx +++ b/include/vector2d.hxx @@ -186,12 +186,12 @@ inline const Field2D abs(const Vector2D &v, REGION region) { } /// Transform to and from field-aligned coordinates -inline Vector2D toFieldAligned(const Vector2D v, const std::string& UNUSED(region) = "RGN_ALL") { +inline Vector2D toFieldAligned(Vector2D v, const std::string& UNUSED(region) = "RGN_ALL") { // toFieldAligned is a null operation for the Field2D components of v, so return a copy // of the argument (hence pass-by-value instead of 
pass-by-reference) return v; } -inline Vector2D fromFieldAligned(const Vector2D v, const std::string& UNUSED(region) = "RGN_ALL") { +inline Vector2D fromFieldAligned(Vector2D v, const std::string& UNUSED(region) = "RGN_ALL") { // fromFieldAligned is a null operation for the Field2D components of v, so return a copy // of the argument (hence pass-by-value instead of pass-by-reference) return v; From 9afa708faba500c7f3a2d3bac3dc530ac762c3da Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 22 Jan 2020 18:31:45 +0000 Subject: [PATCH 047/428] Make string argument 'const std::string&' --- include/bout/bout_enum_class.hxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/bout/bout_enum_class.hxx b/include/bout/bout_enum_class.hxx index 743099ebc4..9b30a6a750 100644 --- a/include/bout/bout_enum_class.hxx +++ b/include/bout/bout_enum_class.hxx @@ -82,7 +82,7 @@ inline std::string toString(enumname e) { \ return found->second; \ } \ \ -inline enumname _MAKE_FROMSTRING_NAME(enumname)(std::string s) { \ +inline enumname _MAKE_FROMSTRING_NAME(enumname)(const std::string& s) { \ AUTO_TRACE(); \ const static std::map fromString_map = { \ _BOUT_ENUM_CLASS_MAP_ARGS(_STR_ENUM_CLASS, enumname, __VA_ARGS__) \ From c76b7aa203b307e9393a3c9a0771c2d3df85549a Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 22 Jan 2020 18:35:06 +0000 Subject: [PATCH 048/428] Rename macros to avoid leading underscore followed by capital Leading underscore followed by capital is reserved. Prefix the macros with BOUT_ instead. --- include/bout/bout_enum_class.hxx | 82 ++++++++++++++++---------------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/include/bout/bout_enum_class.hxx b/include/bout/bout_enum_class.hxx index 9b30a6a750..ac45551d89 100644 --- a/include/bout/bout_enum_class.hxx +++ b/include/bout/bout_enum_class.hxx @@ -53,53 +53,53 @@ #define _ec_expand_10(_call, enumname, x, ...) \ _call(enumname, x) _ec_expand_9(_call, enumname, __VA_ARGS__) -#define _BOUT_ENUM_CLASS_MAP_ARGS(mac, enumname, ...) \ - _GET_FOR_EACH_EXPANSION(__VA_ARGS__, \ - _ec_expand_10, _ec_expand_9, _ec_expand_8, _ec_expand_7, \ - _ec_expand_6, _ec_expand_5, _ec_expand_4, _ec_expand_3, \ - _ec_expand_2, _ec_expand_1) \ +#define BOUT_ENUM_CLASS_MAP_ARGS(mac, enumname, ...) \ + BOUT_GET_FOR_EACH_EXPANSION(__VA_ARGS__, \ + _ec_expand_10, _ec_expand_9, _ec_expand_8, _ec_expand_7, \ + _ec_expand_6, _ec_expand_5, _ec_expand_4, _ec_expand_3, \ + _ec_expand_2, _ec_expand_1) \ (mac, enumname, __VA_ARGS__) -#define _ENUM_CLASS_STR(enumname, val) {enumname::val, lowercase(#val)}, -#define _STR_ENUM_CLASS(enumname, val) {lowercase(#val), enumname::val}, +#define BOUT_ENUM_CLASS_STR(enumname, val) {enumname::val, lowercase(#val)}, +#define BOUT_STR_ENUM_CLASS(enumname, val) {lowercase(#val), enumname::val}, -#define _MAKE_FROMSTRING_NAME(enumname) enumname ## FromString +#define BOUT_MAKE_FROMSTRING_NAME(enumname) enumname ## FromString /// Create an enum class with toString and FromString functions, and an /// Options::as overload to read the enum -#define BOUT_ENUM_CLASS(enumname, ...) 
\ -enum class enumname { __VA_ARGS__ }; \ - \ -inline std::string toString(enumname e) { \ - AUTO_TRACE(); \ - const static std::map toString_map = { \ - _BOUT_ENUM_CLASS_MAP_ARGS(_ENUM_CLASS_STR, enumname, __VA_ARGS__) \ - }; \ - auto found = toString_map.find(e); \ - if (found == toString_map.end()) { \ - throw BoutException("Did not find enum %d", static_cast(e)); \ - } \ - return found->second; \ -} \ - \ -inline enumname _MAKE_FROMSTRING_NAME(enumname)(const std::string& s) { \ - AUTO_TRACE(); \ - const static std::map fromString_map = { \ - _BOUT_ENUM_CLASS_MAP_ARGS(_STR_ENUM_CLASS, enumname, __VA_ARGS__) \ - }; \ - auto found = fromString_map.find(s); \ - if (found == fromString_map.end()) { \ - throw BoutException("Did not find enum %s", s.c_str()); \ - } \ - return found->second; \ -} \ - \ -template <> inline enumname Options::as(const enumname&) const { \ - return _MAKE_FROMSTRING_NAME(enumname)(this->as()); \ -} \ - \ -inline std::ostream& operator<<(std::ostream& out, const enumname& e) { \ - return out << toString(e); \ +#define BOUT_ENUM_CLASS(enumname, ...) \ +enum class enumname { __VA_ARGS__ }; \ + \ +inline std::string toString(enumname e) { \ + AUTO_TRACE(); \ + const static std::map toString_map = { \ + BOUT_ENUM_CLASS_MAP_ARGS(_ENUM_CLASS_STR, enumname, __VA_ARGS__) \ + }; \ + auto found = toString_map.find(e); \ + if (found == toString_map.end()) { \ + throw BoutException("Did not find enum %d", static_cast(e)); \ + } \ + return found->second; \ +} \ + \ +inline enumname BOUT_MAKE_FROMSTRING_NAME(enumname)(const std::string& s) { \ + AUTO_TRACE(); \ + const static std::map fromString_map = { \ + BOUT_ENUM_CLASS_MAP_ARGS(_STR_ENUM_CLASS, enumname, __VA_ARGS__) \ + }; \ + auto found = fromString_map.find(s); \ + if (found == fromString_map.end()) { \ + throw BoutException("Did not find enum %s", s.c_str()); \ + } \ + return found->second; \ +} \ + \ +template <> inline enumname Options::as(const enumname&) const { \ + return BOUT_MAKE_FROMSTRING_NAME(enumname)(this->as()); \ +} \ + \ +inline std::ostream& operator<<(std::ostream& out, const enumname& e) { \ + return out << toString(e); \ } #endif // __BOUT_ENUM_CLASS_H__ From d4b5c867cddf292f8965e8fdc9775b5b86db55cb Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 22 Jan 2020 18:53:50 +0000 Subject: [PATCH 049/428] Manual entry on BOUT_ENUM_CLASS --- manual/sphinx/user_docs/bout_options.rst | 44 ++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/manual/sphinx/user_docs/bout_options.rst b/manual/sphinx/user_docs/bout_options.rst index 500b1ccc46..318738c78e 100644 --- a/manual/sphinx/user_docs/bout_options.rst +++ b/manual/sphinx/user_docs/bout_options.rst @@ -781,3 +781,47 @@ FFTs take, and tries to find the optimal method. .. _FFTW FAQ: http://www.fftw.org/faq/section3.html#nondeterministic + + +Types for multi-valued options +------------------------------ + +An ``enum class`` can be a useful construct for options in a physics model. It +can have an arbitrary number of user-defined, named values (although the code +in ``include/bout/bout_enum_class.hxx`` needs extending for more than 10 +values). The advantage over using a ``std::string`` for an option is that a +typo cannot produce an unexpected value: in C++ code it is a compile-time error +and reading from ``BOUT.inp`` it is a run-time exception. We provide a utility +macro ``BOUT_ENUM_CLASS`` to define an ``enum class`` with some extra +convenience methods. 
For example, after defining ``myoption`` like:: + + BOUT_ENUM_CLASS(myoption, foo, bar, baz); + +it is possible not only to test for a value, e.g.:: + + myoption x = ; + ... + if (x == myoption::foo) { + do a foo thing + } + +but also to convert the option to a string:: + + std::string s = toString(x); + +pass it to a stream:: + + output << x; + +or get an option like ``myinput=baz`` from an input file or the command line as +a ``myoption``:: + + myoption y = Options::root()["myinput"].as<myoption>(); + +or with a default value:: + + myoption y = Options::root()["myinput"].withDefault(myoption::bar); + +Only strings exactly (but case-insensitively) matching the name of one of the +defined ``myoption`` values are allowed; anything else results in an exception +being thrown.
From 9bfa6367f07f8436162ad9dc7f71addb7223d9fb Mon Sep 17 00:00:00 2001 From: Joseph Parker Date: Tue, 4 Feb 2020 11:18:57 +0000 Subject: [PATCH 050/428] Backport of Laplace performance test --- examples/performance/laplace/.gitignore | 1 + examples/performance/laplace/README.md | 10 ++ examples/performance/laplace/data/BOUT.inp | 23 +++ examples/performance/laplace/laplace.cxx | 185 +++++++++++++++++++++ examples/performance/laplace/makefile | 6 + examples/performance/laplace/runtest | 20 +++ 6 files changed, 245 insertions(+) create mode 100644 examples/performance/laplace/.gitignore create mode 100644 examples/performance/laplace/README.md create mode 100644 examples/performance/laplace/data/BOUT.inp create mode 100644 examples/performance/laplace/laplace.cxx create mode 100644 examples/performance/laplace/makefile create mode 100755 examples/performance/laplace/runtest
diff --git a/examples/performance/laplace/.gitignore b/examples/performance/laplace/.gitignore new file mode 100644 index 0000000000..4a3c523892 --- /dev/null +++ b/examples/performance/laplace/.gitignore @@ -0,0 +1 @@ +laplace
diff --git a/examples/performance/laplace/README.md b/examples/performance/laplace/README.md new file mode 100644 index 0000000000..66f9ebef4a --- /dev/null +++ b/examples/performance/laplace/README.md @@ -0,0 +1,10 @@ +performance/laplace +=================== + +This times the Laplacian inversions called in the test-laplace integrated tests +to benchmark performance. It does not check correctness. + +This test uses both the old-style free function `invert_laplace` and the +new class interface `Laplacian`. + +For details on the flags used, see the Laplacian inversion documentation.
diff --git a/examples/performance/laplace/data/BOUT.inp b/examples/performance/laplace/data/BOUT.inp new file mode 100644 index 0000000000..3b30f12ae7 --- /dev/null +++ b/examples/performance/laplace/data/BOUT.inp @@ -0,0 +1,23 @@ +# Performance test for Laplace inversion + +NOUT = 0 # No timesteps + +MZ = 32 # Z size + +dump_format = "nc" # NetCDF format.
Alternative is "pdb" + +nype = 1 # Set to 1 to ensure we only parallelize over x in scaling scan + +[LaplaceTest] +NUM_LOOPS = 1000 + +[mesh] +symmetricGlobalX = false +symmetricGlobalY = false + +nx = 12 +ny = 16 + +[laplace] +all_terms = false +include_yguards = false diff --git a/examples/performance/laplace/laplace.cxx b/examples/performance/laplace/laplace.cxx new file mode 100644 index 0000000000..7bc67dc1a0 --- /dev/null +++ b/examples/performance/laplace/laplace.cxx @@ -0,0 +1,185 @@ +/* + * Test performance of Laplacian inversion + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using SteadyClock = std::chrono::time_point; +using Duration = std::chrono::duration; +using namespace std::chrono; + +#define TEST_BLOCK(NAME, ...) \ + { \ + __VA_ARGS__ \ + names.push_back(NAME); \ + SteadyClock start = steady_clock::now(); \ + for (int repetitionIndex = 0; repetitionIndex < NUM_LOOPS; repetitionIndex++) { \ + __VA_ARGS__; \ + } \ + times.push_back(steady_clock::now() - start); \ + } + +int main(int argc, char **argv) { + + // Initialise BOUT++, setting up mesh + BoutInitialise(argc, argv); + std::vector names; + std::vector times; + + // Get options root + auto globalOptions = Options::root(); + auto modelOpts = globalOptions["LaplaceTest"]; + int NUM_LOOPS = modelOpts["NUM_LOOPS"].withDefault(1000); + + ConditionalOutput time_output(Output::getInstance()); + time_output.enable(true); + + FieldFactory f(mesh); + + Field3D input = f.create3D("(1-gauss(x-0.5,0.2))*gauss(y-pi)*gauss(z-pi)"); + Field2D a = f.create2D("gauss(x) * sin(y)"); + Field2D c = f.create2D("sin(x) * gauss(x-0.5) * gauss(y-pi)"); + Field2D d = f.create2D("y - pi/2"); + + Field3D flag0; + TEST_BLOCK("flag0", + flag0 = invert_laplace(input, 0); + ); + + Field3D flag3; + TEST_BLOCK("flag3", + flag3 = invert_laplace(input, 3); + ); + + Field3D flag0a; + TEST_BLOCK("flag0a", + flag0a = invert_laplace(input, 0, &a); + ); + + Field3D flag3a; + TEST_BLOCK("flag3a", + flag3a = invert_laplace(input, 3, &a); + ); + + Field3D flag0ac; + TEST_BLOCK("flag0ac", + flag0ac = invert_laplace(input, 0, &a, &c); + ); + + Field3D flag3ac; + TEST_BLOCK("flag3ac", + flag3ac = invert_laplace(input, 3, &a, &c); + ); + + Field3D flag0ad; + TEST_BLOCK("flag0ad", + flag0ad = invert_laplace(input, 0, &a, nullptr, &d); + ); + + Field3D flag3ad; + TEST_BLOCK("flag3ad", + flag3ad = invert_laplace(input, 3, &a, nullptr, &d); + ); + + /// Test new interface and INVERT_IN/OUT_SET flags + + Field2D set_to = f.create2D("cos(2*y)*(x - 0.5)"); + + Laplacian *lap = Laplacian::create(); + lap->setFlags(4096); + Field3D flagis; + TEST_BLOCK("flagis", + flagis = lap->solve(input, set_to); + ); + + lap->setFlags(8192); + Field3D flagos; + TEST_BLOCK("flagos", + flagos = lap->solve(input, set_to); + ); + + lap->setCoefA(a); + lap->setFlags(4096); + Field3D flagisa; + TEST_BLOCK("flagisa", + flagisa = lap->solve(input, set_to); + ); + + lap->setFlags(8192); + Field3D flagosa; + TEST_BLOCK("flagosa", + flagosa = lap->solve(input, set_to); + ); + + lap->setCoefC(c); + lap->setFlags(4096); + Field3D flagisac; + TEST_BLOCK("flagisac", + flagisac = lap->solve(input, set_to); + ); + + lap->setFlags(8192); + Field3D flagosac; + TEST_BLOCK("flagosac", + flagosac = lap->solve(input, set_to); + ); + + lap->setCoefC(1.0); + lap->setCoefD(d); + lap->setFlags(4096); + Field3D flagisad; + TEST_BLOCK("flagisad", + flagisad = lap->solve(input, set_to); + ); + + lap->setFlags(8192); + Field3D flagosad; + TEST_BLOCK("flagosad", 
+ flagosad = lap->solve(input, set_to); + ); + + // Delete Laplacian when done + delete lap; + + // Write and close the output file + dump.write(); + dump.close(); + + MPI_Barrier(BoutComm::get()); // Wait for all processors to write data + + // Report + int width = 0; + for (const auto i : names) { + width = i.size() > width ? i.size() : width; + }; + width = width + 5; + time_output << std::setw(width) << "Case name" + << "\t" + << "Time (s)" + << "\t" + << "Time per iteration (s)" + << "\n"; + for (int i = 0; i < names.size(); i++) { + time_output << std::setw(width) << names[i] << "\t" << times[i].count() << "\t\t" << times[i].count() / NUM_LOOPS + << "\n"; + } + double sum_of_times = 0.0; + for (auto t : times) { + sum_of_times += t.count(); + } + time_output << std::setw(width) << "Total" << "\t" << sum_of_times << "\n"; + time_output << std::setw(width) << "Average" << "\t" << sum_of_times / times.size() << "\t\t" << sum_of_times / NUM_LOOPS / times.size() + << "\n"; + + BoutFinalise(); + return 0; +} diff --git a/examples/performance/laplace/makefile b/examples/performance/laplace/makefile new file mode 100644 index 0000000000..fa561d91da --- /dev/null +++ b/examples/performance/laplace/makefile @@ -0,0 +1,6 @@ + +BOUT_TOP = ../../.. + +SOURCEC = laplace.cxx + +include $(BOUT_TOP)/make.config diff --git a/examples/performance/laplace/runtest b/examples/performance/laplace/runtest new file mode 100755 index 0000000000..d52b88a925 --- /dev/null +++ b/examples/performance/laplace/runtest @@ -0,0 +1,20 @@ +#!/bin/bash + +# Run the laplace benchmark test on a range of core counts and grids + +GRID_SIZES=(16 32 64) +PROC_COUNTS=(1 2 4) +EXE=laplace +FLAGS="-q -q -q -q" + +make || exit + +for NX in ${GRID_SIZES[@]} +do + for NP in ${PROC_COUNTS[@]} + do + echo "Running laplace benchark on ${NP} cores with nx ${NX}" + mpirun -np ${NP} ./${EXE} ${FLAGS} mesh:nx=${NX} + done +done + From 8bee662ec9fc22e4efe23d872c4245c55551764e Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 7 Mar 2020 19:53:59 +0000 Subject: [PATCH 051/428] Add test-laplacexy to CMakeLists.txt --- tests/integrated/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integrated/CMakeLists.txt b/tests/integrated/CMakeLists.txt index 48a36ebe2c..44f4aefe0d 100644 --- a/tests/integrated/CMakeLists.txt +++ b/tests/integrated/CMakeLists.txt @@ -9,6 +9,7 @@ add_subdirectory(test-invertable-operator) add_subdirectory(test-io) add_subdirectory(test-io_hdf5) add_subdirectory(test-laplace) +add_subdirectory(test-laplacexy) add_subdirectory(test-slepc-solver) add_subdirectory(test-solver) add_subdirectory(test-stopCheck) From 46fb2422d442e0d22b5584e3d5e1deebe479d1bd Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 7 Mar 2020 22:23:13 +0000 Subject: [PATCH 052/428] Add CMakeLists.txt for test-laplacexy --- tests/integrated/test-laplacexy/CMakeLists.txt | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 tests/integrated/test-laplacexy/CMakeLists.txt diff --git a/tests/integrated/test-laplacexy/CMakeLists.txt b/tests/integrated/test-laplacexy/CMakeLists.txt new file mode 100644 index 0000000000..031ecdcf91 --- /dev/null +++ b/tests/integrated/test-laplacexy/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_integrated_test(test-laplacexy + SOURCES test-laplacexy.cxx + REQUIRES BOUT_HAS_PETSC + USE_RUNTEST + USE_DATA_BOUT_INP + ) From a076cbf45ef68e0f19381ad31a89c9bbd056db69 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 8 Mar 2020 12:32:12 +0000 Subject: [PATCH 053/428] Short version of 
test-laplacexy Defaults to using hypre as the preconditioner, as this is faster, but falls back to pctype=shell if hypre is not available. --- tests/integrated/CMakeLists.txt | 2 +- .../test-laplacexy-short/CMakeLists.txt | 6 + .../test-laplacexy-short/data/BOUT.inp | 53 +++++++++ .../integrated/test-laplacexy-short/makefile | 5 + .../test-laplacexy-short/plotcheck.py | 56 ++++++++++ tests/integrated/test-laplacexy-short/runtest | 93 ++++++++++++++++ .../test-laplacexy-short/test-laplacexy.cxx | 103 ++++++++++++++++++ 7 files changed, 317 insertions(+), 1 deletion(-) create mode 100644 tests/integrated/test-laplacexy-short/CMakeLists.txt create mode 100644 tests/integrated/test-laplacexy-short/data/BOUT.inp create mode 100644 tests/integrated/test-laplacexy-short/makefile create mode 100755 tests/integrated/test-laplacexy-short/plotcheck.py create mode 100755 tests/integrated/test-laplacexy-short/runtest create mode 100644 tests/integrated/test-laplacexy-short/test-laplacexy.cxx diff --git a/tests/integrated/CMakeLists.txt b/tests/integrated/CMakeLists.txt index 44f4aefe0d..cd67d1270b 100644 --- a/tests/integrated/CMakeLists.txt +++ b/tests/integrated/CMakeLists.txt @@ -9,7 +9,7 @@ add_subdirectory(test-invertable-operator) add_subdirectory(test-io) add_subdirectory(test-io_hdf5) add_subdirectory(test-laplace) -add_subdirectory(test-laplacexy) +add_subdirectory(test-laplacexy-short) add_subdirectory(test-slepc-solver) add_subdirectory(test-solver) add_subdirectory(test-stopCheck) diff --git a/tests/integrated/test-laplacexy-short/CMakeLists.txt b/tests/integrated/test-laplacexy-short/CMakeLists.txt new file mode 100644 index 0000000000..0f97dba818 --- /dev/null +++ b/tests/integrated/test-laplacexy-short/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_integrated_test(test-laplacexy-short + SOURCES test-laplacexy.cxx + REQUIRES BOUT_HAS_PETSC + USE_RUNTEST + USE_DATA_BOUT_INP + ) diff --git a/tests/integrated/test-laplacexy-short/data/BOUT.inp b/tests/integrated/test-laplacexy-short/data/BOUT.inp new file mode 100644 index 0000000000..dfe0496187 --- /dev/null +++ b/tests/integrated/test-laplacexy-short/data/BOUT.inp @@ -0,0 +1,53 @@ +mz = 1 + +[mesh] +nx = 132 +ny = 128 + +dx = (1.+.1*cos(pi*x))/nx +dy = 40.*(1.+.1*sin(y))/ny + +g11 = 1. + .1*sin(2.*pi*x)*cos(y) +g22 = 1. + .05872*sin(2.*pi*x)*cos(y) +g33 = 1. + .115832*sin(2.*pi*x)*cos(y) +g12 = 0.0 +g13 = 0. +g23 = 0.5 + .04672*sin(2.*pi*x)*cos(y) + +jyseps1_1 = 15 +jyseps2_1 = 47 +ny_inner = 64 +jyseps1_2 = 79 +jyseps2_2 = 111 + +ixseps1 = 64 +ixseps2 = 64 + +[laplacexy] +pctype = shell # Supply a second solver as a preconditioner +#pctype = hypre # Algebraic multigrid preconditioner using hypre library +finite_volume = false +rtol = 1.e-14 + +core_bndry_dirichlet = true +pf_bndry_dirichlet = true +y_bndry = neumann + +[f] +# make an input: +# - compatible with both Dirichlet and Neumann boundary conditions in either x- +# or y-directions +# - y-boundaries at -pi/2, 3pi/2, pi/2 and 5pi/2 +# - periodic in y 0->2pi +function = sin(2.*pi*x)^2 * sin(y - pi/2.)^2 + +bndry_xin = dirichlet +bndry_xout = dirichlet +bndry_yup = neumann +bndry_ydown = neumann + +[a] +function = 1. + .1*sin(x + .1)*sin(y/pi + .1) + +[b] +function = 0. diff --git a/tests/integrated/test-laplacexy-short/makefile b/tests/integrated/test-laplacexy-short/makefile new file mode 100644 index 0000000000..e7789b2203 --- /dev/null +++ b/tests/integrated/test-laplacexy-short/makefile @@ -0,0 +1,5 @@ +BOUT_TOP = ../../.. 
+ +SOURCEC = test-laplacexy.cxx + +include $(BOUT_TOP)/make.config diff --git a/tests/integrated/test-laplacexy-short/plotcheck.py b/tests/integrated/test-laplacexy-short/plotcheck.py new file mode 100755 index 0000000000..419612fdfa --- /dev/null +++ b/tests/integrated/test-laplacexy-short/plotcheck.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 + +from boutdata import collect +from matplotlib import pyplot +from sys import exit + +f = collect('f', path='data', yguards=True, info=False)[1:-1,1:-1] +sol = collect('sol', path='data', yguards=True, info=False)[1:-1,1:-1] +error = collect('error', path='data', yguards=True, info=False)[1:-1,1:-1] +absolute_error = collect('absolute_error', path='data', yguards=True, info=False)[1:-1,1:-1] + +# Note, cells closest to x-boundary in rhs and rhs_check may be slightly different because +# of different way x-boundary cells for D2DXDY are set: in LaplaceXY corner guard cells +# are set so that a 9-point stencil can be used; in the D2DXDY function (used in +# Laplace_perp) first dfdy=DDY(f) is calculated, communicated and has free_o3 x-boundary +# conditions applied, then DDX(dfdy) is returned. +# Therefore here exclude cells closest to the x-boundary so that the difference plotted +# should be small (as controlled by rtol, atol). +rhs = collect('rhs', path='data', yguards=True, info=False)[3:-3,2:-2] +rhs_check = collect('rhs_check', path='data', yguards=True, info=False)[3:-3,2:-2] + +pyplot.figure() + +pyplot.subplot(231) +pyplot.pcolormesh(f) +pyplot.title('f') +pyplot.colorbar() + +pyplot.subplot(232) +pyplot.pcolormesh(sol) +pyplot.title('sol') +pyplot.colorbar() + +pyplot.subplot(233) +pyplot.pcolormesh(error) +pyplot.title('error') +pyplot.colorbar() + +pyplot.subplot(234) +pyplot.pcolormesh(absolute_error) +pyplot.title('absolute_error') +pyplot.colorbar() + +pyplot.subplot(235) +pyplot.pcolormesh(rhs) +pyplot.title('rhs') +pyplot.colorbar() + +pyplot.subplot(236) +pyplot.pcolormesh(rhs - rhs_check) +pyplot.title('rhs diff') +pyplot.colorbar() + +pyplot.show() + +exit(0) diff --git a/tests/integrated/test-laplacexy-short/runtest b/tests/integrated/test-laplacexy-short/runtest new file mode 100755 index 0000000000..481335c160 --- /dev/null +++ b/tests/integrated/test-laplacexy-short/runtest @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 + +# +# Run the test, compare results against the benchmark +# + +#requires: petsc + +from boututils.run_wrapper import build_and_log, shell, shell_safe, launch, launch_safe +from boutdata.collect import collect +from sys import exit + +tol = 5.e-8 + +# Note accuracy of test is limited when g12!=0 by inconsistency between the way boundary +# conditions are applied in LaplaceXY and the way they are applied in the D2DXDY() +# operator called by Laplace_perp(). In D2DXDY(f) 'free_o3' boundary conditions are +# applied to dfdy before calculating DDX(dfdy). 
+tol_nonorth = 2.e-5 + +argslist = ['laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=dirichlet ' + 'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=dirichlet f:bndry_ydown=dirichlet', + 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=neumann ' + 'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', + 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=free_o3 ' + 'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=free_o3 f:bndry_ydown=free_o3', + #'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=dirichlet ' + #'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=dirichlet f:bndry_ydown=dirichlet', + #'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=neumann ' + #'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', + #'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=dirichlet ' + #'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=dirichlet f:bndry_ydown=dirichlet', + #'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=neumann ' + #'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', + #'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=dirichlet ' + #'f:bndry_xin=neumann f:bndry_xout=neumann f:bndry_yup=dirichlet f:bndry_ydown=dirichlet', + 'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=neumann ' + 'f:bndry_xin=neumann f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann', + 'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=free_o3 ' + 'f:bndry_xin=neumann f:bndry_xout=dirichlet f:bndry_yup=free_o3 f:bndry_ydown=free_o3', + 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=neumann ' + 'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann b:function=.1', + 'laplacexy:core_bndry_dirichlet=true laplacexy:pf_bndry_dirichlet=true laplacexy:y_bndry=free_o3 ' + 'f:bndry_xin=dirichlet f:bndry_xout=dirichlet f:bndry_yup=free_o3 f:bndry_ydown=free_o3 b:function=.1', + 'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=neumann ' + 'f:bndry_xin=neumann f:bndry_xout=dirichlet f:bndry_yup=neumann f:bndry_ydown=neumann b:function=.1', + 'laplacexy:core_bndry_dirichlet=false laplacexy:pf_bndry_dirichlet=false laplacexy:y_bndry=free_o3 ' + 'f:bndry_xin=neumann f:bndry_xout=dirichlet f:bndry_yup=free_o3 f:bndry_ydown=free_o3 b:function=.1', + ] + +print('Making LaplaceXY inversion test') +shell_safe('make > make.log') + +print('Running LaplaceXY inversion test') +success = True + +for nproc in [8]: + print(' %d processors....' % nproc) + for args in argslist: + cmd = './test-laplacexy ' + args + ' -ksp_gmres_restart 500' + + shell('rm data/BOUT.dmp.*.nc > /dev/null 2>&1') + + s, out = launch(cmd + ' laplacexy:pctype=hypre', nproc=nproc, pipe=True,verbose=True) + if s == 134: + # PETSc did not recognise pctype option, probably means it + # was not compiled with hypre, so skip tests that need + # hypre to converge + print('hypre not available as pre-conditioner in PETSc. 
Re-running with ' + + 'pctype=shell...') + s, out = launch(cmd, nproc=nproc, pipe=True,verbose=True) + + f = open('run.log.'+str(nproc), 'w') + f.write(out) + f.close() + + # Collect output data + error = collect('max_error', path='data', info=False) + if error <= 0: + print('Convergence error') + success = False + elif error > tol: + print('Fail, maximum error is = '+str(error)) + success = False + else: + print('Pass') + +if success: + print(' => All LaplaceXY inversion tests passed') + exit(0) +else: + print(' => Some failed tests') + exit(1) diff --git a/tests/integrated/test-laplacexy-short/test-laplacexy.cxx b/tests/integrated/test-laplacexy-short/test-laplacexy.cxx new file mode 100644 index 0000000000..4e85b48fef --- /dev/null +++ b/tests/integrated/test-laplacexy-short/test-laplacexy.cxx @@ -0,0 +1,103 @@ +/************************************************************************** + * Testing Perpendicular Laplacian inversion using PETSc solvers + * + ************************************************************************** + * Copyright 2019 J.T. Omotani, B.D. Dudson + * + * Contact: Ben Dudson, bd512@york.ac.uk + * + * This file is part of BOUT++. + * + * BOUT++ is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * BOUT++ is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with BOUT++. If not, see . 
+ * + **************************************************************************/ + +#include +#include +#include +#include +#include +#include + +int main(int argc, char** argv) { + + BoutInitialise(argc, argv); + + auto coords = mesh->getCoordinates(); + + auto& opt = Options::root(); + + LaplaceXY laplacexy; + + bool include_y_derivs = opt["laplacexy"]["include_y_derivs"]; + + // Solving equations of the form + // Div(A Grad_perp(f)) + B*f = rhs + // A*Laplace_perp(f) + Grad_perp(A).Grad_perp(f) + B*f = rhs + Field2D f, a, b, sol; + Field2D error, absolute_error; //Absolute value of relative error: abs((f - sol)/f) + BoutReal max_error; //Output of test + + initial_profile("f", f); + initial_profile("a", a); + initial_profile("b", b); + + // Apply boundary conditions to f, so the boundary cells match the way boundary + // conditions will be applied to sol + f.setBoundary("f"); + f.applyBoundary(); + + //////////////////////////////////////////////////////////////////////////////////////// + + Field2D rhs, rhs_check; + if (include_y_derivs) { + rhs = a*Laplace_perp(f) + Grad_perp(a)*Grad_perp(f) + b*f; + } else { + rhs = a*Delp2(f, CELL_DEFAULT, false) + coords->g11*DDX(a)*DDX(f) + b*f; + } + + laplacexy.setCoefs(a, b); + + sol = laplacexy.solve(rhs, 0.); + error = (f - sol)/f; + absolute_error = f - sol; + max_error = max(abs(absolute_error), true); + + output<<"Magnitude of maximum absolute error is "<communicate(sol); + if (include_y_derivs) { + rhs_check = a*Laplace_perp(sol) + Grad_perp(a)*Grad_perp(sol) + b*sol; + } else { + rhs_check = a*Delp2(sol, CELL_DEFAULT, false) + coords->g11*DDX(a)*DDX(sol) + b*sol; + } + + dump.add(a, "a"); + dump.add(b, "b"); + dump.add(f, "f"); + dump.add(sol, "sol"); + dump.add(error, "error"); + dump.add(absolute_error, "absolute_error"); + dump.add(max_error, "max_error"); + dump.add(rhs, "rhs"); + dump.add(rhs_check, "rhs_check"); + + dump.write(); + dump.close(); + + MPI_Barrier(BoutComm::get()); // Wait for all processors to write data + + BoutFinalise(); + return 0; +} From 2f121e06dc66ec17f6a9ef0c3dd8b7a0d1ab507a Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 10 Mar 2020 20:37:02 +0000 Subject: [PATCH 054/428] Don't increase iterations before GMRES restart in test-laplacexy* Leaving the default (30) iterations before restarting results in faster convergence. --- tests/integrated/test-laplacexy-short/runtest | 2 +- tests/integrated/test-laplacexy/runtest | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integrated/test-laplacexy-short/runtest b/tests/integrated/test-laplacexy-short/runtest index 481335c160..f4dc4000ef 100755 --- a/tests/integrated/test-laplacexy-short/runtest +++ b/tests/integrated/test-laplacexy-short/runtest @@ -57,7 +57,7 @@ success = True for nproc in [8]: print(' %d processors....' % nproc) for args in argslist: - cmd = './test-laplacexy ' + args + ' -ksp_gmres_restart 500' + cmd = './test-laplacexy ' + args shell('rm data/BOUT.dmp.*.nc > /dev/null 2>&1') diff --git a/tests/integrated/test-laplacexy/runtest b/tests/integrated/test-laplacexy/runtest index 2cfda89df4..417dee4cae 100755 --- a/tests/integrated/test-laplacexy/runtest +++ b/tests/integrated/test-laplacexy/runtest @@ -62,7 +62,7 @@ for nproc in [8]: if not nonorth: args += ' mesh:g12=0.' 
- cmd = './test-laplacexy ' + args + ' -ksp_gmres_restart 500' + cmd = './test-laplacexy ' + args shell('rm data/BOUT.dmp.*.nc > /dev/null 2>&1') From 19c6a7e2ea36ca417a9feb7305dc32f820c9770c Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 13 Mar 2020 12:11:15 +0000 Subject: [PATCH 055/428] Remove import build_and_log mistakenly added in cherry-pick --- tests/integrated/test-laplacexy-short/runtest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integrated/test-laplacexy-short/runtest b/tests/integrated/test-laplacexy-short/runtest index f4dc4000ef..fac6ececf8 100755 --- a/tests/integrated/test-laplacexy-short/runtest +++ b/tests/integrated/test-laplacexy-short/runtest @@ -6,7 +6,7 @@ #requires: petsc -from boututils.run_wrapper import build_and_log, shell, shell_safe, launch, launch_safe +from boututils.run_wrapper import shell, shell_safe, launch, launch_safe from boutdata.collect import collect from sys import exit From 5a088c0396612754ae0afa23f5248e9316ed8184 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 13 Mar 2020 14:37:42 +0000 Subject: [PATCH 056/428] Match name of test-laplacexy-short executable to cmake target --- tests/integrated/test-laplacexy-short/makefile | 1 + tests/integrated/test-laplacexy-short/runtest | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integrated/test-laplacexy-short/makefile b/tests/integrated/test-laplacexy-short/makefile index e7789b2203..d1fcbd7947 100644 --- a/tests/integrated/test-laplacexy-short/makefile +++ b/tests/integrated/test-laplacexy-short/makefile @@ -1,5 +1,6 @@ BOUT_TOP = ../../.. SOURCEC = test-laplacexy.cxx +TARGET = test-laplacexy-short include $(BOUT_TOP)/make.config diff --git a/tests/integrated/test-laplacexy-short/runtest b/tests/integrated/test-laplacexy-short/runtest index fac6ececf8..7417326e8b 100755 --- a/tests/integrated/test-laplacexy-short/runtest +++ b/tests/integrated/test-laplacexy-short/runtest @@ -57,7 +57,7 @@ success = True for nproc in [8]: print(' %d processors....' % nproc) for args in argslist: - cmd = './test-laplacexy ' + args + cmd = './test-laplacexy-short ' + args shell('rm data/BOUT.dmp.*.nc > /dev/null 2>&1') From 9cdc206f4d5cc9e3c84d209abae451746fd803ce Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 6 Aug 2020 17:36:38 +0100 Subject: [PATCH 057/428] Allow saving diagnostic variables from Solver subclasses --- include/bout/solver.hxx | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index eaf75060cf..7a8125f8ae 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -378,6 +378,22 @@ protected: std::vector> v2d; std::vector> v3d; + /// Vectors of diagnostic variables to save + std::vector> diagnostic_int; + std::vector> diagnostic_BoutReal; + void add_int_diagnostic(int i, std::string name) { + VarStr v; + v.var = &i; + v.name = name; + diagnostic_int.emplace_back(std::move(v)); + }; + void add_BoutReal_diagnostic(BoutReal r, std::string name) { + VarStr v; + v.var = &r; + v.name = name; + diagnostic_BoutReal.emplace_back(std::move(v)); + }; + /// Can this solver handle constraints? Set to true if so. bool has_constraints{false}; /// Has init been called yet? 
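As an illustration of how the hooks added above are intended to be used (a sketch only, not code from this patch series; the class and member names are hypothetical, and the CVODE/ARKODE solvers in the following patches are the real users):

    #include <bout/solver.hxx>

    // Hypothetical solver subclass registering diagnostics for the dump file.
    // Registration happens in the constructor rather than in init(), because
    // Solver::outputVars() is called before init().
    class MySolver : public Solver {
    public:
      explicit MySolver(Options* opts) : Solver(opts) {
        add_int_diagnostic(nsteps, "mysolver_nsteps");
        add_BoutReal_diagnostic(last_dt, "mysolver_last_dt");
      }
      int run() override {
        // ... advance the solution, updating nsteps and last_dt as it goes ...
        return 0;
      }
    private:
      // Member variables whose values end up in the output file
      int nsteps{0};          // cumulative number of internal steps
      BoutReal last_dt{0.0};  // size of the most recent internal step
    };

Note that PATCH 067 later in this series changes these helpers to take their first argument by reference, so that the stored pointer refers to the caller's variable rather than to a by-value copy of it.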
From e6f6101de82e4ccdb57d89fd5f5722aacd4f4d0d Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 6 Aug 2020 17:37:20 +0100 Subject: [PATCH 058/428] Save diagnostics to dump files in CvodeSolver --- src/solver/impls/cvode/cvode.cxx | 79 +++++++++++++++++++------------- src/solver/impls/cvode/cvode.hxx | 12 +++++ src/solver/solver.cxx | 10 ++++ 3 files changed, 70 insertions(+), 31 deletions(-) diff --git a/src/solver/impls/cvode/cvode.cxx b/src/solver/impls/cvode/cvode.cxx index 3f4747268b..855b4685f0 100644 --- a/src/solver/impls/cvode/cvode.cxx +++ b/src/solver/impls/cvode/cvode.cxx @@ -112,6 +112,20 @@ constexpr auto& SUNLinSol_SPGMR = SUNSPGMR; CvodeSolver::CvodeSolver(Options* opts) : Solver(opts) { has_constraints = false; // This solver doesn't have constraints canReset = true; + + // Add diagnostics to output + // Needs to be in constructor not init() because init() is called after + // Solver::outputVars() + add_int_diagnostic(nsteps, "cvode_nsteps"); + add_int_diagnostic(nfevals, "cvode_nfevals"); + add_int_diagnostic(nniters, "cvode_nniters"); + add_int_diagnostic(npevals, "cvode_npevals"); + add_int_diagnostic(nliters, "cvode_nliters"); + add_BoutReal_diagnostic(last_step, "cvode_last_step"); + add_int_diagnostic(last_order, "cvode_last_order"); + add_int_diagnostic(num_fails, "cvode_num_fails"); + add_int_diagnostic(nonlin_fails, "cvode_nonlin_fails"); + add_int_diagnostic(stab_lims, "cvode_stab_lims"); } CvodeSolver::~CvodeSolver() { @@ -382,18 +396,41 @@ int CvodeSolver::run() { throw BoutException("SUNDIALS CVODE timestep failed\n"); } + // Get additional diagnostics + long int temp_long_int; + CVodeGetNumSteps(cvode_mem, &temp_long_int); + nsteps = int(temp_long_int); + CVodeGetNumRhsEvals(cvode_mem, &temp_long_int); + nfevals = int(temp_long_int); + CVodeGetNumNonlinSolvIters(cvode_mem, &temp_long_int); + nniters = int(temp_long_int); + CVSpilsGetNumPrecSolves(cvode_mem, &temp_long_int); + npevals = int(temp_long_int); + CVSpilsGetNumLinIters(cvode_mem, &temp_long_int); + nliters = int(temp_long_int); + + // Last step size + CVodeGetLastStep(cvode_mem, &last_step); + + // Order used in last step + CVodeGetLastOrder(cvode_mem, &last_order); + + // Local error test failures + CVodeGetNumErrTestFails(cvode_mem, &temp_long_int); + num_fails = int(temp_long_int); + + // Number of nonlinear convergence failures + CVodeGetNumNonlinSolvConvFails(cvode_mem, &temp_long_int); + nonlin_fails = int(temp_long_int); + + // Stability limit order reductions + CVodeGetNumStabLimOrderReds(cvode_mem, &temp_long_int); + stab_lims = int(temp_long_int); + if (diagnose) { // Print additional diagnostics - long int nsteps, nfevals, nniters, npevals, nliters; - - CVodeGetNumSteps(cvode_mem, &nsteps); - CVodeGetNumRhsEvals(cvode_mem, &nfevals); - CVodeGetNumNonlinSolvIters(cvode_mem, &nniters); - CVSpilsGetNumPrecSolves(cvode_mem, &npevals); - CVSpilsGetNumLinIters(cvode_mem, &nliters); - output.write( - "\nCVODE: nsteps %ld, nfevals %ld, nniters %ld, npevals %ld, nliters %ld\n", + "\nCVODE: nsteps %d, nfevals %d, nniters %d, npevals %d, nliters %d\n", nsteps, nfevals, nniters, npevals, nliters); output.write(" -> Newton iterations per step: %e\n", @@ -403,32 +440,12 @@ int CvodeSolver::run() { output.write(" -> Preconditioner evaluations per Newton: %e\n", static_cast(npevals) / static_cast(nniters)); - // Last step size - BoutReal last_step; - CVodeGetLastStep(cvode_mem, &last_step); - - // Order used in last step - int last_order; - CVodeGetLastOrder(cvode_mem, &last_order); - output.write(" 
-> Last step size: %e, order: %d\n", last_step, last_order); - // Local error test failures - long int num_fails; - CVodeGetNumErrTestFails(cvode_mem, &num_fails); - - // Number of nonlinear convergence failures - long int nonlin_fails; - CVodeGetNumNonlinSolvConvFails(cvode_mem, &nonlin_fails); - - output.write(" -> Local error fails: %ld, nonlinear convergence fails: %ld\n", + output.write(" -> Local error fails: %d, nonlinear convergence fails: %d\n", num_fails, nonlin_fails); - // Stability limit order reductions - long int stab_lims; - CVodeGetNumStabLimOrderReds(cvode_mem, &stab_lims); - - output.write(" -> Stability limit order reductions: %ld\n", stab_lims); + output.write(" -> Stability limit order reductions: %d\n", stab_lims); } /// Call the monitor function diff --git a/src/solver/impls/cvode/cvode.hxx b/src/solver/impls/cvode/cvode.hxx index 71415d5171..2ec2e519a7 100644 --- a/src/solver/impls/cvode/cvode.hxx +++ b/src/solver/impls/cvode/cvode.hxx @@ -90,6 +90,18 @@ private: BoutReal pre_Wtime{0.0}; // Time in preconditioner int pre_ncalls{0}; // Number of calls to preconditioner + // Diagnostics from CVODE + int nsteps; + int nfevals; + int nniters; + int npevals; + int nliters; + BoutReal last_step; + int last_order; + int num_fails; + int nonlin_fails; + int stab_lims; + bool cvode_initialised = false; void set_abstol_values(BoutReal* abstolvec_data, std::vector& f2dtols, diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index aa0c27db4f..0ef54779f6 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -553,6 +553,16 @@ void Solver::outputVars(Datafile &outputfile, bool save_repeat) { outputfile.add(*(f.MMS_err), ("E_" + f.name).c_str(), save_repeat); } } + + // Add solver diagnostics to output file + for (const auto &d : diagnostic_int) { + // Add to dump file (appending) + outputfile.add(*(d.var), d.name.c_str(), save_repeat); + } + for (const auto &d : diagnostic_BoutReal) { + // Add to dump file (appending) + outputfile.add(*(d.var), d.name.c_str(), save_repeat); + } } ///////////////////////////////////////////////////// From ab5312c3f0923e0bc5b5010343bdf7bb56a3b3fd Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 6 Aug 2020 17:37:44 +0100 Subject: [PATCH 059/428] Save diagnostics to output files in ArkodeSolver --- src/solver/impls/arkode/arkode.cxx | 33 ++++++++++++++++++++++-------- src/solver/impls/arkode/arkode.hxx | 8 ++++++++ 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/src/solver/impls/arkode/arkode.cxx b/src/solver/impls/arkode/arkode.cxx index d938825be2..37c5ebffd5 100644 --- a/src/solver/impls/arkode/arkode.cxx +++ b/src/solver/impls/arkode/arkode.cxx @@ -164,6 +164,14 @@ constexpr auto& ARKStepSetUserData = ARKodeSetUserData; ArkodeSolver::ArkodeSolver(Options* opts) : Solver(opts) { has_constraints = false; // This solver doesn't have constraints + + // Add diagnostics to output + add_int_diagnostic(nsteps, "arkode_nsteps"); + add_int_diagnostic(nfe_evals, "arkode_nfe_evals"); + add_int_diagnostic(nfi_evals, "arkode_nfi_evals"); + add_int_diagnostic(nniters, "arkode_nniters"); + add_int_diagnostic(npevals, "arkode_npevals"); + add_int_diagnostic(nliters, "arkode_nliters"); } ArkodeSolver::~ArkodeSolver() { @@ -503,18 +511,25 @@ int ArkodeSolver::run() { throw BoutException("ARKode timestep failed\n"); } + // Get additional diagnostics + long int temp_long_int, temp_long_int2; + ARKStepGetNumSteps(arkode_mem, &temp_long_int); + nsteps = int(temp_long_int); + ARKStepGetNumRhsEvals(arkode_mem, &temp_long_int, 
&temp_long_int2); + nfe_evals = int(temp_long_int); + nfi_evals = int(temp_long_int2); + ARKStepGetNumNonlinSolvIters(arkode_mem, &temp_long_int); + nniters = int(temp_long_int); + ARKStepGetNumPrecEvals(arkode_mem, &temp_long_int); + npevals = int(temp_long_int); + ARKStepGetNumLinIters(arkode_mem, &temp_long_int); + nliters = int(temp_long_int); + if (diagnose) { // Print additional diagnostics - long int nsteps, nfe_evals, nfi_evals, nniters, npevals, nliters; - - ARKStepGetNumSteps(arkode_mem, &nsteps); - ARKStepGetNumRhsEvals(arkode_mem, &nfe_evals, &nfi_evals); - ARKStepGetNumNonlinSolvIters(arkode_mem, &nniters); - ARKStepGetNumPrecEvals(arkode_mem, &npevals); - ARKStepGetNumLinIters(arkode_mem, &nliters); - output.write("\nARKODE: nsteps %ld, nfe_evals %ld, nfi_evals %ld, nniters %ld, " - "npevals %ld, nliters %ld\n", + output.write("\nARKODE: nsteps %d, nfe_evals %d, nfi_evals %d, nniters %d, " + "npevals %d, nliters %d\n", nsteps, nfe_evals, nfi_evals, nniters, npevals, nliters); output.write(" -> Newton iterations per step: %e\n", diff --git a/src/solver/impls/arkode/arkode.hxx b/src/solver/impls/arkode/arkode.hxx index c74ffad9ea..4bc3bc1a16 100644 --- a/src/solver/impls/arkode/arkode.hxx +++ b/src/solver/impls/arkode/arkode.hxx @@ -91,6 +91,14 @@ private: BoutReal pre_Wtime{0.0}; // Time in preconditioner int pre_ncalls{0}; // Number of calls to preconditioner + // Diagnostics from ARKODE + int nsteps; + int nfe_evals; + int nfi_evals; + int nniters; + int npevals; + int nliters; + void set_abstol_values(BoutReal* abstolvec_data, std::vector& f2dtols, std::vector& f3dtols); void loop_abstol_values_op(Ind2D i2d, BoutReal* abstolvec_data, int& p, From 10ca872d509595b7d8fe63c745ef43933beb7330 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 6 Aug 2020 18:20:12 +0100 Subject: [PATCH 060/428] Optional arguments to give descriptions for output variables in Solver Allows output dump files to be more self-documenting. --- include/bout/solver.hxx | 15 +++++++++------ src/solver/solver.cxx | 24 ++++++++++++++++++++---- 2 files changed, 29 insertions(+), 10 deletions(-) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index 7a8125f8ae..49aad3beff 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -233,10 +233,10 @@ public: /// Add a variable to be solved. This must be done in the /// initialisation stage, before the simulation starts. - virtual void add(Field2D& v, const std::string& name); - virtual void add(Field3D& v, const std::string& name); - virtual void add(Vector2D& v, const std::string& name); - virtual void add(Vector3D& v, const std::string& name); + virtual void add(Field2D& v, const std::string& name, const std::string& description = ""); + virtual void add(Field3D& v, const std::string& name, const std::string& description = ""); + virtual void add(Vector2D& v, const std::string& name, const std::string& description = ""); + virtual void add(Vector3D& v, const std::string& name, const std::string& description = ""); /// Returns true if constraints available virtual bool constraints() { return has_constraints; } @@ -347,6 +347,7 @@ protected: bool covariant{false}; /// For vectors bool evolve_bndry{false}; /// Are the boundary regions being evolved? std::string name; /// Name of the variable + std::string description{""}; /// Description of what the variable is }; /// Does \p var represent field \p name? 
@@ -381,16 +382,18 @@ protected: /// Vectors of diagnostic variables to save std::vector> diagnostic_int; std::vector> diagnostic_BoutReal; - void add_int_diagnostic(int i, std::string name) { + void add_int_diagnostic(int i, std::string name, std::string description = "") { VarStr v; v.var = &i; v.name = name; + v.description = description; diagnostic_int.emplace_back(std::move(v)); }; - void add_BoutReal_diagnostic(BoutReal r, std::string name) { + void add_BoutReal_diagnostic(BoutReal r, std::string name, std::string description = "") { VarStr v; v.var = &r; v.name = name; + v.description = description; diagnostic_BoutReal.emplace_back(std::move(v)); }; diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index 0ef54779f6..5658b04819 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -82,7 +82,7 @@ void Solver::setModel(PhysicsModel *m) { * Add fields **************************************************************************/ -void Solver::add(Field2D& v, const std::string& name) { +void Solver::add(Field2D& v, const std::string& name, const std::string& description) { TRACE("Adding 2D field: Solver::add(%s)", name.c_str()); #if CHECK > 0 @@ -103,6 +103,7 @@ void Solver::add(Field2D& v, const std::string& name) { d.F_var = &ddt(v); d.location = v.getLocation(); d.name = name; + d.description = description; #ifdef TRACK v.name = name; @@ -139,7 +140,7 @@ void Solver::add(Field2D& v, const std::string& name) { f2d.emplace_back(std::move(d)); } -void Solver::add(Field3D& v, const std::string& name) { +void Solver::add(Field3D& v, const std::string& name, const std::string& description) { TRACE("Adding 3D field: Solver::add(%s)", name.c_str()); Mesh* mesh = v.getMesh(); @@ -167,6 +168,7 @@ void Solver::add(Field3D& v, const std::string& name) { d.F_var = &ddt(v); d.location = v.getLocation(); d.name = name; + d.description = description; #ifdef TRACK v.name = name; @@ -198,7 +200,7 @@ void Solver::add(Field3D& v, const std::string& name) { f3d.emplace_back(std::move(d)); } -void Solver::add(Vector2D& v, const std::string& name) { +void Solver::add(Vector2D& v, const std::string& name, const std::string& description) { TRACE("Adding 2D vector: Solver::add(%s)", name.c_str()); if (varAdded(name)) @@ -217,6 +219,7 @@ void Solver::add(Vector2D& v, const std::string& name) { d.F_var = &ddt(v); d.covariant = v.covariant; d.name = name; + d.description = description; /// NOTE: No initial_profile call, because this will be done for each /// component individually. 
@@ -237,7 +240,7 @@ void Solver::add(Vector2D& v, const std::string& name) { v2d.emplace_back(std::move(d)); } -void Solver::add(Vector3D& v, const std::string& name) { +void Solver::add(Vector3D& v, const std::string& name, const std::string& description) { TRACE("Adding 3D vector: Solver::add(%s)", name.c_str()); if (varAdded(name)) @@ -256,6 +259,7 @@ void Solver::add(Vector3D& v, const std::string& name) { d.F_var = &ddt(v); d.covariant = v.covariant; d.name = name; + d.description = description; // Add suffix, depending on co- /contravariance if (v.covariant) { @@ -543,10 +547,16 @@ void Solver::outputVars(Datafile &outputfile, bool save_repeat) { for(const auto& f : f2d) { // Add to dump file (appending) outputfile.add(*(f.var), f.name.c_str(), save_repeat); + if (not f.description.empty()) { + outputfile.setAttribute(f.name, "description", f.description); + } } for(const auto& f : f3d) { // Add to dump file (appending) outputfile.add(*(f.var), f.name.c_str(), save_repeat); + if (not f.description.empty()) { + outputfile.setAttribute(f.name, "description", f.description); + } if(mms) { // Add an error variable @@ -558,10 +568,16 @@ void Solver::outputVars(Datafile &outputfile, bool save_repeat) { for (const auto &d : diagnostic_int) { // Add to dump file (appending) outputfile.add(*(d.var), d.name.c_str(), save_repeat); + if (not d.description.empty()) { + outputfile.setAttribute(d.name, "description", d.description); + } } for (const auto &d : diagnostic_BoutReal) { // Add to dump file (appending) outputfile.add(*(d.var), d.name.c_str(), save_repeat); + if (not d.description.empty()) { + outputfile.setAttribute(d.name, "description", d.description); + } } } From 9009de65e76ed926f5699bf2b96585fab9b08e6e Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 6 Aug 2020 18:21:00 +0100 Subject: [PATCH 061/428] Descriptions for diagnostic variables in CvodeSolver --- src/solver/impls/cvode/cvode.cxx | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/src/solver/impls/cvode/cvode.cxx b/src/solver/impls/cvode/cvode.cxx index 855b4685f0..e5914f8eeb 100644 --- a/src/solver/impls/cvode/cvode.cxx +++ b/src/solver/impls/cvode/cvode.cxx @@ -116,16 +116,21 @@ CvodeSolver::CvodeSolver(Options* opts) : Solver(opts) { // Add diagnostics to output // Needs to be in constructor not init() because init() is called after // Solver::outputVars() - add_int_diagnostic(nsteps, "cvode_nsteps"); - add_int_diagnostic(nfevals, "cvode_nfevals"); - add_int_diagnostic(nniters, "cvode_nniters"); - add_int_diagnostic(npevals, "cvode_npevals"); - add_int_diagnostic(nliters, "cvode_nliters"); - add_BoutReal_diagnostic(last_step, "cvode_last_step"); - add_int_diagnostic(last_order, "cvode_last_order"); - add_int_diagnostic(num_fails, "cvode_num_fails"); - add_int_diagnostic(nonlin_fails, "cvode_nonlin_fails"); - add_int_diagnostic(stab_lims, "cvode_stab_lims"); + add_int_diagnostic(nsteps, "cvode_nsteps", "Cumulative number of internal steps"); + add_int_diagnostic(nfevals, "cvode_nfevals", "No. of calls to r.h.s. function"); + add_int_diagnostic(nniters, "cvode_nniters", "No. of nonlinear solver iterations"); + add_int_diagnostic(npevals, "cvode_npevals", "No. of preconditioner solves"); + add_int_diagnostic(nliters, "cvode_nliters", "No. 
of linear iterations"); + add_BoutReal_diagnostic(last_step, "cvode_last_step", + "Step size used for the last step before each output"); + add_int_diagnostic(last_order, "cvode_last_order", + "Order used during the last step before each output"); + add_int_diagnostic(num_fails, "cvode_num_fails", + "No. of local error test failures that have occurred"); + add_int_diagnostic(nonlin_fails, "cvode_nonlin_fails", + "No. of nonlinear convergence failures"); + add_int_diagnostic(stab_lims, "cvode_stab_lims", + "No. of order reductions due to stability limit detection"); } CvodeSolver::~CvodeSolver() { From 452541d1f1c83037a4c3f591789cf8e6bc55c3ff Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 6 Aug 2020 18:21:20 +0100 Subject: [PATCH 062/428] Descriptions for diagnostic variables in ArkodeSolver --- src/solver/impls/arkode/arkode.cxx | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/solver/impls/arkode/arkode.cxx b/src/solver/impls/arkode/arkode.cxx index 37c5ebffd5..4cf0f885d8 100644 --- a/src/solver/impls/arkode/arkode.cxx +++ b/src/solver/impls/arkode/arkode.cxx @@ -166,12 +166,16 @@ ArkodeSolver::ArkodeSolver(Options* opts) : Solver(opts) { has_constraints = false; // This solver doesn't have constraints // Add diagnostics to output - add_int_diagnostic(nsteps, "arkode_nsteps"); - add_int_diagnostic(nfe_evals, "arkode_nfe_evals"); - add_int_diagnostic(nfi_evals, "arkode_nfi_evals"); - add_int_diagnostic(nniters, "arkode_nniters"); - add_int_diagnostic(npevals, "arkode_npevals"); - add_int_diagnostic(nliters, "arkode_nliters"); + add_int_diagnostic(nsteps, "arkode_nsteps", "Cumulative number of internal steps"); + add_int_diagnostic(nfe_evals, "arkode_nfe_evals", + "No. of calls to fe (explicit portion of the right-hand-side " + "function) function"); + add_int_diagnostic(nfi_evals, "arkode_nfi_evals", + "No. of calls to fi (implicit portion of the right-hand-side " + "function) function"); + add_int_diagnostic(nniters, "arkode_nniters", "No. of nonlinear solver iterations"); + add_int_diagnostic(npevals, "arkode_npevals", "No. of preconditioner evaluations"); + add_int_diagnostic(nliters, "arkode_nliters", "No. of linear iterations"); } ArkodeSolver::~ArkodeSolver() { From a619a4f5415ca44f79b20e7208b609a242e74e0d Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 6 Aug 2020 18:35:47 +0100 Subject: [PATCH 063/428] Pass description through DataFile::add() Cannot just call setAttribute() in Solver::outputVars() because DataFile might not be open at that point. Instead need to pass through and set at the same point that the Field attributes are written. 
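For illustration, with this change a caller can attach documentation at the point where a variable is added to the output file (the variable name below is made up for the example, not taken from the patch):

    // e.g. in a physics model, assuming a member `Field3D my_var;` exists:
    dump.add(my_var, "my_var", true,
             "Example field, shown only to illustrate the new description argument");

The description string is stored in the Datafile's VarStr entry alongside the pointer and the save_repeat flag, and is written out as a "description" attribute from inside Datafile::write(), where the underlying DataFormat is known to be open.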
--- include/datafile.hxx | 35 +++++++++++++++--------- src/fileio/datafile.cxx | 60 +++++++++++++++++++++++++++++++++++------ src/solver/solver.cxx | 20 +++----------- 3 files changed, 78 insertions(+), 37 deletions(-) diff --git a/include/datafile.hxx b/include/datafile.hxx index 084955a773..af983cb659 100644 --- a/include/datafile.hxx +++ b/include/datafile.hxx @@ -68,14 +68,22 @@ class Datafile { void addOnce(T& value, std::string name) { add(value, name.c_str(), false); } - void add(int &i, const char *name, bool save_repeat = false); - void add(BoutReal &r, const char *name, bool save_repeat = false); - void add(bool &b, const char* name, bool save_repeat = false); - void add(Field2D &f, const char *name, bool save_repeat = false); - void add(Field3D &f, const char *name, bool save_repeat = false); - void add(FieldPerp &f, const char *name, bool save_repeat = false); - void add(Vector2D &f, const char *name, bool save_repeat = false); - void add(Vector3D &f, const char *name, bool save_repeat = false); + void add(int &i, const char *name, bool save_repeat = false, + const std::string &description = ""); + void add(BoutReal &r, const char *name, bool save_repeat = false, + const std::string &description = ""); + void add(bool &b, const char* name, bool save_repeat = false, + const std::string &description = ""); + void add(Field2D &f, const char *name, bool save_repeat = false, + const std::string &description = ""); + void add(Field3D &f, const char *name, bool save_repeat = false, + const std::string &description = ""); + void add(FieldPerp &f, const char *name, bool save_repeat = false, + const std::string &description = ""); + void add(Vector2D &f, const char *name, bool save_repeat = false, + const std::string &description = ""); + void add(Vector3D &f, const char *name, bool save_repeat = false, + const std::string &description = ""); bool read(); ///< Read data into added variables bool write(); ///< Write added variables @@ -117,11 +125,12 @@ class Datafile { /// A structure to hold a pointer to a class, and associated name and flags template struct VarStr { - T *ptr; ///< Pointer to the data. - ///< Note that this may be a user object, not a copy, so must not be destroyed - std::string name; ///< Name as it appears in the output file - bool save_repeat; ///< If true, has a time dimension and is saved every time step - bool covar; ///< For vectors, true if a covariant vector, false if contravariant + T *ptr; ///< Pointer to the data. 
+ ///< Note that this may be a user object, not a copy, so must not be destroyed + std::string name; ///< Name as it appears in the output file + bool save_repeat; ///< If true, has a time dimension and is saved every time step + bool covar; ///< For vectors, true if a covariant vector, false if contravariant + std::string description{""}; ///< Documentation of what the variable is }; // one set per variable type diff --git a/src/fileio/datafile.cxx b/src/fileio/datafile.cxx index c1c46d562e..8c0da4512a 100644 --- a/src/fileio/datafile.cxx +++ b/src/fileio/datafile.cxx @@ -440,7 +440,7 @@ void Datafile::setLowPrecision() { file->setLowPrecision(); } -void Datafile::add(int &i, const char *name, bool save_repeat) { +void Datafile::add(int &i, const char *name, bool save_repeat, const std::string &description) { TRACE("DataFile::add(int)"); if (!enabled) return; @@ -461,6 +461,7 @@ void Datafile::add(int &i, const char *name, bool save_repeat) { d.name = name; d.save_repeat = save_repeat; d.covar = false; + d.description = description; int_arr.push_back(d); @@ -497,7 +498,7 @@ void Datafile::add(int &i, const char *name, bool save_repeat) { } } -void Datafile::add(BoutReal &r, const char *name, bool save_repeat) { +void Datafile::add(BoutReal &r, const char *name, bool save_repeat, const std::string &description) { TRACE("DataFile::add(BoutReal)"); if (!enabled) return; @@ -518,6 +519,7 @@ void Datafile::add(BoutReal &r, const char *name, bool save_repeat) { d.name = name; d.save_repeat = save_repeat; d.covar = false; + d.description = description; BoutReal_arr.push_back(d); @@ -556,7 +558,7 @@ void Datafile::add(BoutReal &r, const char *name, bool save_repeat) { } } -void Datafile::add(bool &b, const char *name, bool save_repeat) { +void Datafile::add(bool &b, const char *name, bool save_repeat, const std::string &description) { TRACE("DataFile::add(bool)"); if (!enabled) return; @@ -577,6 +579,7 @@ void Datafile::add(bool &b, const char *name, bool save_repeat) { d.name = name; d.save_repeat = save_repeat; d.covar = false; + d.description = description; bool_arr.push_back(d); @@ -613,7 +616,7 @@ void Datafile::add(bool &b, const char *name, bool save_repeat) { } } -void Datafile::add(Field2D &f, const char *name, bool save_repeat) { +void Datafile::add(Field2D &f, const char *name, bool save_repeat, const std::string &description) { TRACE("DataFile::add(Field2D)"); if (!enabled) return; @@ -634,6 +637,7 @@ void Datafile::add(Field2D &f, const char *name, bool save_repeat) { d.name = name; d.save_repeat = save_repeat; d.covar = false; + d.description = description; f2d_arr.push_back(d); @@ -672,7 +676,7 @@ void Datafile::add(Field2D &f, const char *name, bool save_repeat) { } } -void Datafile::add(Field3D &f, const char *name, bool save_repeat) { +void Datafile::add(Field3D &f, const char *name, bool save_repeat, const std::string &description) { TRACE("DataFile::add(Field3D)"); if (!enabled) return; @@ -693,6 +697,7 @@ void Datafile::add(Field3D &f, const char *name, bool save_repeat) { d.name = name; d.save_repeat = save_repeat; d.covar = false; + d.description = description; f3d_arr.push_back(d); @@ -731,7 +736,7 @@ void Datafile::add(Field3D &f, const char *name, bool save_repeat) { } } -void Datafile::add(FieldPerp &f, const char *name, bool save_repeat) { +void Datafile::add(FieldPerp &f, const char *name, bool save_repeat, const std::string &description) { AUTO_TRACE(); if (!enabled) return; @@ -752,6 +757,7 @@ void Datafile::add(FieldPerp &f, const char *name, bool save_repeat) { 
d.name = name; d.save_repeat = save_repeat; d.covar = false; + d.description = description; fperp_arr.push_back(d); @@ -790,7 +796,7 @@ void Datafile::add(FieldPerp &f, const char *name, bool save_repeat) { } } -void Datafile::add(Vector2D &f, const char *name, bool save_repeat) { +void Datafile::add(Vector2D &f, const char *name, bool save_repeat, const std::string &description) { TRACE("DataFile::add(Vector2D)"); if (!enabled) return; @@ -811,6 +817,7 @@ void Datafile::add(Vector2D &f, const char *name, bool save_repeat) { d.name = name; d.save_repeat = save_repeat; d.covar = f.covariant; + d.description = description; v2d_arr.push_back(d); @@ -859,7 +866,7 @@ void Datafile::add(Vector2D &f, const char *name, bool save_repeat) { } } -void Datafile::add(Vector3D &f, const char *name, bool save_repeat) { +void Datafile::add(Vector3D &f, const char *name, bool save_repeat, const std::string &description) { TRACE("DataFile::add(Vector3D)"); if (!enabled) return; @@ -880,6 +887,7 @@ void Datafile::add(Vector3D &f, const char *name, bool save_repeat) { d.name = name; d.save_repeat = save_repeat; d.covar = f.covariant; + d.description = description; v3d_arr.push_back(d); @@ -1110,19 +1118,49 @@ bool Datafile::write() { // Set the field attributes from field meta-data. // Attributes must have been set for all fields before the first time // output is written, since this happens after the first rhs evaluation + // Integer variables + for(const auto& var : int_arr) { + if (not var.description.empty()) { + file->setAttribute(var.name, "description", var.description); + } + } + + // BoutReal variables + for(const auto& var : BoutReal_arr) { + if (not var.description.empty()) { + file->setAttribute(var.name, "description", var.description); + } + } + + // bool variables + for(const auto& var : bool_arr) { + if (not var.description.empty()) { + file->setAttribute(var.name, "description", var.description); + } + } + // 2D fields for (const auto& var : f2d_arr) { file->writeFieldAttributes(var.name, *var.ptr); + if (not var.description.empty()) { + setAttribute(var.name, "description", var.description); + } } // 3D fields for (const auto& var : f3d_arr) { file->writeFieldAttributes(var.name, *var.ptr); + if (not var.description.empty()) { + setAttribute(var.name, "description", var.description); + } } // FieldPerps for (const auto& var : fperp_arr) { file->writeFieldAttributes(var.name, *var.ptr); + if (not var.description.empty()) { + setAttribute(var.name, "description", var.description); + } } // 2D vectors @@ -1132,6 +1170,9 @@ bool Datafile::write() { file->writeFieldAttributes(name+"x", v.x); file->writeFieldAttributes(name+"y", v.y); file->writeFieldAttributes(name+"z", v.z); + if (not var.description.empty()) { + setAttribute(var.name, "description", var.description); + } } // 3D vectors @@ -1141,6 +1182,9 @@ bool Datafile::write() { file->writeFieldAttributes(name+"x", v.x); file->writeFieldAttributes(name+"y", v.y); file->writeFieldAttributes(name+"z", v.z); + if (not var.description.empty()) { + setAttribute(var.name, "description", var.description); + } } } diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index 5658b04819..2516c4c256 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -546,17 +546,11 @@ void Solver::outputVars(Datafile &outputfile, bool save_repeat) { // Add 2D and 3D evolving fields to output file for(const auto& f : f2d) { // Add to dump file (appending) - outputfile.add(*(f.var), f.name.c_str(), save_repeat); - if (not f.description.empty()) { - 
outputfile.setAttribute(f.name, "description", f.description); - } + outputfile.add(*(f.var), f.name.c_str(), save_repeat, f.description); } for(const auto& f : f3d) { // Add to dump file (appending) - outputfile.add(*(f.var), f.name.c_str(), save_repeat); - if (not f.description.empty()) { - outputfile.setAttribute(f.name, "description", f.description); - } + outputfile.add(*(f.var), f.name.c_str(), save_repeat, f.description); if(mms) { // Add an error variable @@ -567,17 +561,11 @@ void Solver::outputVars(Datafile &outputfile, bool save_repeat) { // Add solver diagnostics to output file for (const auto &d : diagnostic_int) { // Add to dump file (appending) - outputfile.add(*(d.var), d.name.c_str(), save_repeat); - if (not d.description.empty()) { - outputfile.setAttribute(d.name, "description", d.description); - } + outputfile.add(*(d.var), d.name.c_str(), save_repeat, d.description); } for (const auto &d : diagnostic_BoutReal) { // Add to dump file (appending) - outputfile.add(*(d.var), d.name.c_str(), save_repeat); - if (not d.description.empty()) { - outputfile.setAttribute(d.name, "description", d.description); - } + outputfile.add(*(d.var), d.name.c_str(), save_repeat, d.description); } } From b02ef3ec124e1333841d9249c57920f8035955e7 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 6 Aug 2020 19:50:30 +0100 Subject: [PATCH 064/428] Add description argument in SlepcSolver overrides --- src/solver/impls/slepc/slepc.hxx | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/src/solver/impls/slepc/slepc.hxx b/src/solver/impls/slepc/slepc.hxx index db8d65326a..1b95880dc2 100644 --- a/src/solver/impls/slepc/slepc.hxx +++ b/src/solver/impls/slepc/slepc.hxx @@ -96,28 +96,32 @@ public: //////Following overrides all just pass through to advanceSolver // Override virtual add functions in order to pass through to advanceSolver - void add(Field2D& v, const std::string& name) override { - Solver::add(v, name); + void add(Field2D& v, const std::string& name, + const std::string& description = "") override { + Solver::add(v, name, description); if (!selfSolve) { - advanceSolver->add(v, name); + advanceSolver->add(v, name, description); } } - void add(Field3D& v, const std::string& name) override { - Solver::add(v, name); + void add(Field3D& v, const std::string& name, + const std::string& description = "") override { + Solver::add(v, name, description); if (!selfSolve) { - advanceSolver->add(v, name); + advanceSolver->add(v, name, description); } } - void add(Vector2D& v, const std::string& name) override { - Solver::add(v, name); + void add(Vector2D& v, const std::string& name, + const std::string& description = "") override { + Solver::add(v, name, description); if (!selfSolve) { - advanceSolver->add(v, name); + advanceSolver->add(v, name, description); } } - void add(Vector3D& v, const std::string& name) override { - Solver::add(v, name); + void add(Vector3D& v, const std::string& name, + const std::string& description = "") override { + Solver::add(v, name, description); if (!selfSolve) { - advanceSolver->add(v, name); + advanceSolver->add(v, name, description); } } From e81fafd625cfef66f6e012de6b80423305277551 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 6 Aug 2020 22:19:30 +0100 Subject: [PATCH 065/428] Call file->setAttribute() not setAttribute() Datafile::setAttribute() opens the DataFormat object and then calls its setAttribute() method. Don't want to do this inside Datafile::write() where the DataFormat is already opened. 
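A condensed sketch of the corrected pattern inside Datafile::write() (abridged from the change below, with explanatory comments added):

    for (const auto& var : f3d_arr) {
      file->writeFieldAttributes(var.name, *var.ptr);
      if (not var.description.empty()) {
        // Call the DataFormat directly: going through Datafile::setAttribute()
        // here would try to open the DataFormat that write() already has open.
        file->setAttribute(var.name, "description", var.description);
      }
    }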
--- src/fileio/datafile.cxx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/fileio/datafile.cxx b/src/fileio/datafile.cxx index 8c0da4512a..55085a4937 100644 --- a/src/fileio/datafile.cxx +++ b/src/fileio/datafile.cxx @@ -1143,7 +1143,7 @@ bool Datafile::write() { for (const auto& var : f2d_arr) { file->writeFieldAttributes(var.name, *var.ptr); if (not var.description.empty()) { - setAttribute(var.name, "description", var.description); + file->setAttribute(var.name, "description", var.description); } } @@ -1151,7 +1151,7 @@ bool Datafile::write() { for (const auto& var : f3d_arr) { file->writeFieldAttributes(var.name, *var.ptr); if (not var.description.empty()) { - setAttribute(var.name, "description", var.description); + file->setAttribute(var.name, "description", var.description); } } @@ -1159,7 +1159,7 @@ bool Datafile::write() { for (const auto& var : fperp_arr) { file->writeFieldAttributes(var.name, *var.ptr); if (not var.description.empty()) { - setAttribute(var.name, "description", var.description); + file->setAttribute(var.name, "description", var.description); } } @@ -1171,7 +1171,7 @@ bool Datafile::write() { file->writeFieldAttributes(name+"y", v.y); file->writeFieldAttributes(name+"z", v.z); if (not var.description.empty()) { - setAttribute(var.name, "description", var.description); + file->setAttribute(var.name, "description", var.description); } } @@ -1183,7 +1183,7 @@ bool Datafile::write() { file->writeFieldAttributes(name+"y", v.y); file->writeFieldAttributes(name+"z", v.z); if (not var.description.empty()) { - setAttribute(var.name, "description", var.description); + file->setAttribute(var.name, "description", var.description); } } } From 1318baf3a695b58d14946aa1750465cf6d38857a Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 6 Aug 2020 22:58:41 +0100 Subject: [PATCH 066/428] Initialise diagnostic variables in CvodeSolver and ArkodeSolver --- src/fileio/datafile.cxx | 1 + src/solver/impls/arkode/arkode.hxx | 12 ++++++------ src/solver/impls/cvode/cvode.hxx | 20 ++++++++++---------- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/src/fileio/datafile.cxx b/src/fileio/datafile.cxx index 55085a4937..3f78cf85eb 100644 --- a/src/fileio/datafile.cxx +++ b/src/fileio/datafile.cxx @@ -1118,6 +1118,7 @@ bool Datafile::write() { // Set the field attributes from field meta-data. 
// Attributes must have been set for all fields before the first time // output is written, since this happens after the first rhs evaluation + // Integer variables for(const auto& var : int_arr) { if (not var.description.empty()) { diff --git a/src/solver/impls/arkode/arkode.hxx b/src/solver/impls/arkode/arkode.hxx index 4bc3bc1a16..9fa6a2f741 100644 --- a/src/solver/impls/arkode/arkode.hxx +++ b/src/solver/impls/arkode/arkode.hxx @@ -92,12 +92,12 @@ private: int pre_ncalls{0}; // Number of calls to preconditioner // Diagnostics from ARKODE - int nsteps; - int nfe_evals; - int nfi_evals; - int nniters; - int npevals; - int nliters; + int nsteps{0}; + int nfe_evals{0}; + int nfi_evals{0}; + int nniters{0}; + int npevals{0}; + int nliters{0}; void set_abstol_values(BoutReal* abstolvec_data, std::vector& f2dtols, std::vector& f3dtols); diff --git a/src/solver/impls/cvode/cvode.hxx b/src/solver/impls/cvode/cvode.hxx index 2ec2e519a7..a628c6c041 100644 --- a/src/solver/impls/cvode/cvode.hxx +++ b/src/solver/impls/cvode/cvode.hxx @@ -91,16 +91,16 @@ private: int pre_ncalls{0}; // Number of calls to preconditioner // Diagnostics from CVODE - int nsteps; - int nfevals; - int nniters; - int npevals; - int nliters; - BoutReal last_step; - int last_order; - int num_fails; - int nonlin_fails; - int stab_lims; + int nsteps{0}; + int nfevals{0}; + int nniters{0}; + int npevals{0}; + int nliters{0}; + BoutReal last_step{0.0}; + int last_order{0}; + int num_fails{0}; + int nonlin_fails{0}; + int stab_lims{0}; bool cvode_initialised = false; From 008724b95a4ac33bee5f2c0c12753edf43e480fb Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 7 Aug 2020 16:07:15 +0100 Subject: [PATCH 067/428] Pass by reference in add_diagnostic_int() and add_diagnostic_BoutReal() Poiner to the int or BoutReal is stored in a VarStr, so need to pass by reference and not by value, otherwise the pointer ends up pointing to some random place. --- include/bout/solver.hxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index 49aad3beff..39529d3a5e 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -382,14 +382,14 @@ protected: /// Vectors of diagnostic variables to save std::vector> diagnostic_int; std::vector> diagnostic_BoutReal; - void add_int_diagnostic(int i, std::string name, std::string description = "") { + void add_int_diagnostic(int &i, std::string name, std::string description = "") { VarStr v; v.var = &i; v.name = name; v.description = description; diagnostic_int.emplace_back(std::move(v)); }; - void add_BoutReal_diagnostic(BoutReal r, std::string name, std::string description = "") { + void add_BoutReal_diagnostic(BoutReal &r, std::string name, std::string description = "") { VarStr v; v.var = &r; v.name = name; From 7fdd97ef149d105b8e0358bf80e7645bedeccdba Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 7 Aug 2020 16:14:13 +0100 Subject: [PATCH 068/428] Do not add Solver diagnostic variables to restart files If solver type is changed, diagnostic variables will change and so might cause errors if they are added to restart files but cannot be read. So do not add diagnostic variables to the restart files. 
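The pass-by-reference change in the previous patch can be illustrated with a
small standalone sketch (hypothetical names, not the real VarStr/Solver API):
storing the address of a by-value parameter leaves a pointer into the call's
temporary copy, which dangles as soon as the function returns, whereas
storing the address of a reference parameter points at the caller's variable
and keeps tracking later updates:

    // Hypothetical, simplified illustration only
    #include <iostream>
    #include <string>
    #include <vector>

    struct Diagnostic {
      int* var; // pointer kept so the value can be written out later
      std::string name;
    };

    std::vector<Diagnostic> diagnostics;

    // By value would be wrong: &i would be the address of a local copy,
    // which dangles as soon as the function returns.
    // void add_int_diagnostic(int i, const std::string& name);

    // By reference: &i is the address of the caller's variable.
    void add_int_diagnostic(int& i, const std::string& name) {
      diagnostics.push_back({&i, name});
    }

    int main() {
      int nsteps = 0;
      add_int_diagnostic(nsteps, "nsteps");
      nsteps = 42; // later updates are visible through the stored pointer
      std::cout << diagnostics[0].name << " = " << *diagnostics[0].var << "\n";
      return 0;
    }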
--- src/solver/solver.cxx | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index 2516c4c256..0d1bd1053c 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -558,14 +558,20 @@ void Solver::outputVars(Datafile &outputfile, bool save_repeat) { } } - // Add solver diagnostics to output file - for (const auto &d : diagnostic_int) { - // Add to dump file (appending) - outputfile.add(*(d.var), d.name.c_str(), save_repeat, d.description); - } - for (const auto &d : diagnostic_BoutReal) { - // Add to dump file (appending) - outputfile.add(*(d.var), d.name.c_str(), save_repeat, d.description); + if (save_repeat) { + // Do not save if save_repeat=false so we avoid adding diagnostic variables to restart + // files, otherwise they might cause errors if the solver type is changed before + // restarting + + // Add solver diagnostics to output file + for (const auto &d : diagnostic_int) { + // Add to dump file (appending) + outputfile.add(*(d.var), d.name.c_str(), save_repeat, d.description); + } + for (const auto &d : diagnostic_BoutReal) { + // Add to dump file (appending) + outputfile.add(*(d.var), d.name.c_str(), save_repeat, d.description); + } } } From a64b134e4042bb74962b826cecbebbff6c87b1d7 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 21 Aug 2020 13:59:50 +0100 Subject: [PATCH 069/428] Use 'false' and 'true' for save_repeat args of Datafile::add() in manual Previously used '0' and '1' which are correct but less explicit. --- manual/sphinx/user_docs/physics_models.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/manual/sphinx/user_docs/physics_models.rst b/manual/sphinx/user_docs/physics_models.rst index 5ab237474b..de042fe215 100644 --- a/manual/sphinx/user_docs/physics_models.rst +++ b/manual/sphinx/user_docs/physics_models.rst @@ -820,17 +820,17 @@ values to file. For example:: Field2D Ni0; ... GRID_LOAD(Ni0); - dump.add(Ni0, "Ni0", 0); + dump.add(Ni0, "Ni0", false); -where the ’0’ at the end means the variable should only be written to -file once at the start of the simulation. For convenience there are +where the ’false’ at the end means the variable should only be written +to file once at the start of the simulation. For convenience there are some macros e.g.:: SAVE_ONCE(Ni0); is equivalent to:: - dump.add(Ni0, "Ni0", 0); + dump.add(Ni0, "Ni0", false); (see `Datafile::add`). In some situations you might also want to write some data to a different file. To do this, create a `Datafile` object:: @@ -869,7 +869,7 @@ in ``init``, you then: // Not evolving. Every time the file is written, this will be overwritten mydata.add(variable, "name"); // Evolving. 
Will output a sequence of values - mydata.add(variable2, "name2", 1); + mydata.add(variable2, "name2", true); Whenever you want to write values to the file, for example in ``rhs`` or a monitor, just call:: From 1e697b84cf4fc0883eb0d81dbe85c871862ec94f Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 21 Aug 2020 14:08:19 +0100 Subject: [PATCH 070/428] Add description argument to bout_solve() function/methods Pass description through to solver->add() --- include/bout/physicsmodel.hxx | 8 ++++---- include/boutmain.hxx | 4 ++-- src/physics/physicsmodel.cxx | 20 ++++++++++++-------- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/include/bout/physicsmodel.hxx b/include/bout/physicsmodel.hxx index 064136a541..106b794322 100644 --- a/include/bout/physicsmodel.hxx +++ b/include/bout/physicsmodel.hxx @@ -237,10 +237,10 @@ protected: * To evolve the state, the solver will set \p var, and the user-supplied * rhs() function should calculate ddt(var). */ - void bout_solve(Field2D &var, const char *name); - void bout_solve(Field3D &var, const char *name); - void bout_solve(Vector2D &var, const char *name); - void bout_solve(Vector3D &var, const char *name); + void bout_solve(Field2D &var, const char *name, const std::string& description=""); + void bout_solve(Field3D &var, const char *name, const std::string& description=""); + void bout_solve(Vector2D &var, const char *name, const std::string& description=""); + void bout_solve(Vector3D &var, const char *name, const std::string& description=""); /// Stores the state for restarting Datafile restart; diff --git a/include/boutmain.hxx b/include/boutmain.hxx index fb740988dc..28ce2a0208 100644 --- a/include/boutmain.hxx +++ b/include/boutmain.hxx @@ -71,8 +71,8 @@ protected: /// Global functions used by some legacy models template -void bout_solve(T &var, const char *name) { - solver->add(var, name); +void bout_solve(T &var, const char *name, const std::string& description="") { + solver->add(var, name, description); } /*! 
diff --git a/src/physics/physicsmodel.cxx b/src/physics/physicsmodel.cxx index e0657fcd48..67c056bc9d 100644 --- a/src/physics/physicsmodel.cxx +++ b/src/physics/physicsmodel.cxx @@ -72,21 +72,25 @@ int PhysicsModel::runJacobian(BoutReal t) { return (*this.*userjacobian)(t); } -void PhysicsModel::bout_solve(Field2D &var, const char *name) { +void PhysicsModel::bout_solve(Field2D &var, const char *name, + const std::string& description) { // Add to solver - solver->add(var, name); + solver->add(var, name, description); } -void PhysicsModel::bout_solve(Field3D &var, const char *name) { - solver->add(var, name); +void PhysicsModel::bout_solve(Field3D &var, const char *name, + const std::string& description) { + solver->add(var, name, description); } -void PhysicsModel::bout_solve(Vector2D &var, const char *name) { - solver->add(var, name); +void PhysicsModel::bout_solve(Vector2D &var, const char *name, + const std::string& description) { + solver->add(var, name, description); } -void PhysicsModel::bout_solve(Vector3D &var, const char *name) { - solver->add(var, name); +void PhysicsModel::bout_solve(Vector3D &var, const char *name, + const std::string& description) { + solver->add(var, name, description); } int PhysicsModel::postInit(bool restarting) { From c9c15765e1b73ec3dd08d866c245cf82c3a5369b Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 21 Aug 2020 15:06:50 +0100 Subject: [PATCH 071/428] Document 'description' argument for bout_solve() and Datafile::add() --- manual/sphinx/user_docs/physics_models.rst | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/manual/sphinx/user_docs/physics_models.rst b/manual/sphinx/user_docs/physics_models.rst index de042fe215..5695a04d81 100644 --- a/manual/sphinx/user_docs/physics_models.rst +++ b/manual/sphinx/user_docs/physics_models.rst @@ -360,6 +360,13 @@ data files. These will be automatically read and written depending on input options (see :ref:`sec-options`). Input options based on these names are also used to initialise the variables. +You can add a description of the variable which will be saved as an +attribute in the output files by adding a third argument to +``bout_solve()`` e.g.:: + + bout_solve(rho, "density", "electron density"); + bout_solve(B, "B", "total magnetic field strength"); + If the name of the variable in the output file is the same as the variable name, you can use a shorthand macro. In this case, we could use this shorthand for ``v`` and ``B``:: @@ -832,6 +839,12 @@ is equivalent to:: dump.add(Ni0, "Ni0", false); +Optionally, you can add a description to document what the variable +represents, which will be saved as an attribute of the variable in the +output file, e.g.:: + + dump.add(Ni0, "Ni0", false, "background density profile"); + (see `Datafile::add`). In some situations you might also want to write some data to a different file. 
To do this, create a `Datafile` object:: From 535bd9df093d8c44da7a52ae165f3abf072f5d98 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 24 Aug 2020 16:40:47 +0100 Subject: [PATCH 072/428] Support staggered grids in InvertPar --- include/invert_parderiv.hxx | 6 +- src/invert/parderiv/impls/cyclic/cyclic.cxx | 69 ++++++++++++--------- src/invert/parderiv/impls/cyclic/cyclic.hxx | 15 ++++- src/invert/parderiv/invert_parderiv.cxx | 2 +- src/invert/parderiv/parderiv_factory.cxx | 14 +++-- src/invert/parderiv/parderiv_factory.hxx | 10 ++- 6 files changed, 72 insertions(+), 44 deletions(-) diff --git a/include/invert_parderiv.hxx b/include/invert_parderiv.hxx index 2ad993dcc9..b1e48e5f15 100644 --- a/include/invert_parderiv.hxx +++ b/include/invert_parderiv.hxx @@ -63,8 +63,9 @@ public: * with pure virtual members, so can't be created directly. * To create an InvertPar object call the create() static function. */ - InvertPar(Options *UNUSED(opt), Mesh *mesh_in = nullptr) - : localmesh(mesh_in==nullptr ? bout::globals::mesh : mesh_in) {} + InvertPar(Options *UNUSED(opt), CELL_LOC location_in, Mesh *mesh_in = nullptr) + : location(location_in), + localmesh(mesh_in==nullptr ? bout::globals::mesh : mesh_in) {} virtual ~InvertPar() = default; /*! @@ -131,6 +132,7 @@ public: virtual void setCoefE(BoutReal f) { setCoefE(Field2D(f, localmesh)); } protected: + CELL_LOC location; Mesh* localmesh; ///< Mesh object for this solver private: diff --git a/src/invert/parderiv/impls/cyclic/cyclic.cxx b/src/invert/parderiv/impls/cyclic/cyclic.cxx index 7c85d67514..b9cc57b5eb 100644 --- a/src/invert/parderiv/impls/cyclic/cyclic.cxx +++ b/src/invert/parderiv/impls/cyclic/cyclic.cxx @@ -49,24 +49,19 @@ #include -InvertParCR::InvertParCR(Options *opt, Mesh *mesh_in) - : InvertPar(opt, mesh_in), A(1.0), B(0.0), C(0.0), D(0.0), E(0.0) { +InvertParCR::InvertParCR(Options *opt, CELL_LOC location, Mesh *mesh_in) + : InvertPar(opt, location, mesh_in), A(1.0), B(0.0), C(0.0), D(0.0), E(0.0) { // Number of k equations to solve for each x location nsys = 1 + (localmesh->LocalNz)/2; - sg = sqrt(localmesh->getCoordinates()->g_22); + sg = sqrt(localmesh->getCoordinates(location)->g_22); sg = DDY(1. 
/ sg) / sg; } const Field3D InvertParCR::solve(const Field3D &f) { TRACE("InvertParCR::solve(Field3D)"); ASSERT1(localmesh == f.getMesh()); - ASSERT1(A.getLocation() == f.getLocation()); - ASSERT1(B.getLocation() == f.getLocation()); - ASSERT1(C.getLocation() == f.getLocation()); - ASSERT1(D.getLocation() == f.getLocation()); - ASSERT1(E.getLocation() == f.getLocation()); - ASSERT1(sg.getLocation() == f.getLocation()); + ASSERT1(location == f.getLocation()); Field3D result = emptyFrom(f).setDirectionY(YDirectionType::Aligned); @@ -84,10 +79,17 @@ const Field3D InvertParCR::solve(const Field3D &f) { int n = localmesh->LocalNy - 2 * localmesh->ystart; if (!surf.closed()) { // Open field line - if (surf.firstY()) - n += localmesh->ystart; - if (surf.lastY()) + if (surf.firstY()) { + if (location == CELL_YLOW) { + // The 'boundary' includes the grid point at mesh->ystart + n += localmesh->ystart - 1; + } else { + n += localmesh->ystart; + } + } + if (surf.lastY()) { n += localmesh->ystart; + } if (n > size) size = n; // Maximum size @@ -111,14 +113,23 @@ const Field3D InvertParCR::solve(const Field3D &f) { // Number of rows int y0 = 0; + int local_ystart = localmesh->ystart; size = localmesh->LocalNy - 2 * localmesh->ystart; // If no boundaries if (!closed) { if (surf.firstY()) { - y0 += localmesh->ystart; - size += localmesh->ystart; + if (location == CELL_YLOW) { + // The 'boundary' includes the grid point at mesh->ystart + y0 += localmesh->ystart; + size += localmesh->ystart - 1; + local_ystart = localmesh->ystart + 1; + } else { + y0 += localmesh->ystart; + size += localmesh->ystart; + } } - if (surf.lastY()) + if (surf.lastY()) { size += localmesh->ystart; + } } // Setup CyclicReduce object @@ -126,30 +137,30 @@ const Field3D InvertParCR::solve(const Field3D &f) { cr->setPeriodic(closed); // Take Fourier transform - for (int y = 0; y < localmesh->LocalNy - 2 * localmesh->ystart; y++) - rfft(alignedField(x, y + localmesh->ystart), localmesh->LocalNz, &rhs(y + y0, 0)); + for (int y = 0; y < localmesh->LocalNy - localmesh->ystart - local_ystart; y++) + rfft(alignedField(x, y + local_ystart), localmesh->LocalNz, &rhs(y + y0, 0)); // Set up tridiagonal system for (int k = 0; k < nsys; k++) { BoutReal kwave=k*2.0*PI/coord->zlength(); // wave number is 1/length - for (int y = 0; y < localmesh->LocalNy - 2 * localmesh->ystart; y++) { + for (int y = 0; y < localmesh->LocalNy - localmesh->ystart - local_ystart; y++) { - BoutReal acoef = A(x, y + localmesh->ystart); // Constant + BoutReal acoef = A(x, y + local_ystart); // Constant BoutReal bcoef = - B(x, y + localmesh->ystart) / coord->g_22(x, y + localmesh->ystart); // d2dy2 - BoutReal ccoef = C(x, y + localmesh->ystart); // d2dydz - BoutReal dcoef = D(x, y + localmesh->ystart); // d2dz2 + B(x, y + local_ystart) / coord->g_22(x, y + local_ystart); // d2dy2 + BoutReal ccoef = C(x, y + local_ystart); // d2dydz + BoutReal dcoef = D(x, y + local_ystart); // d2dz2 BoutReal ecoef = - E(x, y + localmesh->ystart) - + sg(x, y + localmesh->ystart)*B(x, y + localmesh->ystart); // ddy + E(x, y + local_ystart) + + sg(x, y + local_ystart)*B(x, y + local_ystart); // ddy if (coord->non_uniform) { - ecoef += bcoef * coord->d1_dy(x, y + localmesh->ystart); + ecoef += bcoef * coord->d1_dy(x, y + local_ystart); } - bcoef /= SQ(coord->dy(x, y + localmesh->ystart)); - ccoef /= coord->dy(x, y + localmesh->ystart); - ecoef /= coord->dy(x, y + localmesh->ystart); + bcoef /= SQ(coord->dy(x, y + local_ystart)); + ccoef /= coord->dy(x, y + local_ystart); + ecoef /= 
coord->dy(x, y + local_ystart); // const d2dy2 d2dydz d2dz2 ddy // ----- ----- ------ ----- --- @@ -218,7 +229,7 @@ const Field3D InvertParCR::solve(const Field3D &f) { // Inverse Fourier transform for (int y = 0; y < size; y++) - irfft(&rhs(y, 0), localmesh->LocalNz, result(x, y + localmesh->ystart - y0)); + irfft(&rhs(y, 0), localmesh->LocalNz, result(x, y + local_ystart - y0)); } return fromFieldAligned(result, "RGN_NOBNDRY"); diff --git a/src/invert/parderiv/impls/cyclic/cyclic.hxx b/src/invert/parderiv/impls/cyclic/cyclic.hxx index 657a6d25a8..c1e2fd7b39 100644 --- a/src/invert/parderiv/impls/cyclic/cyclic.hxx +++ b/src/invert/parderiv/impls/cyclic/cyclic.hxx @@ -12,7 +12,10 @@ * Known issues: * ------------ * - * + * - For CELL_YLOW implementation, boundary conditions are only 1st order accurate. + * Should be OK for preconditioners, which are allowed to be less accurate. + * Only 1st-order accurate one-sided derivative is possible in a tri-diagonal matrix and + * staggered mesh requires one-sided derivative as boundary condition. * ************************************************************************** * Copyright 2010 B.D.Dudson, S.Farley, M.V.Umansky, X.Q.Xu @@ -46,7 +49,8 @@ class InvertParCR : public InvertPar { public: - InvertParCR(Options *opt, Mesh *mesh_in = bout::globals::mesh); + InvertParCR(Options *opt, CELL_LOC location = CELL_CENTRE, + Mesh *mesh_in = bout::globals::mesh); using InvertPar::solve; const Field3D solve(const Field3D &f) override; @@ -54,31 +58,36 @@ public: using InvertPar::setCoefA; void setCoefA(const Field2D &f) override { ASSERT1(localmesh == f.getMesh()); + ASSERT1(location == f.getLocation()); A = f; } using InvertPar::setCoefB; void setCoefB(const Field2D &f) override { ASSERT1(localmesh == f.getMesh()); + ASSERT1(location == f.getLocation()); B = f; } using InvertPar::setCoefC; void setCoefC(const Field2D &f) override { ASSERT1(localmesh == f.getMesh()); + ASSERT1(location == f.getLocation()); C = f; } using InvertPar::setCoefD; void setCoefD(const Field2D &f) override { ASSERT1(localmesh == f.getMesh()); + ASSERT1(location == f.getLocation()); D = f; } using InvertPar::setCoefE; void setCoefE(const Field2D &f) override { ASSERT1(localmesh == f.getMesh()); + ASSERT1(location == f.getLocation()); E = f; } private: - Field2D A, B, C, D, E; + Field2D A{0.0}, B{0.0}, C{0.0}, D{0.0}, E{0.0}; Field2D sg; // Coefficient of DDY contribution to Grad2_par2 int nsys; diff --git a/src/invert/parderiv/invert_parderiv.cxx b/src/invert/parderiv/invert_parderiv.cxx index 9a06c9449e..b8ee88309b 100644 --- a/src/invert/parderiv/invert_parderiv.cxx +++ b/src/invert/parderiv/invert_parderiv.cxx @@ -31,7 +31,7 @@ #include "parderiv_factory.hxx" InvertPar* InvertPar::Create(Mesh* mesh_in) { - return ParDerivFactory::getInstance()->createInvertPar(mesh_in); + return ParDerivFactory::getInstance()->createInvertPar(CELL_CENTRE, mesh_in); } const Field2D InvertPar::solve(const Field2D &f) { diff --git a/src/invert/parderiv/parderiv_factory.cxx b/src/invert/parderiv/parderiv_factory.cxx index bf9ff4fa42..c49e42c3bc 100644 --- a/src/invert/parderiv/parderiv_factory.cxx +++ b/src/invert/parderiv/parderiv_factory.cxx @@ -22,14 +22,15 @@ ParDerivFactory* ParDerivFactory::getInstance() { return instance; } -InvertPar* ParDerivFactory::createInvertPar(Mesh *mesh_in) { +InvertPar* ParDerivFactory::createInvertPar(CELL_LOC location, Mesh *mesh_in) { // Get the default options section Options *opt = Options::getRoot()->getSection(default_section); - return createInvertPar(opt, 
mesh_in); + return createInvertPar(opt, location, mesh_in); } -InvertPar* ParDerivFactory::createInvertPar(const char* type, Options *opt, Mesh *mesh_in) { +InvertPar* ParDerivFactory::createInvertPar(const char* type, Options *opt, + CELL_LOC location, Mesh *mesh_in) { int NPES; MPI_Comm_size(BoutComm::get(), &NPES); @@ -37,15 +38,16 @@ InvertPar* ParDerivFactory::createInvertPar(const char* type, Options *opt, Mesh opt = Options::getRoot()->getSection(default_section); if (!strcasecmp(type, PARDERIVCYCLIC)) { - return new InvertParCR(opt, mesh_in); + return new InvertParCR(opt, location, mesh_in); } throw BoutException("No such ParDeriv solver exists in this build, type: %s", type); } -InvertPar* ParDerivFactory::createInvertPar(Options *opts, Mesh *mesh_in) { +InvertPar* ParDerivFactory::createInvertPar(Options *opts, CELL_LOC location, + Mesh *mesh_in) { std::string type; opts->get("type", type, "cyclic"); - return createInvertPar(type.c_str(), opts, mesh_in); + return createInvertPar(type.c_str(), opts, location, mesh_in); } diff --git a/src/invert/parderiv/parderiv_factory.hxx b/src/invert/parderiv/parderiv_factory.hxx index 5701f5ef48..2f8dc89127 100644 --- a/src/invert/parderiv/parderiv_factory.hxx +++ b/src/invert/parderiv/parderiv_factory.hxx @@ -11,9 +11,13 @@ class ParDerivFactory { /// Return a pointer to the only instance static ParDerivFactory* getInstance(); - InvertPar* createInvertPar(Mesh* mesh_in = bout::globals::mesh); - InvertPar *createInvertPar(const char *type, Options *opt = nullptr, Mesh* mesh_in = bout::globals::mesh); - InvertPar* createInvertPar(Options *opts, Mesh* mesh_in = bout::globals::mesh); + InvertPar* createInvertPar(CELL_LOC location = CELL_CENTRE, + Mesh* mesh_in = bout::globals::mesh); + InvertPar *createInvertPar(const char *type, Options *opt = nullptr, + CELL_LOC location = CELL_CENTRE, + Mesh* mesh_in = bout::globals::mesh); + InvertPar* createInvertPar(Options *opts, CELL_LOC location = CELL_CENTRE, + Mesh* mesh_in = bout::globals::mesh); private: ParDerivFactory() {} // Prevent instantiation of this class static ParDerivFactory* instance; ///< The only instance of this class (Singleton) From cabf597cc78d3b49acf3e70944fd8f5420d7f7c4 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 24 Aug 2020 17:05:47 +0100 Subject: [PATCH 073/428] Test staggered location in test-invpar --- tests/integrated/test-invpar/data/BOUT.inp | 2 + tests/integrated/test-invpar/runtest | 44 +++++++++++--------- tests/integrated/test-invpar/test_invpar.cxx | 26 +++++++----- 3 files changed, 41 insertions(+), 31 deletions(-) diff --git a/tests/integrated/test-invpar/data/BOUT.inp b/tests/integrated/test-invpar/data/BOUT.inp index 4c33894219..72b5170394 100644 --- a/tests/integrated/test-invpar/data/BOUT.inp +++ b/tests/integrated/test-invpar/data/BOUT.inp @@ -13,6 +13,8 @@ Ballooning = false tol = 1e-10 [mesh] +staggergrids = true + nx = 12 ny = 12 diff --git a/tests/integrated/test-invpar/runtest b/tests/integrated/test-invpar/runtest index 476526d38c..b257e5532f 100755 --- a/tests/integrated/test-invpar/runtest +++ b/tests/integrated/test-invpar/runtest @@ -30,33 +30,37 @@ flags = [ "ecoef=-1", "'input=ballooning(exp(-y*y)*cos(z)*gauss(x,0.2))'", ] +locations = ["CELL_CENTRE", "CELL_XLOW", "CELL_YLOW", "CELL_ZLOW"] code = 0 # Return code for nproc in [1,2,4]: cmd = "./test_invpar" - + print(" %d processors...." % (nproc)) r = 0 for f in flags: - stdout.write("\tflags '"+f+"' ... 
") - - shell("rm data/BOUT.dmp.* 2> err.log") - - # Run the case - s, out = launch_safe(cmd+" "+f, nproc=nproc, mthread=1, pipe=True) - - with open("run.log."+str(nproc)+"."+str(r), "w") as f: - f.write(out) - - r = r + 1 - - # Find out if it worked - allpassed = collect("allpassed", path="data", info=False) - if allpassed: - print("PASSED") - else: - print("FAILED") - code = 1 + for l in locations: + stdout.write("\tflags '"+f+"', location '"+l+"' ... ") + + shell("rm data/BOUT.dmp.* 2> err.log") + + # Run the case + s, out = launch_safe( + cmd+" "+f+" test_location="+l, nproc=nproc, mthread=1, pipe=True + ) + + with open("run.log."+str(nproc)+"."+str(r), "w") as outfile: + outfile.write(out) + + r = r + 1 + + # Find out if it worked + allpassed = collect("allpassed", path="data", info=False) + if allpassed: + print("PASSED") + else: + print("FAILED") + code = 1 if code == 0: print(" => All inversion tests passed") diff --git a/tests/integrated/test-invpar/test_invpar.cxx b/tests/integrated/test-invpar/test_invpar.cxx index 8f130ec475..a8909ff14a 100644 --- a/tests/integrated/test-invpar/test_invpar.cxx +++ b/tests/integrated/test-invpar/test_invpar.cxx @@ -8,13 +8,13 @@ #include #include #include +#include <../src/invert/parderiv/parderiv_factory.hxx> int main(int argc, char **argv) { // Initialise BOUT++, setting up mesh BoutInitialise(argc, argv); - InvertPar *inv = InvertPar::Create(); FieldFactory f(mesh); // Get options @@ -26,13 +26,16 @@ int main(int argc, char **argv) { options.get("dcoef", dcoef, "0.0"); options.get("ecoef", ecoef, "0.0"); options.get("input", func, "sin(2*y)*(1. + 0.2*exp(cos(z)))"); + auto location = CELL_LOCFromString(options["test_location"].withDefault("CELL_CENTRE")); BoutReal tol = options["tol"].withDefault(1e-10); - Field2D A = f.create2D(acoef); - Field2D B = f.create2D(bcoef); - Field2D C = f.create2D(ccoef); - Field2D D = f.create2D(dcoef); - Field2D E = f.create2D(ecoef); + InvertPar *inv = ParDerivFactory::getInstance()->createInvertPar(location); + + Field2D A = f.create2D(acoef, nullptr, nullptr, location); + Field2D B = f.create2D(bcoef, nullptr, nullptr, location); + Field2D C = f.create2D(ccoef, nullptr, nullptr, location); + Field2D D = f.create2D(dcoef, nullptr, nullptr, location); + Field2D E = f.create2D(ecoef, nullptr, nullptr, location); inv->setCoefA(A); inv->setCoefB(B); @@ -40,7 +43,7 @@ int main(int argc, char **argv) { inv->setCoefD(D); inv->setCoefE(E); - Field3D input = f.create3D(func); + Field3D input = f.create3D(func, nullptr, nullptr, location); Field3D result = inv->solve(input); mesh->communicate(result); @@ -49,12 +52,13 @@ int main(int argc, char **argv) { // Check the result int passed = 1; - for (int y = 2; y < mesh->LocalNy - 2; y++) { + for (int y = mesh->ystart; y < mesh->yend; y++) { for (int z = 0; z < mesh->LocalNz; z++) { - output.write("result: [%d,%d] : %e, %e, %e\n", y, z, input(2, y, z), - result(2, y, z), deriv(2, y, z)); - if (abs(input(2, y, z) - deriv(2, y, z)) > tol) + output.write("result: [%d,%d] : %e, %e, %e\n", y, z, input(mesh->xstart, y, z), + result(mesh->xstart, y, z), deriv(mesh->xstart, y, z)); + if (abs(input(mesh->xstart, y, z) - deriv(mesh->xstart, y, z)) > tol) { passed = 0; + } } } From 67f0f17aaf136011cd308680e1b6425c7c292615 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 24 Aug 2020 17:33:45 +0100 Subject: [PATCH 074/428] Test open field-line regions in test-invpar --- tests/integrated/test-invpar/runtest | 38 +++++++++++--------- tests/integrated/test-invpar/test_invpar.cxx | 8 
++++- 2 files changed, 28 insertions(+), 18 deletions(-) diff --git a/tests/integrated/test-invpar/runtest b/tests/integrated/test-invpar/runtest index b257e5532f..b59fbad293 100755 --- a/tests/integrated/test-invpar/runtest +++ b/tests/integrated/test-invpar/runtest @@ -30,7 +30,12 @@ flags = [ "ecoef=-1", "'input=ballooning(exp(-y*y)*cos(z)*gauss(x,0.2))'", ] + locations = ["CELL_CENTRE", "CELL_XLOW", "CELL_YLOW", "CELL_ZLOW"] +flags = [f + " test_location=" + l for f in flags for l in locations] + +regions = ["", " mesh:ixseps1=0 mesh:ixseps2=0"] +flags = [f + r for f in flags for r in regions] code = 0 # Return code for nproc in [1,2,4]: @@ -39,28 +44,27 @@ for nproc in [1,2,4]: print(" %d processors...." % (nproc)) r = 0 for f in flags: - for l in locations: - stdout.write("\tflags '"+f+"', location '"+l+"' ... ") + stdout.write("\tflags '"+f+"' ... ") - shell("rm data/BOUT.dmp.* 2> err.log") + shell("rm data/BOUT.dmp.* 2> err.log") - # Run the case - s, out = launch_safe( - cmd+" "+f+" test_location="+l, nproc=nproc, mthread=1, pipe=True - ) + # Run the case + s, out = launch_safe( + cmd+" "+f, nproc=nproc, mthread=1, pipe=True + ) - with open("run.log."+str(nproc)+"."+str(r), "w") as outfile: - outfile.write(out) + with open("run.log."+str(nproc)+"."+str(r), "w") as outfile: + outfile.write(out) - r = r + 1 + r = r + 1 - # Find out if it worked - allpassed = collect("allpassed", path="data", info=False) - if allpassed: - print("PASSED") - else: - print("FAILED") - code = 1 + # Find out if it worked + allpassed = collect("allpassed", path="data", info=False) + if allpassed: + print("PASSED") + else: + print("FAILED") + code = 1 if code == 0: print(" => All inversion tests passed") diff --git a/tests/integrated/test-invpar/test_invpar.cxx b/tests/integrated/test-invpar/test_invpar.cxx index a8909ff14a..1dfb086830 100644 --- a/tests/integrated/test-invpar/test_invpar.cxx +++ b/tests/integrated/test-invpar/test_invpar.cxx @@ -52,7 +52,13 @@ int main(int argc, char **argv) { // Check the result int passed = 1; - for (int y = mesh->ystart; y < mesh->yend; y++) { + int local_ystart = mesh->ystart; + if (location == CELL_YLOW) { + // Point at mesh->ystart in 'result' is set by the Neumann boundary condition, so may + // not agree with 'deriv' + local_ystart = mesh->ystart + 1; + } + for (int y = local_ystart; y < mesh->yend; y++) { for (int z = 0; z < mesh->LocalNz; z++) { output.write("result: [%d,%d] : %e, %e, %e\n", y, z, input(mesh->xstart, y, z), result(mesh->xstart, y, z), deriv(mesh->xstart, y, z)); From b3f4adf2e49507528142fbc90abd1e265336ccb5 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 24 Aug 2020 18:21:31 +0100 Subject: [PATCH 075/428] Move ParDerivFactory declaration into invert_parderiv.hxx Previously was in header file under src/, which made the factory class hard to import. The factory is needed to use staggered InvertPar objects. 
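With the declaration in the public header, user code (for example a physics
model) only needs invert_parderiv.hxx to create a solver at a staggered
location. A sketch of the intended usage, following the updated test-invpar
driver above (this assumes the usual BOUT++ build and Mesh setup, so it is
not a standalone program):

    #include <invert_parderiv.hxx>

    void setup_parallel_solver(Mesh* mesh) {
      // Create the cyclic-reduction solver at a staggered location
      InvertPar* inv =
          ParDerivFactory::getInstance()->createInvertPar(CELL_YLOW, mesh);

      // Set constant coefficients
      inv->setCoefA(1.0);
      inv->setCoefB(-0.1);

      // Field3D result = inv->solve(rhs); // rhs must also be at CELL_YLOW
      delete inv;
    }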
--- include/invert_parderiv.hxx | 16 ++++++++++++ src/invert/parderiv/invert_parderiv.cxx | 1 - src/invert/parderiv/parderiv_factory.cxx | 2 +- src/invert/parderiv/parderiv_factory.hxx | 26 -------------------- tests/integrated/test-invpar/test_invpar.cxx | 1 - 5 files changed, 17 insertions(+), 29 deletions(-) delete mode 100644 src/invert/parderiv/parderiv_factory.hxx diff --git a/include/invert_parderiv.hxx b/include/invert_parderiv.hxx index b1e48e5f15..cd2da8daab 100644 --- a/include/invert_parderiv.hxx +++ b/include/invert_parderiv.hxx @@ -138,6 +138,22 @@ protected: private: }; +class ParDerivFactory { + public: + /// Return a pointer to the only instance + static ParDerivFactory* getInstance(); + + InvertPar* createInvertPar(CELL_LOC location = CELL_CENTRE, + Mesh* mesh_in = bout::globals::mesh); + InvertPar *createInvertPar(const char *type, Options *opt = nullptr, + CELL_LOC location = CELL_CENTRE, + Mesh* mesh_in = bout::globals::mesh); + InvertPar* createInvertPar(Options *opts, CELL_LOC location = CELL_CENTRE, + Mesh* mesh_in = bout::globals::mesh); + private: + ParDerivFactory() {} // Prevent instantiation of this class + static ParDerivFactory* instance; ///< The only instance of this class (Singleton) +}; #endif // __INV_PAR_H__ diff --git a/src/invert/parderiv/invert_parderiv.cxx b/src/invert/parderiv/invert_parderiv.cxx index b8ee88309b..090d1c7a38 100644 --- a/src/invert/parderiv/invert_parderiv.cxx +++ b/src/invert/parderiv/invert_parderiv.cxx @@ -28,7 +28,6 @@ ************************************************************************/ #include -#include "parderiv_factory.hxx" InvertPar* InvertPar::Create(Mesh* mesh_in) { return ParDerivFactory::getInstance()->createInvertPar(CELL_CENTRE, mesh_in); diff --git a/src/invert/parderiv/parderiv_factory.cxx b/src/invert/parderiv/parderiv_factory.cxx index c49e42c3bc..b9ca731724 100644 --- a/src/invert/parderiv/parderiv_factory.cxx +++ b/src/invert/parderiv/parderiv_factory.cxx @@ -5,7 +5,7 @@ #include -#include "parderiv_factory.hxx" +#include #include "impls/cyclic/cyclic.hxx" diff --git a/src/invert/parderiv/parderiv_factory.hxx b/src/invert/parderiv/parderiv_factory.hxx deleted file mode 100644 index 2f8dc89127..0000000000 --- a/src/invert/parderiv/parderiv_factory.hxx +++ /dev/null @@ -1,26 +0,0 @@ -class ParDerivFactory; - -#ifndef __PARDERIV_FACTORY_H__ -#define __PARDERIV_FACTORY_H__ - -#include -#include - -class ParDerivFactory { - public: - /// Return a pointer to the only instance - static ParDerivFactory* getInstance(); - - InvertPar* createInvertPar(CELL_LOC location = CELL_CENTRE, - Mesh* mesh_in = bout::globals::mesh); - InvertPar *createInvertPar(const char *type, Options *opt = nullptr, - CELL_LOC location = CELL_CENTRE, - Mesh* mesh_in = bout::globals::mesh); - InvertPar* createInvertPar(Options *opts, CELL_LOC location = CELL_CENTRE, - Mesh* mesh_in = bout::globals::mesh); - private: - ParDerivFactory() {} // Prevent instantiation of this class - static ParDerivFactory* instance; ///< The only instance of this class (Singleton) -}; - -#endif // __PARDERIV_FACTORY_H__ diff --git a/tests/integrated/test-invpar/test_invpar.cxx b/tests/integrated/test-invpar/test_invpar.cxx index 1dfb086830..cb0275bf07 100644 --- a/tests/integrated/test-invpar/test_invpar.cxx +++ b/tests/integrated/test-invpar/test_invpar.cxx @@ -8,7 +8,6 @@ #include #include #include -#include <../src/invert/parderiv/parderiv_factory.hxx> int main(int argc, char **argv) { From 4e26dc3ca6bcb3b0124685f76ab019f67a9644de Mon Sep 17 00:00:00 2001 
From: John Omotani Date: Tue, 25 Aug 2020 16:08:12 +0100 Subject: [PATCH 076/428] Remove deleted parderiv_factory.hxx from CMakeLists.txt --- CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3767c9b0c3..74ffebfad0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -203,7 +203,6 @@ set(BOUT_SOURCES ./src/invert/parderiv/impls/cyclic/cyclic.hxx ./src/invert/parderiv/invert_parderiv.cxx ./src/invert/parderiv/parderiv_factory.cxx - ./src/invert/parderiv/parderiv_factory.hxx ./src/mesh/boundary_factory.cxx ./src/mesh/boundary_region.cxx ./src/mesh/boundary_standard.cxx From c7ff5214610323eab13737ec135c3e5fad5ce273 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 29 Aug 2020 18:16:31 +0100 Subject: [PATCH 077/428] Set location when setting InvertPar coefficients from BoutReal --- include/invert_parderiv.hxx | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/include/invert_parderiv.hxx b/include/invert_parderiv.hxx index cd2da8daab..4e205cfc52 100644 --- a/include/invert_parderiv.hxx +++ b/include/invert_parderiv.hxx @@ -101,35 +101,55 @@ public: */ virtual void setCoefA(const Field2D &f) = 0; virtual void setCoefA(const Field3D &f) {setCoefA(DC(f));} - virtual void setCoefA(BoutReal f) {setCoefA(Field2D(f, localmesh));} + virtual void setCoefA(BoutReal f) { + auto A = Field2D(f, localmesh); + A.setLocation(location); + setCoefA(A); + } /*! * Set the Grad2_par2 coefficient B */ virtual void setCoefB(const Field2D &f) = 0; virtual void setCoefB(const Field3D &f) {setCoefB(DC(f));} - virtual void setCoefB(BoutReal f) {setCoefB(Field2D(f, localmesh));} + virtual void setCoefB(BoutReal f) { + auto B = Field2D(f, localmesh); + B.setLocation(location); + setCoefB(B); + } /*! * Set the D2DYDZ coefficient C */ virtual void setCoefC(const Field2D& f) = 0; virtual void setCoefC(const Field3D& f) { setCoefC(DC(f)); } - virtual void setCoefC(BoutReal f) { setCoefC(Field2D(f, localmesh)); } + virtual void setCoefC(BoutReal f) { + auto C = Field2D(f, localmesh); + C.setLocation(location); + setCoefC(C); + } /*! * Set the D2DZ2 coefficient D */ virtual void setCoefD(const Field2D& f) = 0; virtual void setCoefD(const Field3D& f) { setCoefD(DC(f)); } - virtual void setCoefD(BoutReal f) { setCoefD(Field2D(f, localmesh)); } + virtual void setCoefD(BoutReal f) { + auto D = Field2D(f, localmesh); + D.setLocation(location); + setCoefD(D); + } /*! * Set the DDY coefficient E */ virtual void setCoefE(const Field2D& f) = 0; virtual void setCoefE(const Field3D& f) { setCoefE(DC(f)); } - virtual void setCoefE(BoutReal f) { setCoefE(Field2D(f, localmesh)); } + virtual void setCoefE(BoutReal f) { + auto E = Field2D(f, localmesh); + E.setLocation(location); + setCoefE(E); + } protected: CELL_LOC location; From 1b4b1b7142471ed05bcd8a25adaac76366284bd3 Mon Sep 17 00:00:00 2001 From: David Date: Sat, 19 Sep 2020 21:06:38 +0200 Subject: [PATCH 078/428] Make example relocatable By not hard-coding BOUT_TOP it is easier to compile the examples with a different BOUT_TOP, and thus use them with a different, e.g. precompiled BOUT++ version. 
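For reference, GNU Make's '?=' only assigns when the variable is not already
defined, so the default of ../.. (or ../../..) is still used as before, but
BOUT_TOP can now be overridden without editing the makefile, either from the
environment (export BOUT_TOP=/path/to/BOUT-dev) or on the command line
(make BOUT_TOP=/path/to/BOUT-dev).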
--- examples/2Dturbulence_multigrid/makefile | 2 +- examples/6field-simple/makefile | 2 +- examples/IMEX/advection-diffusion/makefile | 2 +- examples/IMEX/advection-reaction/makefile | 2 +- examples/IMEX/diffusion-nl/makefile | 2 +- examples/IMEX/drift-wave-constraint/makefile | 2 +- examples/IMEX/drift-wave/makefile | 2 +- examples/advdiff/makefile | 2 +- examples/advdiff2/makefile | 2 +- examples/backtrace/makefile | 2 +- examples/blob2d-laplacexz/makefile | 2 +- examples/blob2d/makefile | 2 +- examples/boundary-conditions/advection/makefile | 2 +- examples/bout_runners_example/makefile | 2 +- examples/conducting-wall-mode/makefile | 2 +- examples/conduction-snb/makefile | 2 +- examples/conduction/makefile | 2 +- examples/constraints/alfven-wave/makefile | 2 +- examples/constraints/laplace-dae/makefile | 2 +- examples/dalf3/makefile | 2 +- examples/eigen-box/makefile | 2 +- examples/elm-pb/makefile | 2 +- examples/em-drift/makefile | 2 +- examples/fci-wave-logn/makefile | 2 +- examples/fci-wave/makefile | 2 +- examples/finite-volume/diffusion/makefile | 2 +- examples/finite-volume/fluid/makefile | 2 +- examples/finite-volume/test/makefile | 2 +- examples/gas-compress/makefile | 2 +- examples/gravity_reduced/makefile | 2 +- examples/gyro-gem/makefile | 2 +- examples/hasegawa-wakatani/makefile | 2 +- examples/invertable_operator/makefile | 2 +- examples/jorek-compare/makefile | 2 +- examples/lapd-drift/makefile | 2 +- examples/laplacexy/alfven-wave/makefile | 2 +- examples/laplacexy/laplace_perp/makefile | 2 +- examples/laplacexy/simple/makefile | 2 +- examples/monitor-newapi/makefile | 2 +- examples/monitor/makefile | 2 +- examples/orszag-tang/makefile | 2 +- examples/performance/arithmetic/makefile | 2 +- examples/performance/arithmetic_3d2d/makefile | 2 +- examples/performance/bracket/makefile | 2 +- examples/performance/ddx/makefile | 2 +- examples/performance/ddy/makefile | 2 +- examples/performance/ddz/makefile | 2 +- examples/performance/iterator-offsets/makefile | 2 +- examples/performance/iterator/makefile | 2 +- examples/performance/laplace/makefile | 2 +- examples/performance/tuning_regionblocksize/makefile | 2 +- examples/preconditioning/wave/makefile | 2 +- examples/reconnect-2field/makefile | 2 +- examples/shear-alfven-wave/makefile | 2 +- examples/staggered_grid/makefile | 2 +- examples/subsampling/makefile | 2 +- examples/tokamak-2fluid/makefile | 2 +- examples/uedge-benchmark/makefile | 2 +- examples/wave-slab/makefile | 2 +- 59 files changed, 59 insertions(+), 59 deletions(-) diff --git a/examples/2Dturbulence_multigrid/makefile b/examples/2Dturbulence_multigrid/makefile index dc6f78e21f..f67cb0d1ba 100644 --- a/examples/2Dturbulence_multigrid/makefile +++ b/examples/2Dturbulence_multigrid/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../ +BOUT_TOP ?= ../../ SOURCEC = esel.cxx diff --git a/examples/6field-simple/makefile b/examples/6field-simple/makefile index e3ac7eee28..6f70c9d751 100644 --- a/examples/6field-simple/makefile +++ b/examples/6field-simple/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../ +BOUT_TOP ?= ../../ SOURCEC = elm_6f.cxx diff --git a/examples/IMEX/advection-diffusion/makefile b/examples/IMEX/advection-diffusion/makefile index 1b69656434..588242e834 100644 --- a/examples/IMEX/advection-diffusion/makefile +++ b/examples/IMEX/advection-diffusion/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. 
SOURCEC = imex.cxx diff --git a/examples/IMEX/advection-reaction/makefile b/examples/IMEX/advection-reaction/makefile index 78a30da270..689fd0c614 100644 --- a/examples/IMEX/advection-reaction/makefile +++ b/examples/IMEX/advection-reaction/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = split_operator.cxx diff --git a/examples/IMEX/diffusion-nl/makefile b/examples/IMEX/diffusion-nl/makefile index 91349da18d..8dcd38cc0b 100644 --- a/examples/IMEX/diffusion-nl/makefile +++ b/examples/IMEX/diffusion-nl/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = diffusion-nl.cxx diff --git a/examples/IMEX/drift-wave-constraint/makefile b/examples/IMEX/drift-wave-constraint/makefile index 01e3c41894..fdf3abccc9 100644 --- a/examples/IMEX/drift-wave-constraint/makefile +++ b/examples/IMEX/drift-wave-constraint/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = test-drift.cxx diff --git a/examples/IMEX/drift-wave/makefile b/examples/IMEX/drift-wave/makefile index 01e3c41894..fdf3abccc9 100644 --- a/examples/IMEX/drift-wave/makefile +++ b/examples/IMEX/drift-wave/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = test-drift.cxx diff --git a/examples/advdiff/makefile b/examples/advdiff/makefile index 16edf55acb..15bffe5c29 100644 --- a/examples/advdiff/makefile +++ b/examples/advdiff/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = advdiff.cxx diff --git a/examples/advdiff2/makefile b/examples/advdiff2/makefile index ef6b8c32f4..5572e1e3e1 100644 --- a/examples/advdiff2/makefile +++ b/examples/advdiff2/makefile @@ -1,4 +1,4 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = globals.cxx init.cxx run.cxx diff --git a/examples/backtrace/makefile b/examples/backtrace/makefile index 2dc3ca2a68..cb5c651884 100644 --- a/examples/backtrace/makefile +++ b/examples/backtrace/makefile @@ -1,4 +1,4 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. TARGET=backtrace diff --git a/examples/blob2d-laplacexz/makefile b/examples/blob2d-laplacexz/makefile index 1ab6de5387..ca2f949a73 100644 --- a/examples/blob2d-laplacexz/makefile +++ b/examples/blob2d-laplacexz/makefile @@ -1,4 +1,4 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = blob2d.cxx diff --git a/examples/blob2d/makefile b/examples/blob2d/makefile index 1ab6de5387..ca2f949a73 100644 --- a/examples/blob2d/makefile +++ b/examples/blob2d/makefile @@ -1,4 +1,4 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = blob2d.cxx diff --git a/examples/boundary-conditions/advection/makefile b/examples/boundary-conditions/advection/makefile index e164e47499..962ca894c2 100644 --- a/examples/boundary-conditions/advection/makefile +++ b/examples/boundary-conditions/advection/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = advection.cxx diff --git a/examples/bout_runners_example/makefile b/examples/bout_runners_example/makefile index 076715e8ea..a3cdcc1c3f 100644 --- a/examples/bout_runners_example/makefile +++ b/examples/bout_runners_example/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = diffusion_3D.cxx diff --git a/examples/conducting-wall-mode/makefile b/examples/conducting-wall-mode/makefile index f1a53b9399..e3375e06d9 100644 --- a/examples/conducting-wall-mode/makefile +++ b/examples/conducting-wall-mode/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. 
SOURCEC = cwm.cxx diff --git a/examples/conduction-snb/makefile b/examples/conduction-snb/makefile index 83941ca021..90ecf242d8 100644 --- a/examples/conduction-snb/makefile +++ b/examples/conduction-snb/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = conduction-snb.cxx diff --git a/examples/conduction/makefile b/examples/conduction/makefile index 9188f05e6e..26a2d2686e 100644 --- a/examples/conduction/makefile +++ b/examples/conduction/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = conduction.cxx diff --git a/examples/constraints/alfven-wave/makefile b/examples/constraints/alfven-wave/makefile index 9b3ad0460c..cc53facb8e 100644 --- a/examples/constraints/alfven-wave/makefile +++ b/examples/constraints/alfven-wave/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = alfven.cxx diff --git a/examples/constraints/laplace-dae/makefile b/examples/constraints/laplace-dae/makefile index 90b6da62c5..224a696106 100644 --- a/examples/constraints/laplace-dae/makefile +++ b/examples/constraints/laplace-dae/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = laplace_dae.cxx diff --git a/examples/dalf3/makefile b/examples/dalf3/makefile index 8fdd506cdb..3c9eed60c1 100644 --- a/examples/dalf3/makefile +++ b/examples/dalf3/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = dalf3.cxx diff --git a/examples/eigen-box/makefile b/examples/eigen-box/makefile index 61698bfff7..53855e725b 100644 --- a/examples/eigen-box/makefile +++ b/examples/eigen-box/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = eigen-box.cxx diff --git a/examples/elm-pb/makefile b/examples/elm-pb/makefile index f4ce0684e3..ea19cf6f83 100644 --- a/examples/elm-pb/makefile +++ b/examples/elm-pb/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../ +BOUT_TOP ?= ../../ SOURCEC = elm_pb.cxx diff --git a/examples/em-drift/makefile b/examples/em-drift/makefile index b469e9d9ac..e9c69ca443 100644 --- a/examples/em-drift/makefile +++ b/examples/em-drift/makefile @@ -1,4 +1,4 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = 2fluid.cxx diff --git a/examples/fci-wave-logn/makefile b/examples/fci-wave-logn/makefile index 6a96f13590..24c0928db5 100644 --- a/examples/fci-wave-logn/makefile +++ b/examples/fci-wave-logn/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = fci-wave.cxx diff --git a/examples/fci-wave/makefile b/examples/fci-wave/makefile index 6a96f13590..24c0928db5 100644 --- a/examples/fci-wave/makefile +++ b/examples/fci-wave/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = fci-wave.cxx diff --git a/examples/finite-volume/diffusion/makefile b/examples/finite-volume/diffusion/makefile index dccf12d3df..c633a109d9 100644 --- a/examples/finite-volume/diffusion/makefile +++ b/examples/finite-volume/diffusion/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = diffusion.cxx diff --git a/examples/finite-volume/fluid/makefile b/examples/finite-volume/fluid/makefile index 694958abf6..4effb80b45 100644 --- a/examples/finite-volume/fluid/makefile +++ b/examples/finite-volume/fluid/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = fluid.cxx diff --git a/examples/finite-volume/test/makefile b/examples/finite-volume/test/makefile index d4d7c2dcf9..96b5469333 100644 --- a/examples/finite-volume/test/makefile +++ b/examples/finite-volume/test/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. 
SOURCEC = finite_volume.cxx diff --git a/examples/gas-compress/makefile b/examples/gas-compress/makefile index 3be499639f..029f9335a1 100644 --- a/examples/gas-compress/makefile +++ b/examples/gas-compress/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = gas_compress.cxx diff --git a/examples/gravity_reduced/makefile b/examples/gravity_reduced/makefile index db6ad4515d..eb5a62641a 100644 --- a/examples/gravity_reduced/makefile +++ b/examples/gravity_reduced/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = gravity_reduced.cxx diff --git a/examples/gyro-gem/makefile b/examples/gyro-gem/makefile index 8b5865c9bf..709cf2d5fa 100644 --- a/examples/gyro-gem/makefile +++ b/examples/gyro-gem/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = gem.cxx diff --git a/examples/hasegawa-wakatani/makefile b/examples/hasegawa-wakatani/makefile index 8f33b716b9..34395b1bf0 100644 --- a/examples/hasegawa-wakatani/makefile +++ b/examples/hasegawa-wakatani/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = hw.cxx diff --git a/examples/invertable_operator/makefile b/examples/invertable_operator/makefile index 12c82cc6c3..ff5ec8c809 100644 --- a/examples/invertable_operator/makefile +++ b/examples/invertable_operator/makefile @@ -1,4 +1,4 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = invertable_operator.cxx diff --git a/examples/jorek-compare/makefile b/examples/jorek-compare/makefile index 5a5c85b656..4d0898da1e 100644 --- a/examples/jorek-compare/makefile +++ b/examples/jorek-compare/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = jorek_compare.cxx diff --git a/examples/lapd-drift/makefile b/examples/lapd-drift/makefile index 308cb596b2..abb567e24b 100644 --- a/examples/lapd-drift/makefile +++ b/examples/lapd-drift/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = lapd_drift.cxx diff --git a/examples/laplacexy/alfven-wave/makefile b/examples/laplacexy/alfven-wave/makefile index 9b3ad0460c..cc53facb8e 100644 --- a/examples/laplacexy/alfven-wave/makefile +++ b/examples/laplacexy/alfven-wave/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = alfven.cxx diff --git a/examples/laplacexy/laplace_perp/makefile b/examples/laplacexy/laplace_perp/makefile index a89a691c53..76f2d7cc5b 100644 --- a/examples/laplacexy/laplace_perp/makefile +++ b/examples/laplacexy/laplace_perp/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = test.cxx diff --git a/examples/laplacexy/simple/makefile b/examples/laplacexy/simple/makefile index 5c7d991638..d3da2c0196 100644 --- a/examples/laplacexy/simple/makefile +++ b/examples/laplacexy/simple/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = test-laplacexy.cxx diff --git a/examples/monitor-newapi/makefile b/examples/monitor-newapi/makefile index c45b976d68..0ec013a133 100644 --- a/examples/monitor-newapi/makefile +++ b/examples/monitor-newapi/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = monitor.cxx diff --git a/examples/monitor/makefile b/examples/monitor/makefile index c45b976d68..0ec013a133 100644 --- a/examples/monitor/makefile +++ b/examples/monitor/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = monitor.cxx diff --git a/examples/orszag-tang/makefile b/examples/orszag-tang/makefile index f20fe9594f..9796b1c7a7 100644 --- a/examples/orszag-tang/makefile +++ b/examples/orszag-tang/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. 
+BOUT_TOP ?= ../.. SOURCEC = mhd.cxx diff --git a/examples/performance/arithmetic/makefile b/examples/performance/arithmetic/makefile index 390fe5baa8..c9e6f1e69c 100644 --- a/examples/performance/arithmetic/makefile +++ b/examples/performance/arithmetic/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = arithmetic.cxx diff --git a/examples/performance/arithmetic_3d2d/makefile b/examples/performance/arithmetic_3d2d/makefile index fb80d64565..386ab242e6 100644 --- a/examples/performance/arithmetic_3d2d/makefile +++ b/examples/performance/arithmetic_3d2d/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = arithmetic_3d2d.cxx diff --git a/examples/performance/bracket/makefile b/examples/performance/bracket/makefile index 80b7a5b383..3559c27e2d 100644 --- a/examples/performance/bracket/makefile +++ b/examples/performance/bracket/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = bracket.cxx diff --git a/examples/performance/ddx/makefile b/examples/performance/ddx/makefile index d6aae3a42b..52f6ed7dc0 100644 --- a/examples/performance/ddx/makefile +++ b/examples/performance/ddx/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = ddx.cxx diff --git a/examples/performance/ddy/makefile b/examples/performance/ddy/makefile index 99df08f35d..57b502450a 100644 --- a/examples/performance/ddy/makefile +++ b/examples/performance/ddy/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = ddy.cxx diff --git a/examples/performance/ddz/makefile b/examples/performance/ddz/makefile index c389553de1..5f7aefa6cc 100644 --- a/examples/performance/ddz/makefile +++ b/examples/performance/ddz/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = ddz.cxx diff --git a/examples/performance/iterator-offsets/makefile b/examples/performance/iterator-offsets/makefile index c24d04a25e..94dac859f6 100644 --- a/examples/performance/iterator-offsets/makefile +++ b/examples/performance/iterator-offsets/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = iterator-offsets.cxx diff --git a/examples/performance/iterator/makefile b/examples/performance/iterator/makefile index 7087d48761..33a11d30be 100644 --- a/examples/performance/iterator/makefile +++ b/examples/performance/iterator/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = iterator.cxx diff --git a/examples/performance/laplace/makefile b/examples/performance/laplace/makefile index fa561d91da..3ff28a130e 100644 --- a/examples/performance/laplace/makefile +++ b/examples/performance/laplace/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = laplace.cxx diff --git a/examples/performance/tuning_regionblocksize/makefile b/examples/performance/tuning_regionblocksize/makefile index c912f27a82..6f906e3793 100644 --- a/examples/performance/tuning_regionblocksize/makefile +++ b/examples/performance/tuning_regionblocksize/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. SOURCEC = tuning_regionblocksize.cxx diff --git a/examples/preconditioning/wave/makefile b/examples/preconditioning/wave/makefile index a40563b6cd..095c893d8e 100644 --- a/examples/preconditioning/wave/makefile +++ b/examples/preconditioning/wave/makefile @@ -1,4 +1,4 @@ -BOUT_TOP = ../../.. +BOUT_TOP ?= ../../.. 
SOURCEC = test_precon.cxx diff --git a/examples/reconnect-2field/makefile b/examples/reconnect-2field/makefile index 5cbb18867e..0b7745f5af 100644 --- a/examples/reconnect-2field/makefile +++ b/examples/reconnect-2field/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = 2field.cxx diff --git a/examples/shear-alfven-wave/makefile b/examples/shear-alfven-wave/makefile index 50630ea75d..92ac3ce50d 100644 --- a/examples/shear-alfven-wave/makefile +++ b/examples/shear-alfven-wave/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = 2fluid.cxx diff --git a/examples/staggered_grid/makefile b/examples/staggered_grid/makefile index 3c0009546e..e00eaecd74 100644 --- a/examples/staggered_grid/makefile +++ b/examples/staggered_grid/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = test_staggered.cxx diff --git a/examples/subsampling/makefile b/examples/subsampling/makefile index c45b976d68..0ec013a133 100644 --- a/examples/subsampling/makefile +++ b/examples/subsampling/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = monitor.cxx diff --git a/examples/tokamak-2fluid/makefile b/examples/tokamak-2fluid/makefile index 50630ea75d..92ac3ce50d 100644 --- a/examples/tokamak-2fluid/makefile +++ b/examples/tokamak-2fluid/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = 2fluid.cxx diff --git a/examples/uedge-benchmark/makefile b/examples/uedge-benchmark/makefile index b373f49070..4c8a72d394 100644 --- a/examples/uedge-benchmark/makefile +++ b/examples/uedge-benchmark/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = ue_bmark.cxx diff --git a/examples/wave-slab/makefile b/examples/wave-slab/makefile index 72a58f1fe7..b20fcf6c8e 100644 --- a/examples/wave-slab/makefile +++ b/examples/wave-slab/makefile @@ -1,5 +1,5 @@ -BOUT_TOP = ../.. +BOUT_TOP ?= ../.. SOURCEC = wave_slab.cxx From 38f8c21913bd8fcbf592017cb4074305b494340c Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 1 Nov 2020 23:10:49 +0000 Subject: [PATCH 079/428] Fix BOUT_ENUM_CLASS() macro Had mistakenly re-named some macros from bout/macro_for_each.hxx --- include/bout/bout_enum_class.hxx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/bout/bout_enum_class.hxx b/include/bout/bout_enum_class.hxx index ac45551d89..a3692f64bc 100644 --- a/include/bout/bout_enum_class.hxx +++ b/include/bout/bout_enum_class.hxx @@ -54,7 +54,7 @@ _call(enumname, x) _ec_expand_9(_call, enumname, __VA_ARGS__) #define BOUT_ENUM_CLASS_MAP_ARGS(mac, enumname, ...) 
\ - BOUT_GET_FOR_EACH_EXPANSION(__VA_ARGS__, \ + _GET_FOR_EACH_EXPANSION(__VA_ARGS__, \ _ec_expand_10, _ec_expand_9, _ec_expand_8, _ec_expand_7, \ _ec_expand_6, _ec_expand_5, _ec_expand_4, _ec_expand_3, \ _ec_expand_2, _ec_expand_1) \ @@ -73,7 +73,7 @@ enum class enumname { __VA_ARGS__ }; \ inline std::string toString(enumname e) { \ AUTO_TRACE(); \ const static std::map toString_map = { \ - BOUT_ENUM_CLASS_MAP_ARGS(_ENUM_CLASS_STR, enumname, __VA_ARGS__) \ + BOUT_ENUM_CLASS_MAP_ARGS(BOUT_ENUM_CLASS_STR, enumname, __VA_ARGS__) \ }; \ auto found = toString_map.find(e); \ if (found == toString_map.end()) { \ @@ -85,7 +85,7 @@ inline std::string toString(enumname e) { \ inline enumname BOUT_MAKE_FROMSTRING_NAME(enumname)(const std::string& s) { \ AUTO_TRACE(); \ const static std::map fromString_map = { \ - BOUT_ENUM_CLASS_MAP_ARGS(_STR_ENUM_CLASS, enumname, __VA_ARGS__) \ + BOUT_ENUM_CLASS_MAP_ARGS(BOUT_STR_ENUM_CLASS, enumname, __VA_ARGS__) \ }; \ auto found = fromString_map.find(s); \ if (found == fromString_map.end()) { \ From 65ae4c0be39156567979ec3151069354697a06bb Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 1 Nov 2020 21:36:08 +0000 Subject: [PATCH 080/428] Allow setting FFTW_EXHAUSTIVE May sometimes make FFTs faster than the previously available FFTW_MEASURE setting. --- include/fft.hxx | 5 +++ src/invert/fft_fftw.cxx | 74 +++++++++++++++++++++++++---------------- 2 files changed, 50 insertions(+), 29 deletions(-) diff --git a/include/fft.hxx b/include/fft.hxx index 876217dd1a..7e354e5e2e 100644 --- a/include/fft.hxx +++ b/include/fft.hxx @@ -30,9 +30,12 @@ #include "dcomplex.hxx" #include +#include class Options; +BOUT_ENUM_CLASS(FFT_FLAG, estimate, measure, exhaustive); + namespace bout { namespace fft { @@ -87,6 +90,8 @@ void DST_rev(dcomplex *in, int length, BoutReal *out); /// Should the FFT functions find and use an optimised plan? void fft_init(bool fft_measure); /// Should the FFT functions find and use an optimised plan? +void fft_init(FFT_FLAG fft_flag); +/// Should the FFT functions find and use an optimised plan? /// /// If \p options is not nullptr, it should contain a bool called /// "fftw_measure". If it is nullptr, use the global `Options` root diff --git a/src/invert/fft_fftw.cxx b/src/invert/fft_fftw.cxx index e5c4b5f532..732d88239f 100644 --- a/src/invert/fft_fftw.cxx +++ b/src/invert/fft_fftw.cxx @@ -50,7 +50,7 @@ namespace fft { /// Have we set fft_measure? bool fft_initialised{false}; /// Should FFTW find an optimised plan by measuring various plans? 
-bool fft_measure{false}; +FFT_FLAG fft_flag{FFT_FLAG::estimate}; void fft_init(Options* options) { if (fft_initialised) { @@ -59,13 +59,47 @@ void fft_init(Options* options) { if (options == nullptr) { options = Options::getRoot()->getSection("fft"); } - fft_init((*options)["fft_measure"] - .doc("Perform speed measurements to optimise settings?") - .withDefault(false)); + bool fft_measure = (*options)["fft_measure"] + .doc("Perform speed measurements to optimise settings?") + .withDefault(false); + fft_flag = (*options)["fft_flag"] + .doc("Level speed measurements to optimise FFT settings: [estimate], measure, exhaustive") + .withDefault(FFT_FLAG::estimate); + + if ((*options)["fft_measure"].isSet()) { + if ((*options)["fft_flag"].isSet()) { + throw BoutException("Cannot set both fft_measure and fft_flag"); + } + fft_init(fft_measure); + } else { + fft_init(fft_flag); + } +} + +unsigned int get_flags(FFT_FLAG fft_flag) { + switch (fft_flag) { + case FFT_FLAG::estimate: + return FFTW_ESTIMATE; + case FFT_FLAG::measure: + return FFTW_MEASURE; + case FFT_FLAG::exhaustive: + return FFTW_EXHAUSTIVE; + default: + throw BoutException("Error, unimplemented fft_flag"); + } +} + +void fft_init(FFT_FLAG fft_flag) { + bout::fft::fft_flag = fft_flag; + fft_initialised = true; } void fft_init(bool fft_measure) { - bout::fft::fft_measure = fft_measure; + if (fft_measure) { + fft_flag = FFT_FLAG::measure; + } else { + fft_flag = FFT_FLAG::estimate; + } fft_initialised = true; } @@ -106,10 +140,7 @@ void rfft(MAYBE_UNUSED(const BoutReal *in), MAYBE_UNUSED(int length), MAYBE_UNUS */ fout = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * (length/2 + 1)); - unsigned int flags = FFTW_ESTIMATE; - if (fft_measure) { - flags = FFTW_MEASURE; - } + auto flags = get_flags(fft_flag); /* fftw call * Plan a real-input/complex-output discrete Fourier transform (DFT) @@ -169,10 +200,7 @@ void irfft(MAYBE_UNUSED(const dcomplex *in), MAYBE_UNUSED(int length), MAYBE_UNU // Initialize the output of the fourier transformation fout = (double*) fftw_malloc(sizeof(double) * length); - unsigned int flags = FFTW_ESTIMATE; - if (fft_measure) { - flags = FFTW_MEASURE; - } + auto flags = get_flags(fft_flag); /* fftw call * Plan a complex-input/real-output discrete Fourier transform (DFT) @@ -242,10 +270,7 @@ void rfft(MAYBE_UNUSED(const BoutReal *in), MAYBE_UNUSED(int length), MAYBE_UNUS fftw_malloc(sizeof(fftw_complex) * (length / 2 + 1) * n_th)); p = new fftw_plan[n_th]; //Never freed - unsigned int flags = FFTW_ESTIMATE; - if (fft_measure) { - flags = FFTW_MEASURE; - } + auto flags = get_flags(fft_flag); for(int i=0;i(fftw_malloc(sizeof(double) * 2 * length)); fout = static_cast(fftw_malloc(sizeof(fftw_complex) * 2 * length)); - unsigned int flags = FFTW_ESTIMATE; - if (fft_measure) { - flags = FFTW_MEASURE; - } + auto flags = get_flags(fft_flag); // fftw call // Plan a real-input/complex-output discrete Fourier transform (DFT) @@ -436,10 +455,7 @@ void DST_rev(MAYBE_UNUSED(dcomplex *in), MAYBE_UNUSED(int length), MAYBE_UNUSED( static_cast(fftw_malloc(sizeof(fftw_complex) * 2 * (length - 1))); fout = static_cast(fftw_malloc(sizeof(double) * 2 * (length - 1))); - unsigned int flags = FFTW_ESTIMATE; - if (fft_measure) { - flags = FFTW_MEASURE; - } + auto flags = get_flags(fft_flag); p = fftw_plan_dft_c2r_1d(2*(length-1), fin, fout, flags); From 019780dd3ac03f8da483e0b92ae9078dfb4d6204 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 2 Nov 2020 20:31:58 +0000 Subject: [PATCH 081/428] Rename fft_flag to fft_measurement_flag 
There are other FFTW flags, so make naming more specific in case we ever want to use them. --- include/fft.hxx | 4 ++-- src/invert/fft_fftw.cxx | 44 ++++++++++++++++++++--------------------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/include/fft.hxx b/include/fft.hxx index 7e354e5e2e..e6f00c91aa 100644 --- a/include/fft.hxx +++ b/include/fft.hxx @@ -34,7 +34,7 @@ class Options; -BOUT_ENUM_CLASS(FFT_FLAG, estimate, measure, exhaustive); +BOUT_ENUM_CLASS(FFT_MEASUREMENT_FLAG, estimate, measure, exhaustive); namespace bout { namespace fft { @@ -90,7 +90,7 @@ void DST_rev(dcomplex *in, int length, BoutReal *out); /// Should the FFT functions find and use an optimised plan? void fft_init(bool fft_measure); /// Should the FFT functions find and use an optimised plan? -void fft_init(FFT_FLAG fft_flag); +void fft_init(FFT_MEASUREMENT_FLAG fft_flag); /// Should the FFT functions find and use an optimised plan? /// /// If \p options is not nullptr, it should contain a bool called diff --git a/src/invert/fft_fftw.cxx b/src/invert/fft_fftw.cxx index 732d88239f..b7b8679cb7 100644 --- a/src/invert/fft_fftw.cxx +++ b/src/invert/fft_fftw.cxx @@ -50,7 +50,7 @@ namespace fft { /// Have we set fft_measure? bool fft_initialised{false}; /// Should FFTW find an optimised plan by measuring various plans? -FFT_FLAG fft_flag{FFT_FLAG::estimate}; +FFT_MEASUREMENT_FLAG fft_measurement_flag{FFT_MEASUREMENT_FLAG::estimate}; void fft_init(Options* options) { if (fft_initialised) { @@ -62,43 +62,43 @@ void fft_init(Options* options) { bool fft_measure = (*options)["fft_measure"] .doc("Perform speed measurements to optimise settings?") .withDefault(false); - fft_flag = (*options)["fft_flag"] + fft_measurement_flag = (*options)["fft_measurement_flag"] .doc("Level speed measurements to optimise FFT settings: [estimate], measure, exhaustive") - .withDefault(FFT_FLAG::estimate); + .withDefault(FFT_MEASUREMENT_FLAG::estimate); if ((*options)["fft_measure"].isSet()) { - if ((*options)["fft_flag"].isSet()) { - throw BoutException("Cannot set both fft_measure and fft_flag"); + if ((*options)["fft_measurement_flag"].isSet()) { + throw BoutException("Cannot set both fft_measure and fft_measurement_flag"); } fft_init(fft_measure); } else { - fft_init(fft_flag); + fft_init(fft_measurement_flag); } } -unsigned int get_flags(FFT_FLAG fft_flag) { - switch (fft_flag) { - case FFT_FLAG::estimate: +unsigned int get_measurement_flag(FFT_MEASUREMENT_FLAG fft_measurement_flag) { + switch (fft_measurement_flag) { + case FFT_MEASUREMENT_FLAG::estimate: return FFTW_ESTIMATE; - case FFT_FLAG::measure: + case FFT_MEASUREMENT_FLAG::measure: return FFTW_MEASURE; - case FFT_FLAG::exhaustive: + case FFT_MEASUREMENT_FLAG::exhaustive: return FFTW_EXHAUSTIVE; default: - throw BoutException("Error, unimplemented fft_flag"); + throw BoutException("Error, unimplemented fft_measurement_flag"); } } -void fft_init(FFT_FLAG fft_flag) { - bout::fft::fft_flag = fft_flag; +void fft_init(FFT_MEASUREMENT_FLAG fft_measurement_flag) { + bout::fft::fft_measurement_flag = fft_measurement_flag; fft_initialised = true; } void fft_init(bool fft_measure) { if (fft_measure) { - fft_flag = FFT_FLAG::measure; + fft_measurement_flag = FFT_MEASUREMENT_FLAG::measure; } else { - fft_flag = FFT_FLAG::estimate; + fft_measurement_flag = FFT_MEASUREMENT_FLAG::estimate; } fft_initialised = true; } @@ -140,7 +140,7 @@ void rfft(MAYBE_UNUSED(const BoutReal *in), MAYBE_UNUSED(int length), MAYBE_UNUS */ fout = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * 
(length/2 + 1)); - auto flags = get_flags(fft_flag); + auto flags = get_measurement_flag(fft_measurement_flag); /* fftw call * Plan a real-input/complex-output discrete Fourier transform (DFT) @@ -200,7 +200,7 @@ void irfft(MAYBE_UNUSED(const dcomplex *in), MAYBE_UNUSED(int length), MAYBE_UNU // Initialize the output of the fourier transformation fout = (double*) fftw_malloc(sizeof(double) * length); - auto flags = get_flags(fft_flag); + auto flags = get_measurement_flag(fft_measurement_flag); /* fftw call * Plan a complex-input/real-output discrete Fourier transform (DFT) @@ -270,7 +270,7 @@ void rfft(MAYBE_UNUSED(const BoutReal *in), MAYBE_UNUSED(int length), MAYBE_UNUS fftw_malloc(sizeof(fftw_complex) * (length / 2 + 1) * n_th)); p = new fftw_plan[n_th]; //Never freed - auto flags = get_flags(fft_flag); + auto flags = get_measurement_flag(fft_measurement_flag); for(int i=0;i(fftw_malloc(sizeof(double) * 2 * length)); fout = static_cast(fftw_malloc(sizeof(fftw_complex) * 2 * length)); - auto flags = get_flags(fft_flag); + auto flags = get_measurement_flag(fft_measurement_flag); // fftw call // Plan a real-input/complex-output discrete Fourier transform (DFT) @@ -455,7 +455,7 @@ void DST_rev(MAYBE_UNUSED(dcomplex *in), MAYBE_UNUSED(int length), MAYBE_UNUSED( static_cast(fftw_malloc(sizeof(fftw_complex) * 2 * (length - 1))); fout = static_cast(fftw_malloc(sizeof(double) * 2 * (length - 1))); - auto flags = get_flags(fft_flag); + auto flags = get_measurement_flag(fft_measurement_flag); p = fftw_plan_dft_c2r_1d(2*(length-1), fin, fout, flags); From 0a0ecdcc60d729d05d1206cdb25702e16eed9f1b Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 2 Nov 2020 23:09:06 +0000 Subject: [PATCH 082/428] Deprecation warning for fft_measure option --- src/invert/fft_fftw.cxx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/invert/fft_fftw.cxx b/src/invert/fft_fftw.cxx index b7b8679cb7..f0ecefa8ea 100644 --- a/src/invert/fft_fftw.cxx +++ b/src/invert/fft_fftw.cxx @@ -67,6 +67,8 @@ void fft_init(Options* options) { .withDefault(FFT_MEASUREMENT_FLAG::estimate); if ((*options)["fft_measure"].isSet()) { + output << "WARNING: fft_measure is deprecated and will be removed in BOUT++ v5.0. " + << "Use fft_measurement_flag instead." << std::endl; if ((*options)["fft_measurement_flag"].isSet()) { throw BoutException("Cannot set both fft_measure and fft_measurement_flag"); } From c89aa9b09ca77bdbcd37e99540874c1e6db69a61 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 2 Nov 2020 19:00:24 +0000 Subject: [PATCH 083/428] Call checkData before returning result in Laplace inversions Helps pin down where invalid values came from if solution fails for some reason. Otherwise exception is thrown from where the solution is first used, which may be confusing. 
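For illustration, a minimal sketch of the pattern applied in each solver below; the class name LaplaceExample is hypothetical and the body is schematic, not code from this patch:

    // Validate the solution before handing it back, so that non-finite values
    // are reported from the solver itself rather than from wherever the result
    // is first used.
    FieldPerp LaplaceExample::solve(const FieldPerp& rhs, const FieldPerp& x0) {
      FieldPerp x = x0;   // placeholder: a real solver computes x from rhs
      // ... perform the inversion here ...
      checkData(x);       // throws BoutException if x contains non-finite values
                          // (when built with a high enough CHECK level)
      return x;
    }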
--- src/invert/laplace/impls/cyclic/cyclic_laplace.cxx | 6 ++++++ src/invert/laplace/impls/multigrid/multigrid_laplace.cxx | 2 -- src/invert/laplace/impls/naulin/naulin_laplace.cxx | 2 ++ src/invert/laplace/impls/pdd/pdd.cxx | 4 ++++ src/invert/laplace/impls/petsc/petsc_laplace.cxx | 2 ++ src/invert/laplace/impls/serial_band/serial_band.cxx | 2 ++ src/invert/laplace/impls/serial_tri/serial_tri.cxx | 2 ++ src/invert/laplace/impls/shoot/shoot_laplace.cxx | 2 ++ src/invert/laplace/impls/spt/spt.cxx | 2 ++ 9 files changed, 22 insertions(+), 2 deletions(-) diff --git a/src/invert/laplace/impls/cyclic/cyclic_laplace.cxx b/src/invert/laplace/impls/cyclic/cyclic_laplace.cxx index c2fc9802ca..af203d2ec5 100644 --- a/src/invert/laplace/impls/cyclic/cyclic_laplace.cxx +++ b/src/invert/laplace/impls/cyclic/cyclic_laplace.cxx @@ -250,6 +250,9 @@ FieldPerp LaplaceCyclic::solve(const FieldPerp& rhs, const FieldPerp& x0) { } } } + + checkData(x); + return x; } @@ -468,5 +471,8 @@ Field3D LaplaceCyclic::solve(const Field3D& rhs, const Field3D& x0) { } } } + + checkData(x); + return x; } diff --git a/src/invert/laplace/impls/multigrid/multigrid_laplace.cxx b/src/invert/laplace/impls/multigrid/multigrid_laplace.cxx index 15b52cdbdd..9098b2fcb3 100644 --- a/src/invert/laplace/impls/multigrid/multigrid_laplace.cxx +++ b/src/invert/laplace/impls/multigrid/multigrid_laplace.cxx @@ -542,9 +542,7 @@ BOUT_OMP(for) } } -#if CHECK > 2 checkData(result); -#endif return result; } diff --git a/src/invert/laplace/impls/naulin/naulin_laplace.cxx b/src/invert/laplace/impls/naulin/naulin_laplace.cxx index dcbe43e83b..b16c004fbf 100644 --- a/src/invert/laplace/impls/naulin/naulin_laplace.cxx +++ b/src/invert/laplace/impls/naulin/naulin_laplace.cxx @@ -329,6 +329,8 @@ Field3D LaplaceNaulin::solve(const Field3D& rhs, const Field3D& x0) { naulinsolver_mean_underrelax_counts = (naulinsolver_mean_underrelax_counts * BoutReal(ncalls - 1) + BoutReal(underrelax_count)) / BoutReal(ncalls); + checkData(b_x_pair.second); + return b_x_pair.second; } diff --git a/src/invert/laplace/impls/pdd/pdd.cxx b/src/invert/laplace/impls/pdd/pdd.cxx index 5a07ed4cde..9a5806a845 100644 --- a/src/invert/laplace/impls/pdd/pdd.cxx +++ b/src/invert/laplace/impls/pdd/pdd.cxx @@ -49,6 +49,8 @@ FieldPerp LaplacePDD::solve(const FieldPerp& b) { start(b, data); next(data); finish(data, x); + + checkData(x); return x; } @@ -97,6 +99,8 @@ Field3D LaplacePDD::solve(const Field3D& b) { x.setLocation(b.getLocation()); + checkData(x); + return x; } diff --git a/src/invert/laplace/impls/petsc/petsc_laplace.cxx b/src/invert/laplace/impls/petsc/petsc_laplace.cxx index 50e711e13a..2399520093 100644 --- a/src/invert/laplace/impls/petsc/petsc_laplace.cxx +++ b/src/invert/laplace/impls/petsc/petsc_laplace.cxx @@ -841,6 +841,8 @@ FieldPerp LaplacePetsc::solve(const FieldPerp& b, const FieldPerp& x0) { throw BoutException("Petsc index sanity check 2 failed"); } + checkData(sol); + // Return the solution return sol; } diff --git a/src/invert/laplace/impls/serial_band/serial_band.cxx b/src/invert/laplace/impls/serial_band/serial_band.cxx index 8af81a0edc..32ba521b6f 100644 --- a/src/invert/laplace/impls/serial_band/serial_band.cxx +++ b/src/invert/laplace/impls/serial_band/serial_band.cxx @@ -419,5 +419,7 @@ FieldPerp LaplaceSerialBand::solve(const FieldPerp& b, const FieldPerp& x0) { irfft(&xk(ix, 0), ncz, x[ix]); } + checkData(x); + return x; } diff --git a/src/invert/laplace/impls/serial_tri/serial_tri.cxx b/src/invert/laplace/impls/serial_tri/serial_tri.cxx index 
12a1d0d410..6332415747 100644 --- a/src/invert/laplace/impls/serial_tri/serial_tri.cxx +++ b/src/invert/laplace/impls/serial_tri/serial_tri.cxx @@ -237,5 +237,7 @@ FieldPerp LaplaceSerialTri::solve(const FieldPerp& b, const FieldPerp& x0) { #endif } + checkData(x); + return x; // Result of the inversion } diff --git a/src/invert/laplace/impls/shoot/shoot_laplace.cxx b/src/invert/laplace/impls/shoot/shoot_laplace.cxx index b31a710455..38557021fd 100644 --- a/src/invert/laplace/impls/shoot/shoot_laplace.cxx +++ b/src/invert/laplace/impls/shoot/shoot_laplace.cxx @@ -171,6 +171,8 @@ FieldPerp LaplaceShoot::solve(const FieldPerp& rhs) { } } } + + checkData(x); return x; } diff --git a/src/invert/laplace/impls/spt/spt.cxx b/src/invert/laplace/impls/spt/spt.cxx index 1960d38928..2fac129057 100644 --- a/src/invert/laplace/impls/spt/spt.cxx +++ b/src/invert/laplace/impls/spt/spt.cxx @@ -108,6 +108,8 @@ FieldPerp LaplaceSPT::solve(const FieldPerp& b, const FieldPerp& x0) { }else start(b, slicedata); finish(slicedata, x); + + checkData(x); return x; } From 568a19485efe3a0f1f250fd3bacece988f718b91 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 18 Nov 2020 16:44:19 +0000 Subject: [PATCH 084/428] Check DataFile grid sizes match those in existing mesh --- src/fileio/datafile.cxx | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/src/fileio/datafile.cxx b/src/fileio/datafile.cxx index c44afc01c8..6b373e090a 100644 --- a/src/fileio/datafile.cxx +++ b/src/fileio/datafile.cxx @@ -928,6 +928,33 @@ void Datafile::add(Vector3D &f, const char *name, bool save_repeat) { } } +namespace { +// Read a value from file and check it matches reference_value, throw if not +void checkGridValue(DataFormat* file, const std::string& name, + const std::string& filename, const int reference_value) { + int file_value; + if (!file->read(&file_value, name)) { + throw BoutException("Could not read {} from file '{}'", name, filename); + } + + if (file_value != reference_value) { + throw BoutException("{} ({}) in file '{}' does not match value in mesh ({})", name, + file_value, filename, reference_value); + } +} + +// Check that the array sizes in \p file match those in existing \p mesh +void checkFileGrid(DataFormat* file, const std::string& filename, const Mesh* mesh) { + checkGridValue(file, "MXG", filename, mesh->xstart); + checkGridValue(file, "MYG", filename, mesh->ystart); + checkGridValue(file, "MZG", filename, mesh->zstart); + // nx includes boundaries + checkGridValue(file, "nx", filename, mesh->GlobalNx); + checkGridValue(file, "ny", filename, mesh->GlobalNyNoBoundaries); + checkGridValue(file, "nz", filename, mesh->GlobalNzNoBoundaries); +} +} // namespace + bool Datafile::read() { Timer timer("io"); ///< Start timer. Stops when goes out of scope @@ -942,6 +969,8 @@ bool Datafile::read() { if(!file->is_valid()) throw BoutException("Datafile::read: File is not valid!"); + checkFileGrid(file.get(), filename, mesh); + file->setRecord(-1); // Read the latest record // Read integers From 4bd085fb927a901417de113fe56f8b90d6732a09 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 24 Nov 2020 14:11:57 +0000 Subject: [PATCH 085/428] Fix backport of grid checks Address issues raised by @ZedThree in #2148. 
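For reference, the convention being restored is the printf-style formatting used by BoutException on this branch; the fmt-style '{}' placeholders belong to the development branch. A minimal sketch with illustrative values (note that printf-style varargs expect C strings, so std::string arguments are normally passed via .c_str()):

    // %s expects a null-terminated C string, %i an int
    throw BoutException("%s (%i) in file '%s' does not match value in mesh (%i)",
                        name.c_str(), file_value, filename.c_str(), reference_value);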
--- src/fileio/datafile.cxx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/fileio/datafile.cxx b/src/fileio/datafile.cxx index 6b373e090a..42b40bcdba 100644 --- a/src/fileio/datafile.cxx +++ b/src/fileio/datafile.cxx @@ -934,11 +934,11 @@ void checkGridValue(DataFormat* file, const std::string& name, const std::string& filename, const int reference_value) { int file_value; if (!file->read(&file_value, name)) { - throw BoutException("Could not read {} from file '{}'", name, filename); + throw BoutException("Could not read %s from file '%s'", name, filename); } if (file_value != reference_value) { - throw BoutException("{} ({}) in file '{}' does not match value in mesh ({})", name, + throw BoutException("%s (%i) in file '%s' does not match value in mesh (%i)", name, file_value, filename, reference_value); } } @@ -950,8 +950,8 @@ void checkFileGrid(DataFormat* file, const std::string& filename, const Mesh* me checkGridValue(file, "MZG", filename, mesh->zstart); // nx includes boundaries checkGridValue(file, "nx", filename, mesh->GlobalNx); - checkGridValue(file, "ny", filename, mesh->GlobalNyNoBoundaries); - checkGridValue(file, "nz", filename, mesh->GlobalNzNoBoundaries); + checkGridValue(file, "ny", filename, mesh->GlobalNy); + checkGridValue(file, "nz", filename, mesh->LocalNz); } } // namespace From 0271fe90ff044317031ae624be3127cef595b3d6 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 21 Nov 2020 14:04:37 +0000 Subject: [PATCH 086/428] Bugfix for FieldPerp write in H5Format Only affects parallel=true case, which does not work anyway, so not a serious bug. --- src/fileio/impls/hdf5/h5_format.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/fileio/impls/hdf5/h5_format.cxx b/src/fileio/impls/hdf5/h5_format.cxx index cbdd97d154..7a561c4030 100644 --- a/src/fileio/impls/hdf5/h5_format.cxx +++ b/src/fileio/impls/hdf5/h5_format.cxx @@ -348,9 +348,9 @@ bool H5Format::addVar(const std::string &name, bool repeat, hid_t write_hdf5_typ if (parallel) { init_size[0] = mesh->GlobalNx - 2 * mesh->xstart; if (datatype == "FieldPerp") { - init_size[1] = mesh->GlobalNy - 2 * mesh->ystart; - } else { init_size[1] = mesh->GlobalNz; + } else { + init_size[1] = mesh->GlobalNy - 2 * mesh->ystart; } init_size[2] = mesh->GlobalNz; } else { From 38bf53914b83e316b53306f992fcb82f9ad03b21 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 20 Nov 2020 21:28:46 +0000 Subject: [PATCH 087/428] Support I/O for std::vector in Datafile and DataFormat --- include/datafile.hxx | 4 ++ include/dataformat.hxx | 1 + src/fileio/datafile.cxx | 142 ++++++++++++++++++++++++++++++++++++++-- 3 files changed, 142 insertions(+), 5 deletions(-) diff --git a/include/datafile.hxx b/include/datafile.hxx index 084955a773..c7fae7b0e6 100644 --- a/include/datafile.hxx +++ b/include/datafile.hxx @@ -69,6 +69,7 @@ class Datafile { add(value, name.c_str(), false); } void add(int &i, const char *name, bool save_repeat = false); + void add(std::vector &ivec, const char *name, bool save_repeat = false); void add(BoutReal &r, const char *name, bool save_repeat = false); void add(bool &b, const char* name, bool save_repeat = false); void add(Field2D &f, const char *name, bool save_repeat = false); @@ -122,10 +123,12 @@ class Datafile { std::string name; ///< Name as it appears in the output file bool save_repeat; ///< If true, has a time dimension and is saved every time step bool covar; ///< For vectors, true if a covariant vector, false if contravariant + size_t size; ///< Size 
of a stored vector or string, to check it does not change after being added }; // one set per variable type std::vector> int_arr; + std::vector>> int_vec_arr; std::vector> BoutReal_arr; std::vector> bool_arr; std::vector> f2d_arr; @@ -139,6 +142,7 @@ class Datafile { bool read_fperp(const std::string &name, FieldPerp *f, bool save_repeat); bool write_int(const std::string &name, int *f, bool save_repeat); + bool write_int_vec(const std::string &name, std::vector *f, bool save_repeat); bool write_real(const std::string &name, BoutReal *f, bool save_repeat); bool write_f2d(const std::string &name, Field2D *f, bool save_repeat); bool write_f3d(const std::string &name, Field3D *f, bool save_repeat); diff --git a/include/dataformat.hxx b/include/dataformat.hxx index 54d683bd38..f9426ac8f5 100644 --- a/include/dataformat.hxx +++ b/include/dataformat.hxx @@ -77,6 +77,7 @@ class DataFormat { // Add a variable to the file virtual bool addVarInt(const std::string &name, bool repeat) = 0; + virtual bool addVarIntVec(const std::string &name, bool repeat, size_t size) = 0; virtual bool addVarBoutReal(const std::string &name, bool repeat) = 0; virtual bool addVarField2D(const std::string &name, bool repeat) = 0; virtual bool addVarField3D(const std::string &name, bool repeat) = 0; diff --git a/src/fileio/datafile.cxx b/src/fileio/datafile.cxx index c44afc01c8..360e410d72 100644 --- a/src/fileio/datafile.cxx +++ b/src/fileio/datafile.cxx @@ -79,10 +79,10 @@ Datafile::Datafile(Datafile &&other) noexcept shiftInput(other.shiftInput), flushFrequencyCounter(other.flushFrequencyCounter), flushFrequency(other.flushFrequency), file(std::move(other.file)), writable(other.writable), appending(other.appending), first_time(other.first_time), - int_arr(std::move(other.int_arr)), BoutReal_arr(std::move(other.BoutReal_arr)), - bool_arr(std::move(other.bool_arr)), f2d_arr(std::move(other.f2d_arr)), - f3d_arr(std::move(other.f3d_arr)), v2d_arr(std::move(other.v2d_arr)), - v3d_arr(std::move(other.v3d_arr)) { + int_arr(std::move(other.int_arr)), int_vec_arr(std::move(other.int_vec_arr)), + BoutReal_arr(std::move(other.BoutReal_arr)), bool_arr(std::move(other.bool_arr)), + f2d_arr(std::move(other.f2d_arr)), f3d_arr(std::move(other.f3d_arr)), + v2d_arr(std::move(other.v2d_arr)), v3d_arr(std::move(other.v3d_arr)) { filenamelen = other.filenamelen; filename = other.filename; other.filenamelen = 0; @@ -95,7 +95,7 @@ Datafile::Datafile(const Datafile &other) : floats(other.floats), openclose(other.openclose), Lx(other.Lx), Ly(other.Ly), Lz(other.Lz), enabled(other.enabled), shiftOutput(other.shiftOutput), shiftInput(other.shiftInput), flushFrequencyCounter(other.flushFrequencyCounter), flushFrequency(other.flushFrequency), file(nullptr), writable(other.writable), appending(other.appending), first_time(other.first_time), - int_arr(other.int_arr), BoutReal_arr(other.BoutReal_arr), + int_arr(other.int_arr), int_vec_arr(other.int_vec_arr), BoutReal_arr(other.BoutReal_arr), bool_arr(other.bool_arr), f2d_arr(other.f2d_arr), f3d_arr(other.f3d_arr), v2d_arr(other.v2d_arr), v3d_arr(other.v3d_arr) { @@ -123,6 +123,7 @@ Datafile& Datafile::operator=(Datafile &&rhs) noexcept { appending = rhs.appending; first_time = rhs.first_time; int_arr = std::move(rhs.int_arr); + int_vec_arr = std::move(rhs.int_vec_arr); BoutReal_arr = std::move(rhs.BoutReal_arr); bool_arr = std::move(rhs.bool_arr); f2d_arr = std::move(rhs.f2d_arr); @@ -224,6 +225,13 @@ bool Datafile::openw(const char *format, ...) 
{ throw BoutException("Failed to add int variable %s to Datafile", var.name.c_str()); } } + + // Add vectors of integers + for(const auto& var : int_vec_arr) { + if (!file->addVarIntVec(var.name, var.save_repeat, var.ptr->size())) { + throw BoutException("Failed to add int vector variable %s to Datafile", var.name.c_str()); + } + } // Add BoutReals for(const auto& var : BoutReal_arr) { @@ -342,6 +350,13 @@ bool Datafile::opena(const char *format, ...) { } } + // Add vectors of integers + for(const auto& var : int_vec_arr) { + if (!file->addVarIntVec(var.name, var.save_repeat, var.ptr->size())) { + throw BoutException("Failed to add int vector variable %s to Datafile", var.name.c_str()); + } + } + // Add BoutReals for(const auto& var : BoutReal_arr) { if (!file->addVarBoutReal(var.name, var.save_repeat)) { @@ -497,6 +512,64 @@ void Datafile::add(int &i, const char *name, bool save_repeat) { } } +void Datafile::add(std::vector &i, const char *name, bool save_repeat) { + TRACE("DataFile::add(std::vector)"); + if (!enabled) + return; + if (varAdded(name)) { + // Check if it's the same variable + if (&i == varPtr(name)) { + output_warn.write("WARNING: variable '%s' already added to Datafile, skipping...\n", + name); + return; + } else { + throw BoutException("Variable with name '%s' already added to Datafile", name); + } + } + + VarStr> d; + + d.ptr = &i; + d.name = name; + d.save_repeat = save_repeat; + d.covar = false; + d.size = i.size(); + + int_vec_arr.push_back(d); + + if (writable) { + // Otherwise will add variables when Datafile is opened for writing/appending + if (openclose) { + // Open the file + // Check filename has been set + if (strcmp(filename, "") == 0) + throw BoutException("Datafile::add: Filename has not been set"); + if(!file->openw(filename, BoutComm::rank(), appending)) { + if (appending) { + throw BoutException("Datafile::add: Failed to open file %s for appending!", + filename); + } else { + throw BoutException("Datafile::add: Failed to open file %s for writing!", + filename); + } + } + appending = true; + } + + if(!file->is_valid()) + throw BoutException("Datafile::add: File is not valid!"); + + // Add variable to file + if (!file->addVarIntVec(name, save_repeat, i.size())) { + throw BoutException("Failed to add int vector variable %s to Datafile", name); + } + + if(openclose) { + file->close(); + } + } +} + void Datafile::add(BoutReal &r, const char *name, bool save_repeat) { TRACE("DataFile::add(BoutReal)"); if (!enabled) @@ -967,6 +1040,37 @@ bool Datafile::read() { } } + // Read vectors of integers + for(const auto& var : int_vec_arr) { + if (var.ptr->size() != var.size) { + throw BoutException("Size of std::vector '%s' has changed since being added " + "to Datafile. Cannot read.", var.name.c_str()); + } + if(var.save_repeat) { + if(!file->read_rec(&(*var.ptr)[0], var.name.c_str(), var.ptr->size())) { + if(!init_missing) { + throw BoutException( + "Missing data for %s in input. Set init_missing=true to create empty vector.", + var.name.c_str()); + } + output_warn.write("\tWARNING: Could not read integer vector %s. Creating empty vector\n", var.name.c_str()); + *(var.ptr) = {}; + continue; + } + } else { + if(!file->read(&(*var.ptr)[0], var.name.c_str(), var.ptr->size())) { + if(!init_missing) { + throw BoutException( + "Missing data for %s in input. Set init_missing=true to create empty vector.", + var.name.c_str()); + } + output_warn.write("\tWARNING: Could not read integer vector %s. 
Creating empty vector\n", var.name.c_str()); + *(var.ptr) = {}; + continue; + } + } + } + // Read BoutReals for(const auto& var : BoutReal_arr) { if(var.save_repeat) { @@ -1149,6 +1253,15 @@ bool Datafile::write() { write_int(var.name, var.ptr, var.save_repeat); } + // Write vectors of integers + for(const auto& var : int_vec_arr) { + if (var.ptr->size() != var.size) { + throw BoutException("Size of std::vector '%s' has changed since being added " + "to Datafile. Cannot write.", var.name.c_str()); + } + write_int_vec(var.name, var.ptr, var.save_repeat); + } + // Write BoutReals for(const auto& var : BoutReal_arr) { write_real(var.name, var.ptr, var.save_repeat); @@ -1499,6 +1612,14 @@ bool Datafile::write_int(const std::string &name, int *f, bool save_repeat) { } } +bool Datafile::write_int_vec(const std::string &name, std::vector *f, bool save_repeat) { + if(save_repeat) { + return file->write_rec(&(*f)[0], name, f->size()); + }else { + return file->write(&(*f)[0], name, f->size()); + } +} + bool Datafile::write_real(const std::string &name, BoutReal *f, bool save_repeat) { if(save_repeat) { return file->write_rec(f, name); @@ -1576,6 +1697,11 @@ bool Datafile::varAdded(const std::string &name) { return true; } + for(const auto& var : int_vec_arr ) { + if(name == var.name) + return true; + } + for(const auto& var : BoutReal_arr ) { if(name == var.name) return true; @@ -1620,6 +1746,12 @@ void *Datafile::varPtr(const std::string &name) { } } + for (const auto &var : int_vec_arr) { + if (name == var.name) { + return static_cast(var.ptr); + } + } + for (const auto &var : BoutReal_arr) { if (name == var.name) { return static_cast(var.ptr); From f35337e640b9db2bb2ae7e9f860f2817e3e27d36 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 20 Nov 2020 21:30:14 +0000 Subject: [PATCH 088/428] Support std::vector in Ncxx4 --- src/fileio/impls/netcdf4/ncxx4.cxx | 45 ++++++++++++++++++++++++++++++ src/fileio/impls/netcdf4/ncxx4.hxx | 1 + 2 files changed, 46 insertions(+) diff --git a/src/fileio/impls/netcdf4/ncxx4.cxx b/src/fileio/impls/netcdf4/ncxx4.cxx index 8e6598dc21..30a2cfa34c 100644 --- a/src/fileio/impls/netcdf4/ncxx4.cxx +++ b/src/fileio/impls/netcdf4/ncxx4.cxx @@ -350,6 +350,51 @@ bool Ncxx4::addVarInt(const string &name, bool repeat) { return true; } +bool Ncxx4::addVarIntVec(const string &name, bool repeat, size_t size) { + if(!is_valid()) + return false; + + NcVar var = dataFile->getVar(name); + const auto vec_dim_name = "vec" + std::to_string(size); + auto vec_dim = dataFile->getDim(vec_dim_name); + if (vec_dim.isNull()) { + vec_dim = dataFile->addDim(vec_dim_name, size); + } + if(var.isNull()) { + // Variable not in file, so add it. + if (repeat) { + auto dims = getRecDimVec(2); + dims[1] = vec_dim; + var = dataFile->addVar(name, ncInt, dims); + } else { + auto dims = getDimVec(1); + dims[0] = vec_dim; + var = dataFile->addVar(name, ncInt, dims); + } + + if(var.isNull()) { + output_error.write("ERROR: NetCDF could not add int '%s' to file '%s'\n", name.c_str(), fname); + return false; + } + } else { + // Check the existing variable is consistent with what's being added + if (repeat) { + ASSERT0(var.getDimCount() == 2); + if (var.getDim(1).getSize() != size) { + throw BoutException("Found existing variable '%s' with size %lu. Trying to add " + "with size %lu.", name.c_str(), var.getDim(1).getSize(), size); + } + } else { + ASSERT0(var.getDimCount() == 1); + if (var.getDim(0).getSize() != size) { + throw BoutException("Found existing variable '%s' with size %lu. 
Trying to add " + "with size %lu.", name.c_str(), var.getDim(0).getSize(), size); + } + } + } + return true; +} + bool Ncxx4::addVarBoutReal(const string &name, bool repeat) { if(!is_valid()) return false; diff --git a/src/fileio/impls/netcdf4/ncxx4.hxx b/src/fileio/impls/netcdf4/ncxx4.hxx index 7919b621ed..293de2ef08 100644 --- a/src/fileio/impls/netcdf4/ncxx4.hxx +++ b/src/fileio/impls/netcdf4/ncxx4.hxx @@ -88,6 +88,7 @@ class Ncxx4 : public DataFormat { // Add a variable to the file bool addVarInt(const std::string &name, bool repeat) override; + bool addVarIntVec(const std::string &name, bool repeat, size_t size) override; bool addVarBoutReal(const std::string &name, bool repeat) override; bool addVarField2D(const std::string &name, bool repeat) override; bool addVarField3D(const std::string &name, bool repeat) override; From 1339d05a99c588be682b32e3c270f6f21c4deee3 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 20 Nov 2020 21:31:23 +0000 Subject: [PATCH 089/428] Support std::vector in NcFormat --- src/fileio/impls/netcdf/nc_format.cxx | 50 +++++++++++++++++++++++++++ src/fileio/impls/netcdf/nc_format.hxx | 1 + 2 files changed, 51 insertions(+) diff --git a/src/fileio/impls/netcdf/nc_format.cxx b/src/fileio/impls/netcdf/nc_format.cxx index f84611a4e2..fc7d66ad86 100644 --- a/src/fileio/impls/netcdf/nc_format.cxx +++ b/src/fileio/impls/netcdf/nc_format.cxx @@ -390,6 +390,56 @@ bool NcFormat::addVarInt(const string &name, bool repeat) { return true; } +bool NcFormat::addVarIntVec(const string &name, bool repeat, size_t size) { + if(!is_valid()) + return false; + + // Create an error object so netCDF doesn't exit +#ifdef NCDF_VERBOSE + NcError err(NcError::verbose_nonfatal); +#else + NcError err(NcError::silent_nonfatal); +#endif + + NcVar* var; + if (!(var = dataFile->get_var(name.c_str()))) { + // Variable not in file, so add it. + NcDim* vecDim; + auto vecDimName = "vec" + std::to_string(size); + if (!(vecDim = dataFile->get_dim(vecDimName.c_str()))) { + vecDim = dataFile->add_dim(vecDimName.c_str(), int(size)); + } + if (repeat) { + std::vector local_dim_list{recDimList[0], vecDim}; + var = dataFile->add_var(name.c_str(), ncInt, 2, &local_dim_list[0]); + } else { + std::vector local_dim_list{vecDim}; + var = dataFile->add_var(name.c_str(), ncInt, 1, &local_dim_list[0]); + } + + if(!var->is_valid()) { + output_error.write("ERROR: NetCDF could not add int '%s' to file '%s'\n", name.c_str(), fname); + return false; + } + } else { + // Check the existing variable is consistent with what's being added + if (repeat) { + ASSERT0(var->num_dims() == 2); + if (size_t(var->get_dim(1)->size()) != size) { + throw BoutException("Found existing variable '%s' with size %lu. Trying to add " + "with size %lu.", name.c_str(), var->get_dim(1)->size(), size); + } + } else { + ASSERT0(var->num_dims() == 1); + if (size_t(var->get_dim(0)->size()) != size) { + throw BoutException("Found existing variable '%s' with size %lu. 
Trying to add " + "with size %lu.", name.c_str(), var->get_dim(0)->size(), size); + } + } + } + return true; +} + bool NcFormat::addVarBoutReal(const string &name, bool repeat) { if(!is_valid()) return false; diff --git a/src/fileio/impls/netcdf/nc_format.hxx b/src/fileio/impls/netcdf/nc_format.hxx index f8592eb599..2bc962b07c 100644 --- a/src/fileio/impls/netcdf/nc_format.hxx +++ b/src/fileio/impls/netcdf/nc_format.hxx @@ -87,6 +87,7 @@ class NcFormat : public DataFormat { // Add a variable to the file bool addVarInt(const std::string &name, bool repeat) override; + bool addVarIntVec(const std::string &name, bool repeat, size_t size) override; bool addVarBoutReal(const std::string &name, bool repeat) override; bool addVarField2D(const std::string &name, bool repeat) override; bool addVarField3D(const std::string &name, bool repeat) override; From 891b36f7b8a3c4c1d4ca532a19e4ef4134cafa65 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 20 Nov 2020 21:32:18 +0000 Subject: [PATCH 090/428] Support std::vector in H5Format --- src/fileio/impls/hdf5/h5_format.cxx | 39 ++++++++++++++++------------- src/fileio/impls/hdf5/h5_format.hxx | 4 ++- 2 files changed, 25 insertions(+), 18 deletions(-) diff --git a/src/fileio/impls/hdf5/h5_format.cxx b/src/fileio/impls/hdf5/h5_format.cxx index 7a561c4030..323174f89e 100644 --- a/src/fileio/impls/hdf5/h5_format.cxx +++ b/src/fileio/impls/hdf5/h5_format.cxx @@ -249,7 +249,7 @@ bool H5Format::setRecord(int t) { // Add a variable to the file bool H5Format::addVar(const std::string &name, bool repeat, hid_t write_hdf5_type, - std::string datatype) { + std::string datatype, int lx, int ly, int lz) { hid_t dataSet = H5Dopen(dataFile, name.c_str(), H5P_DEFAULT); if (dataSet >= 0) { // >=0 means variable already exists, so return. if (H5Dclose(dataSet) < 0) @@ -259,6 +259,7 @@ bool H5Format::addVar(const std::string &name, bool repeat, hid_t write_hdf5_typ int nd = 0; if (datatype == "scalar") nd = 0; + else if (datatype == "vector") nd = 1; else if (datatype == "FieldX") nd = 1; else if (datatype == "Field2D") nd = 2; else if (datatype == "FieldPerp") nd = 2; @@ -273,23 +274,23 @@ bool H5Format::addVar(const std::string &name, bool repeat, hid_t write_hdf5_typ hsize_t init_size[4]; if (parallel) { init_size[0]=0; - init_size[1]=mesh->GlobalNx-2*mesh->xstart; + init_size[1] = lx == 0 ? mesh->GlobalNx-2*mesh->xstart : lx; if (datatype == "FieldPerp_t") { - init_size[2]=mesh->GlobalNz; + init_size[2] = lz == 0 ? mesh->GlobalNz : lz; } else { - init_size[2]=mesh->GlobalNy-2*mesh->ystart; + init_size[2] = ly == 0 ? mesh->GlobalNy - 2*mesh->ystart : ly; } - init_size[3]=mesh->GlobalNz; + init_size[3] = lz == 0 ? mesh->GlobalNz : lz; } else { init_size[0]=0; - init_size[1]=mesh->LocalNx; + init_size[1] = lx == 0 ? mesh->LocalNx : lx; if (datatype == "FieldPerp_t") { - init_size[2]=mesh->LocalNz; + init_size[2] = lz == 0 ? mesh->LocalNz : lz; } else { - init_size[2]=mesh->LocalNy; + init_size[2] = ly == 0 ? mesh->LocalNy : ly; } - init_size[3]=mesh->LocalNz; + init_size[3] = lz == 0 ? mesh->LocalNz : lz; } // Modify dataset creation properties, i.e. enable chunking. @@ -346,21 +347,21 @@ bool H5Format::addVar(const std::string &name, bool repeat, hid_t write_hdf5_typ // Negative value indicates error, i.e. file does not exist, so create: hsize_t init_size[3]; if (parallel) { - init_size[0] = mesh->GlobalNx - 2 * mesh->xstart; + init_size[0] = lx == 0 ? 
mesh->GlobalNx - 2 * mesh->xstart : lx; if (datatype == "FieldPerp") { - init_size[1] = mesh->GlobalNz; + init_size[1] = lz == 0 ? mesh->GlobalNz : lz; } else { - init_size[1] = mesh->GlobalNy - 2 * mesh->ystart; + init_size[1] = ly == 0 ? mesh->GlobalNy - 2 * mesh->ystart : ly; } - init_size[2] = mesh->GlobalNz; + init_size[2] = lz == 0 ? mesh->GlobalNz : lz; } else { - init_size[0] = mesh->LocalNx; + init_size[0] = lx == 0 ? mesh->LocalNx : lx; if (datatype == "FieldPerp") { - init_size[1] = mesh->LocalNz; + init_size[1] = lz == 0 ? mesh->LocalNz : lz; } else { - init_size[1] = mesh->LocalNy; + init_size[1] = ly == 0 ? mesh->LocalNy : ly; } - init_size[2] = mesh->LocalNz; + init_size[2] = lz == 0 ? mesh->LocalNz : lz; } // Create value for attribute to say what kind of field this is @@ -392,6 +393,10 @@ bool H5Format::addVarInt(const std::string &name, bool repeat) { return addVar(name, repeat, H5T_NATIVE_INT, "scalar"); } +bool H5Format::addVarIntVec(const std::string &name, bool repeat, size_t size) { + return addVar(name, repeat, H5T_NATIVE_INT, "vector", size); +} + bool H5Format::addVarBoutReal(const std::string &name, bool repeat) { auto h5_float_type = lowPrecision ? H5T_NATIVE_FLOAT : H5T_NATIVE_DOUBLE; return addVar(name, repeat, h5_float_type, "scalar"); diff --git a/src/fileio/impls/hdf5/h5_format.hxx b/src/fileio/impls/hdf5/h5_format.hxx index df037e2e50..a321bb1e9b 100644 --- a/src/fileio/impls/hdf5/h5_format.hxx +++ b/src/fileio/impls/hdf5/h5_format.hxx @@ -83,6 +83,7 @@ class H5Format : public DataFormat { // Add a variable to the file bool addVarInt(const std::string &name, bool repeat) override; + bool addVarIntVec(const std::string &name, bool repeat, size_t size) override; bool addVarBoutReal(const std::string &name, bool repeat) override; bool addVarField2D(const std::string &name, bool repeat) override; bool addVarField3D(const std::string &name, bool repeat) override; @@ -146,7 +147,8 @@ class H5Format : public DataFormat { hsize_t chunk_length; - bool addVar(const std::string &name, bool repeat, hid_t write_hdf5_type, std::string datatype); + bool addVar(const std::string &name, bool repeat, hid_t write_hdf5_type, std::string datatype, + int lx = 0, int ly = 0, int lz = 0); bool read(void *var, hid_t hdf5_type, const char *name, int lx = 1, int ly = 0, int lz = 0); bool write(void *var, hid_t mem_hdf5_type, const char *name, int lx = 0, int ly = 0, int lz = 0); bool read_rec(void *var, hid_t hdf5_type, const char *name, int lx = 1, int ly = 0, int lz = 0); From 842c631d09c0e31474854e318be43d9e52868559 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 21 Nov 2020 14:43:37 +0000 Subject: [PATCH 091/428] Fix writing array of int in Ncxx4 DataFormat implementation Previously, although the method had lx, ly, lz arguments, the 'counts' vector in write_rec(int*, ...) was hard-coded to length 1. This only allows writing a (time-dependent) scalar int variable. 'counts' should be length 4, with shape (1, lx, ly, lz). 
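To make the start/count convention concrete, a minimal netcdf-cxx4 sketch, independent of the BOUT++ wrappers; the variable name, dimension layout and exact putVar overload are assumptions for illustration:

    #include <netcdf>
    #include <vector>

    // Append one record of a fixed-length integer vector to a variable with
    // dimensions (t, vec). Both 'start' and 'count' need one entry per
    // dimension of the variable: start = {record, 0}, count = {1, length}.
    void appendRecord(netCDF::NcFile& file, size_t record, const std::vector<int>& data) {
      netCDF::NcVar var = file.getVar("example_vec");  // hypothetical variable
      const std::vector<size_t> start = {record, 0};
      const std::vector<size_t> count = {1, data.size()};
      var.putVar(start, count, data.data());  // writes data.size() ints at this record
    }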
--- src/fileio/impls/netcdf4/ncxx4.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/fileio/impls/netcdf4/ncxx4.cxx b/src/fileio/impls/netcdf4/ncxx4.cxx index 30a2cfa34c..6d55bc90d0 100644 --- a/src/fileio/impls/netcdf4/ncxx4.cxx +++ b/src/fileio/impls/netcdf4/ncxx4.cxx @@ -858,8 +858,8 @@ bool Ncxx4::write_rec(int *data, const char *name, int lx, int ly, int lz) { std::vector start(1); start[0] = rec_nr[name]; - std::vector counts(1); - counts[0] = 1; + std::vector counts(4); + counts[0] = 1; counts[1] = lx; counts[2] = ly; counts[3] = lz; #ifdef NCDF_VERBOSE output.write("Ncxx4:: write_rec { Writing variable } \n"); From a3c93f6b5349039ca32ce189fed74bfc467eb9d3 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 22 Nov 2020 14:43:04 +0000 Subject: [PATCH 092/428] Fix 'start' vector in Ncxx4::write_rec(int*,...) --- src/fileio/impls/netcdf4/ncxx4.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/fileio/impls/netcdf4/ncxx4.cxx b/src/fileio/impls/netcdf4/ncxx4.cxx index 6d55bc90d0..6282a61067 100644 --- a/src/fileio/impls/netcdf4/ncxx4.cxx +++ b/src/fileio/impls/netcdf4/ncxx4.cxx @@ -856,8 +856,8 @@ bool Ncxx4::write_rec(int *data, const char *name, int lx, int ly, int lz) { } } - std::vector start(1); - start[0] = rec_nr[name]; + std::vector start(4); + start[0] = rec_nr[name]; start[1] = x0; start[2] = y0; start[3] = z0; std::vector counts(4); counts[0] = 1; counts[1] = lx; counts[2] = ly; counts[3] = lz; From c41afe9337716aae3827e466b78c9a1d2ef58334 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 22 Nov 2020 15:09:38 +0000 Subject: [PATCH 093/428] Support std::vector in Datafile --- include/datafile.hxx | 3 + include/dataformat.hxx | 9 + src/fileio/datafile.cxx | 126 ++++++++++++++ src/fileio/impls/hdf5/h5_format.cxx | 36 ++++ src/fileio/impls/hdf5/h5_format.hxx | 9 + src/fileio/impls/netcdf/nc_format.cxx | 231 ++++++++++++++++++++++++++ src/fileio/impls/netcdf/nc_format.hxx | 9 + src/fileio/impls/netcdf4/ncxx4.cxx | 182 ++++++++++++++++++++ src/fileio/impls/netcdf4/ncxx4.hxx | 9 + 9 files changed, 614 insertions(+) diff --git a/include/datafile.hxx b/include/datafile.hxx index c7fae7b0e6..d0c0df4ccb 100644 --- a/include/datafile.hxx +++ b/include/datafile.hxx @@ -70,6 +70,7 @@ class Datafile { } void add(int &i, const char *name, bool save_repeat = false); void add(std::vector &ivec, const char *name, bool save_repeat = false); + void add(std::vector &cvec, const char *name, bool save_repeat = false); void add(BoutReal &r, const char *name, bool save_repeat = false); void add(bool &b, const char* name, bool save_repeat = false); void add(Field2D &f, const char *name, bool save_repeat = false); @@ -129,6 +130,7 @@ class Datafile { // one set per variable type std::vector> int_arr; std::vector>> int_vec_arr; + std::vector>> char_vec_arr; std::vector> BoutReal_arr; std::vector> bool_arr; std::vector> f2d_arr; @@ -143,6 +145,7 @@ class Datafile { bool write_int(const std::string &name, int *f, bool save_repeat); bool write_int_vec(const std::string &name, std::vector *f, bool save_repeat); + bool write_char_vec(const std::string &name, std::vector *f, bool save_repeat); bool write_real(const std::string &name, BoutReal *f, bool save_repeat); bool write_f2d(const std::string &name, Field2D *f, bool save_repeat); bool write_f3d(const std::string &name, Field3D *f, bool save_repeat); diff --git a/include/dataformat.hxx b/include/dataformat.hxx index f9426ac8f5..14ac18c2ec 100644 --- a/include/dataformat.hxx +++ 
b/include/dataformat.hxx @@ -78,6 +78,7 @@ class DataFormat { // Add a variable to the file virtual bool addVarInt(const std::string &name, bool repeat) = 0; virtual bool addVarIntVec(const std::string &name, bool repeat, size_t size) = 0; + virtual bool addVarCharVec(const std::string &name, bool repeat, size_t size) = 0; virtual bool addVarBoutReal(const std::string &name, bool repeat) = 0; virtual bool addVarField2D(const std::string &name, bool repeat) = 0; virtual bool addVarField3D(const std::string &name, bool repeat) = 0; @@ -87,12 +88,16 @@ class DataFormat { virtual bool read(int *var, const char *name, int lx = 1, int ly = 0, int lz = 0) = 0; virtual bool read(int *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) = 0; + virtual bool read(char *var, const char *name, int n = 1) = 0; + virtual bool read(char *var, const std::string &name, int n = 1) = 0; virtual bool read(BoutReal *var, const char *name, int lx = 1, int ly = 0, int lz = 0) = 0; virtual bool read(BoutReal *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) = 0; virtual bool read_perp(BoutReal *var, const std::string &name, int lx = 1, int lz = 0) = 0; virtual bool write(int *var, const char *name, int lx = 0, int ly = 0, int lz = 0) = 0; virtual bool write(int *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) = 0; + virtual bool write(char *var, const char *name, int n = 1) = 0; + virtual bool write(char *var, const std::string &name, int n = 1) = 0; virtual bool write(BoutReal *var, const char *name, int lx = 0, int ly = 0, int lz = 0) = 0; virtual bool write(BoutReal *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) = 0; virtual bool write_perp(BoutReal *var, const std::string &name, int lx = 0, int lz = 0) = 0; @@ -101,12 +106,16 @@ class DataFormat { virtual bool read_rec(int *var, const char *name, int lx = 1, int ly = 0, int lz = 0) = 0; virtual bool read_rec(int *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) = 0; + virtual bool read_rec(char *var, const char *name, int n = 1) = 0; + virtual bool read_rec(char *var, const std::string &name, int n = 1) = 0; virtual bool read_rec(BoutReal *var, const char *name, int lx = 1, int ly = 0, int lz = 0) = 0; virtual bool read_rec(BoutReal *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) = 0; virtual bool read_rec_perp(BoutReal *var, const std::string &name, int lx = 1, int lz = 0) = 0; virtual bool write_rec(int *var, const char *name, int lx = 0, int ly = 0, int lz = 0) = 0; virtual bool write_rec(int *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) = 0; + virtual bool write_rec(char *var, const char *name, int n = 1) = 0; + virtual bool write_rec(char *var, const std::string &name, int n = 1) = 0; virtual bool write_rec(BoutReal *var, const char *name, int lx = 0, int ly = 0, int lz = 0) = 0; virtual bool write_rec(BoutReal *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) = 0; virtual bool write_rec_perp(BoutReal *var, const std::string &name, int lx = 0, int lz = 0) = 0; diff --git a/src/fileio/datafile.cxx b/src/fileio/datafile.cxx index 360e410d72..b7c1cd30db 100644 --- a/src/fileio/datafile.cxx +++ b/src/fileio/datafile.cxx @@ -233,6 +233,13 @@ bool Datafile::openw(const char *format, ...) 
{ } } + // Add vectors of chars + for(const auto& var : char_vec_arr) { + if (!file->addVarCharVec(var.name, var.save_repeat, var.ptr->size())) { + throw BoutException("Failed to add char vector variable %s to Datafile", var.name.c_str()); + } + } + // Add BoutReals for(const auto& var : BoutReal_arr) { if (!file->addVarBoutReal(var.name, var.save_repeat)) { @@ -357,6 +364,13 @@ bool Datafile::opena(const char *format, ...) { } } + // Add vectors of chars + for(const auto& var : char_vec_arr) { + if (!file->addVarCharVec(var.name, var.save_repeat, var.ptr->size())) { + throw BoutException("Failed to add char vector variable %s to Datafile", var.name.c_str()); + } + } + // Add BoutReals for(const auto& var : BoutReal_arr) { if (!file->addVarBoutReal(var.name, var.save_repeat)) { @@ -570,6 +584,66 @@ void Datafile::add(std::vector &i, const char *name, bool save_repeat) { } } +void Datafile::add(std::vector &cvec, const char *name, bool save_repeat) { + TRACE("DataFile::add(std::vector)"); + if (!enabled) { + return; + } + if (varAdded(name)) { + // Check if it's the same variable + if (&cvec == varPtr(name)) { + output_warn.write("WARNING: variable '%s' already added to Datafile, skipping...\n", + name); + return; + } else { + throw BoutException("Variable with name '%s' already added to Datafile", name); + } + } + + VarStr> d; + + d.ptr = &cvec; + d.name = name; + d.save_repeat = save_repeat; + d.covar = false; + + char_vec_arr.push_back(d); + + if (writable) { + // Otherwise will add variables when Datafile is opened for writing/appending + if (openclose) { + // Open the file + // Check filename has been set + if (strcmp(filename, "") == 0) { + throw BoutException("Datafile::add: Filename has not been set"); + } + if(!file->openw(filename, BoutComm::rank(), appending)) { + if (appending) { + throw BoutException("Datafile::add: Failed to open file %s for appending!", + filename); + } else { + throw BoutException("Datafile::add: Failed to open file %s for writing!", + filename); + } + } + appending = true; + } + + if (!file->is_valid()) { + throw BoutException("Datafile::add: File is not valid!"); + } + + // Add variable to file + if (!file->addVarCharVec(name, save_repeat, cvec.size())) { + throw BoutException("Failed to add int variable %s to Datafile", name); + } + + if (openclose) { + file->close(); + } + } +} + void Datafile::add(BoutReal &r, const char *name, bool save_repeat) { TRACE("DataFile::add(BoutReal)"); if (!enabled) @@ -1071,6 +1145,33 @@ bool Datafile::read() { } } + // Read vectors of chars + for (const auto& var : char_vec_arr) { + if (var.save_repeat) { + if (!file->read_rec(&(*var.ptr)[0], var.name.c_str(), var.ptr->size())) { + if (!init_missing) { + throw BoutException( + "Missing data for %s in input. Set init_missing=true to create empty vector.", + var.name.c_str()); + } + output_warn.write("\tWARNING: Could not read char vector %s. Creating empty vector\n", var.name.c_str()); + *(var.ptr) = {}; + continue; + } + } else { + if (!file->read(&(*var.ptr)[0], var.name.c_str(), var.ptr->size())) { + if (!init_missing) { + throw BoutException( + "Missing data for %s in input. Set init_missing=true to create empty vector.", + var.name.c_str()); + } + output_warn.write("\tWARNING: Could not read char vector %s. 
Creating empty vector\n", var.name.c_str()); + *(var.ptr) = {}; + continue; + } + } + } + // Read BoutReals for(const auto& var : BoutReal_arr) { if(var.save_repeat) { @@ -1262,6 +1363,11 @@ bool Datafile::write() { write_int_vec(var.name, var.ptr, var.save_repeat); } + // Write vectors of chars + for (const auto& var : char_vec_arr) { + write_char_vec(var.name, var.ptr, var.save_repeat); + } + // Write BoutReals for(const auto& var : BoutReal_arr) { write_real(var.name, var.ptr, var.save_repeat); @@ -1620,6 +1726,14 @@ bool Datafile::write_int_vec(const std::string &name, std::vector *f, bool } } +bool Datafile::write_char_vec(const std::string &name, std::vector *f, bool save_repeat) { + if (save_repeat) { + return file->write_rec(&(*f)[0], name, f->size()); + } else { + return file->write(&(*f)[0], name, f->size()); + } +} + bool Datafile::write_real(const std::string &name, BoutReal *f, bool save_repeat) { if(save_repeat) { return file->write_rec(f, name); @@ -1702,6 +1816,12 @@ bool Datafile::varAdded(const std::string &name) { return true; } + for (const auto& var : char_vec_arr) { + if(name == var.name) { + return true; + } + } + for(const auto& var : BoutReal_arr ) { if(name == var.name) return true; @@ -1752,6 +1872,12 @@ void *Datafile::varPtr(const std::string &name) { } } + for (const auto &var : char_vec_arr) { + if (name == var.name) { + return static_cast(var.ptr); + } + } + for (const auto &var : BoutReal_arr) { if (name == var.name) { return static_cast(var.ptr); diff --git a/src/fileio/impls/hdf5/h5_format.cxx b/src/fileio/impls/hdf5/h5_format.cxx index 323174f89e..eab2ca96e7 100644 --- a/src/fileio/impls/hdf5/h5_format.cxx +++ b/src/fileio/impls/hdf5/h5_format.cxx @@ -397,6 +397,10 @@ bool H5Format::addVarIntVec(const std::string &name, bool repeat, size_t size) { return addVar(name, repeat, H5T_NATIVE_INT, "vector", size); } +bool H5Format::addVarCharVec(const std::string &name, bool repeat, size_t size) { + return addVar(name, repeat, H5T_C_S1, "vector", size); +} + bool H5Format::addVarBoutReal(const std::string &name, bool repeat) { auto h5_float_type = lowPrecision ? 
H5T_NATIVE_FLOAT : H5T_NATIVE_DOUBLE; return addVar(name, repeat, h5_float_type, "scalar"); @@ -425,6 +429,14 @@ bool H5Format::read(int *var, const std::string &name, int lx, int ly, int lz) { return read(var, name.c_str(), lx, ly, lz); } +bool H5Format::read(char *data, const char *name, int n) { + return read(data, H5T_C_S1, name, n); +} + +bool H5Format::read(char *var, const std::string &name, int n) { + return read(var, name.c_str(), n); +} + bool H5Format::read(BoutReal *data, const char *name, int lx, int ly, int lz) { return read(data, H5T_NATIVE_DOUBLE, name, lx, ly, lz); } @@ -550,6 +562,14 @@ bool H5Format::write(int *var, const std::string &name, int lx, int ly, int lz) return write(var, name.c_str(), lx, ly, lz); } +bool H5Format::write(char *data, const char *name, int n) { + return write(data, H5T_C_S1, name, n); +} + +bool H5Format::write(char *var, const std::string &name, int n) { + return write(var, name.c_str(), n); +} + bool H5Format::write(BoutReal *data, const char *name, int lx, int ly, int lz) { if(lowPrecision) { @@ -728,6 +748,14 @@ bool H5Format::read_rec(int *var, const std::string &name, int lx, int ly, int l return read_rec(var, name.c_str(), lx, ly, lz); } +bool H5Format::read_rec(char *data, const char *name, int n) { + return read_rec(data, H5T_C_S1, name, n); +} + +bool H5Format::read_rec(char *var, const std::string &name, int n) { + return read_rec(var, name.c_str(), n); +} + bool H5Format::read_rec(BoutReal *data, const char *name, int lx, int ly, int lz) { return read_rec(data, H5T_NATIVE_DOUBLE, name, lx, ly, lz); @@ -892,6 +920,14 @@ bool H5Format::write_rec(int *var, const std::string &name, int lx, int ly, int return write_rec(var, name.c_str(), lx, ly, lz); } +bool H5Format::write_rec(char *data, const char *name, int n) { + return write_rec(data, H5T_C_S1, name, n); +} + +bool H5Format::write_rec(char *var, const std::string &name, int n) { + return write_rec(var, name.c_str(), n); +} + bool H5Format::write_rec(BoutReal *data, const char *name, int lx, int ly, int lz) { if(lowPrecision) { diff --git a/src/fileio/impls/hdf5/h5_format.hxx b/src/fileio/impls/hdf5/h5_format.hxx index a321bb1e9b..83e3a766b0 100644 --- a/src/fileio/impls/hdf5/h5_format.hxx +++ b/src/fileio/impls/hdf5/h5_format.hxx @@ -84,6 +84,7 @@ class H5Format : public DataFormat { // Add a variable to the file bool addVarInt(const std::string &name, bool repeat) override; bool addVarIntVec(const std::string &name, bool repeat, size_t size) override; + bool addVarCharVec(const std::string &name, bool repeat, size_t size) override; bool addVarBoutReal(const std::string &name, bool repeat) override; bool addVarField2D(const std::string &name, bool repeat) override; bool addVarField3D(const std::string &name, bool repeat) override; @@ -93,12 +94,16 @@ class H5Format : public DataFormat { bool read(int *var, const char *name, int lx = 1, int ly = 0, int lz = 0) override; bool read(int *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) override; + bool read(char *var, const char *name, int n = 1) override; + bool read(char *var, const std::string &name, int n = 1) override; bool read(BoutReal *var, const char *name, int lx = 1, int ly = 0, int lz = 0) override; bool read(BoutReal *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) override; bool read_perp(BoutReal *var, const std::string &name, int lx = 1, int lz = 0) override; bool write(int *var, const char *name, int lx = 0, int ly = 0, int lz = 0) override; bool write(int *var, const std::string 
&name, int lx = 0, int ly = 0, int lz = 0) override; + bool write(char *var, const char *name, int n = 1) override; + bool write(char *var, const std::string &name, int n = 1) override; bool write(BoutReal *var, const char *name, int lx = 0, int ly = 0, int lz = 0) override; bool write(BoutReal *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) override; bool write_perp(BoutReal *var, const std::string &name, int lx = 0, int lz = 0) override; @@ -107,12 +112,16 @@ class H5Format : public DataFormat { bool read_rec(int *var, const char *name, int lx = 1, int ly = 0, int lz = 0) override; bool read_rec(int *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) override; + bool read_rec(char *var, const char *name, int n = 1) override; + bool read_rec(char *var, const std::string &name, int n = 1) override; bool read_rec(BoutReal *var, const char *name, int lx = 1, int ly = 0, int lz = 0) override; bool read_rec(BoutReal *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) override; bool read_rec_perp(BoutReal *var, const std::string &name, int lx = 1, int lz = 0) override; bool write_rec(int *var, const char *name, int lx = 0, int ly = 0, int lz = 0) override; bool write_rec(int *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) override; + bool write_rec(char *var, const char *name, int n = 1) override; + bool write_rec(char *var, const std::string &name, int n = 1) override; bool write_rec(BoutReal *var, const char *name, int lx = 0, int ly = 0, int lz = 0) override; bool write_rec(BoutReal *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) override; bool write_rec_perp(BoutReal *var, const std::string &name, int lx = 0, int lz = 0) override; diff --git a/src/fileio/impls/netcdf/nc_format.cxx b/src/fileio/impls/netcdf/nc_format.cxx index fc7d66ad86..7c508cdfab 100644 --- a/src/fileio/impls/netcdf/nc_format.cxx +++ b/src/fileio/impls/netcdf/nc_format.cxx @@ -440,6 +440,42 @@ bool NcFormat::addVarIntVec(const string &name, bool repeat, size_t size) { return true; } +bool NcFormat::addVarCharVec(const string &name, bool repeat, size_t size) { + if (!is_valid()) { + return false; + } + + // Create an error object so netCDF doesn't exit +#ifdef NCDF_VERBOSE + NcError err(NcError::verbose_nonfatal); +#else + NcError err(NcError::silent_nonfatal); +#endif + + NcVar* var; + if (!(var = dataFile->get_var(name.c_str()))) { + // Variable not in file, so add it. 
+ NcDim* vecDim; + auto vecDimName = "char" + std::to_string(size); + if (!(vecDim = dataFile->get_dim(vecDimName.c_str()))) { + vecDim = dataFile->add_dim(vecDimName.c_str(), int(size)); + } + if (repeat) { + std::vector local_dim_list{recDimList[0], vecDim}; + var = dataFile->add_var(name.c_str(), ncChar, 2, &local_dim_list[0]); + } else { + std::vector local_dim_list{vecDim}; + var = dataFile->add_var(name.c_str(), ncChar, 1, &local_dim_list[0]); + } + + if (!var->is_valid()) { + output_error.write("ERROR: NetCDF could not add char vector '%s' to file '%s'\n", name.c_str(), fname); + return false; + } + } + return true; +} + bool NcFormat::addVarBoutReal(const string &name, bool repeat) { if(!is_valid()) return false; @@ -609,6 +645,62 @@ bool NcFormat::read(int *var, const string &name, int lx, int ly, int lz) { return read(var, name.c_str(), lx, ly, lz); } +bool NcFormat::read(char *data, const char *name, int n) { + if (!is_valid()) { + return false; + } + + if (n < 0) { + return false; + } + + // Check for valid name + checkName(name); + + TRACE("NcFormat::read(char)"); + + // Create an error object so netCDF doesn't exit +#ifdef NCDF_VERBOSE + NcError err(NcError::verbose_nonfatal); +#else + NcError err(NcError::silent_nonfatal); +#endif + + NcVar *var; + + if (!(var = dataFile->get_var(name))) { +#ifdef NCDF_VERBOSE + output_info.write("INFO: NetCDF variable '%s' not found\n", name); +#endif + return false; + } + + long cur[1], counts[1]; + cur[0] = 0; + counts[0] = n; + + if (!(var->set_cur(cur))) { +#ifdef NCDF_VERBOSE + output_info.write( + "INFO: NetCDF Could not set cur(%d) for variable '%s'\n", n, name); +#endif + return false; + } + + if (!(var->get(data, counts))) { +#ifdef NCDF_VERBOSE + output_info.write("INFO: NetCDF could not read data for '%s'\n", name); +#endif + return false; + } + + return true; +} + +bool NcFormat::read(char *var, const string &name, int n) { + return read(var, name.c_str(), n); +} + bool NcFormat::read(BoutReal *data, const char *name, int lx, int ly, int lz) { if(!is_valid()) return false; @@ -728,6 +820,51 @@ bool NcFormat::write(int *var, const string &name, int lx, int ly, int lz) { return write(var, name.c_str(), lx, ly, lz); } +bool NcFormat::write(char *data, const char *name, int n) { + if (!is_valid()) { + return false; + } + + if (n < 0) { + return false; + } + + // Check for valid name + checkName(name); + + TRACE("NcFormat::write(char)"); + +#ifdef NCDF_VERBOSE + NcError err(NcError::verbose_nonfatal); +#else + NcError err(NcError::silent_nonfatal); +#endif + + NcVar *var; + if (!(var = dataFile->get_var(name))) { + output_error.write("ERROR: NetCDF char variable '%s' has not been added to file '%s'\n", name, fname); + return false; + } + + long cur[1], counts[1]; + cur[0] = 0; + counts[0] = n; + + if (!(var->set_cur(cur))) { + return false; + } + + if (!(var->put(data, counts))) { + return false; + } + + return true; +} + +bool NcFormat::write(char *var, const string &name, int n) { + return write(var, name.c_str(), n); +} + bool NcFormat::write(BoutReal *data, const char *name, int lx, int ly, int lz) { if(!is_valid()) return false; @@ -892,6 +1029,52 @@ bool NcFormat::read_rec(int *var, const string &name, int lx, int ly, int lz) { return read_rec(var, name.c_str(), lx, ly, lz); } +bool NcFormat::read_rec(char *data, const char *name, int n) { + if (!is_valid()) { + return false; + } + + if (n < 0) { + return false; + } + + // Check for valid name + checkName(name); + + // Create an error object so netCDF doesn't exit +#ifdef 
NCDF_VERBOSE + NcError err(NcError::verbose_nonfatal); +#else + NcError err(NcError::silent_nonfatal); +#endif + + NcVar *var; + + if (!(var = dataFile->get_var(name))) { + return false; + } + + // NOTE: Probably should do something here to check t0 + + long cur[2], counts[2]; + cur[0] = t0; cur[1] = 0; + counts[0] = 1; counts[1] = n; + + if (!(var->set_cur(cur))) { + return false; + } + + if (!(var->get(data, counts))) { + return false; + } + + return true; +} + +bool NcFormat::read_rec(char *var, const string &name, int n) { + return read_rec(var, name.c_str(), n); +} + bool NcFormat::read_rec(BoutReal *data, const char *name, int lx, int ly, int lz) { if(!is_valid()) return false; @@ -1015,6 +1198,54 @@ bool NcFormat::write_rec(int *var, const string &name, int lx, int ly, int lz) { return write_rec(var, name.c_str(), lx, ly, lz); } +bool NcFormat::write_rec(char *data, const char *name, int n) { + if (!is_valid()) { + return false; + } + + if (n < 0) { + return false; + } + + // Check for valid name + checkName(name); + +#ifdef NCDF_VERBOSE + NcError err(NcError::verbose_nonfatal); +#else + NcError err(NcError::silent_nonfatal); +#endif + + NcVar *var; + + // Try to find variable + if (!(var = dataFile->get_var(name))) { + output_error.write("ERROR: NetCDF char variable '%s' has not been added to file '%s'\n", name, fname); + return false; + } else { + // Get record number + if (rec_nr.find(name) == rec_nr.end()) { + // Add to map + rec_nr[name] = default_rec; + } + } + + if (!var->put_rec(data, rec_nr[name])) { + return false; + } + + var->sync(); + + // Increment record number + rec_nr[name] = rec_nr[name] + 1; + + return true; +} + +bool NcFormat::write_rec(char *var, const string &name, int n) { + return write_rec(var, name.c_str(), n); +} + bool NcFormat::write_rec(BoutReal *data, const char *name, int lx, int ly, int lz) { if(!is_valid()) return false; diff --git a/src/fileio/impls/netcdf/nc_format.hxx b/src/fileio/impls/netcdf/nc_format.hxx index 2bc962b07c..1ea3fca5ba 100644 --- a/src/fileio/impls/netcdf/nc_format.hxx +++ b/src/fileio/impls/netcdf/nc_format.hxx @@ -88,6 +88,7 @@ class NcFormat : public DataFormat { // Add a variable to the file bool addVarInt(const std::string &name, bool repeat) override; bool addVarIntVec(const std::string &name, bool repeat, size_t size) override; + bool addVarCharVec(const std::string &name, bool repeat, size_t size) override; bool addVarBoutReal(const std::string &name, bool repeat) override; bool addVarField2D(const std::string &name, bool repeat) override; bool addVarField3D(const std::string &name, bool repeat) override; @@ -97,12 +98,16 @@ class NcFormat : public DataFormat { bool read(int *var, const char *name, int lx = 1, int ly = 0, int lz = 0) override; bool read(int *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) override; + bool read(char *var, const char *name, int n = 1) override; + bool read(char *var, const std::string &name, int n = 1) override; bool read(BoutReal *var, const char *name, int lx = 1, int ly = 0, int lz = 0) override; bool read(BoutReal *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) override; bool read_perp(BoutReal *var, const std::string &name, int lx = 1, int lz = 0) override; bool write(int *var, const char *name, int lx = 0, int ly = 0, int lz = 0) override; bool write(int *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) override; + bool write(char *var, const char *name, int n = 1) override; + bool write(char *var, const std::string &name, int n = 
1) override; bool write(BoutReal *var, const char *name, int lx = 0, int ly = 0, int lz = 0) override; bool write(BoutReal *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) override; bool write_perp(BoutReal *var, const std::string &name, int lx = 0, int lz = 0) override; @@ -111,12 +116,16 @@ class NcFormat : public DataFormat { bool read_rec(int *var, const char *name, int lx = 1, int ly = 0, int lz = 0) override; bool read_rec(int *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) override; + bool read_rec(char *var, const char *name, int n = 1) override; + bool read_rec(char *var, const std::string &name, int n = 1) override; bool read_rec(BoutReal *var, const char *name, int lx = 1, int ly = 0, int lz = 0) override; bool read_rec(BoutReal *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) override; bool read_rec_perp(BoutReal *var, const std::string &name, int lx = 1, int lz = 0) override; bool write_rec(int *var, const char *name, int lx = 0, int ly = 0, int lz = 0) override; bool write_rec(int *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) override; + bool write_rec(char *var, const char *name, int n = 1) override; + bool write_rec(char *var, const std::string &name, int n = 1) override; bool write_rec(BoutReal *var, const char *name, int lx = 0, int ly = 0, int lz = 0) override; bool write_rec(BoutReal *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) override; bool write_rec_perp(BoutReal *var, const std::string &name, int lx = 0, int lz = 0) override; diff --git a/src/fileio/impls/netcdf4/ncxx4.cxx b/src/fileio/impls/netcdf4/ncxx4.cxx index 6282a61067..15237ca0d2 100644 --- a/src/fileio/impls/netcdf4/ncxx4.cxx +++ b/src/fileio/impls/netcdf4/ncxx4.cxx @@ -395,6 +395,37 @@ bool Ncxx4::addVarIntVec(const string &name, bool repeat, size_t size) { return true; } +bool Ncxx4::addVarCharVec(const string &name, bool repeat, size_t size) { + if (!is_valid()) { + return false; + } + + NcVar var = dataFile->getVar(name); + const auto vec_dim_name = "char" + std::to_string(size); + auto vec_dim = dataFile->getDim(vec_dim_name); + if (vec_dim.isNull()) { + vec_dim = dataFile->addDim(vec_dim_name, size); + } + if(var.isNull()) { + // Variable not in file, so add it. 
+ if (repeat) { + auto dims = getRecDimVec(2); + dims[1] = vec_dim; + var = dataFile->addVar(name, ncChar, dims); + } else { + auto dims = getDimVec(1); + dims[0] = vec_dim; + var = dataFile->addVar(name, ncChar, dims); + } + + if(var.isNull()) { + output_error.write("ERROR: NetCDF could not add char '%s' to file '%s'\n", name.c_str(), fname); + return false; + } + } + return true; +} + bool Ncxx4::addVarBoutReal(const string &name, bool repeat) { if(!is_valid()) return false; @@ -542,6 +573,41 @@ bool Ncxx4::read(int *var, const std::string &name, int lx, int ly, int lz) { return read(var, name.c_str(), lx, ly, lz); } +bool Ncxx4::read(char *data, const char *name, int n) { + TRACE("Ncxx4::read(char)"); + +#ifdef NCDF_VERBOSE + output.write("Ncxx4:: read(char, %s)\n", name); +#endif + + if (!is_valid()) { + return false; + } + + if (n < 0) { + return false; + } + + NcVar var = dataFile->getVar(name); + if (var.isNull()) { +#ifdef NCDF_VERBOSE + output_info.write("INFO: NetCDF variable '%s' not found\n", name); +#endif + return false; + } + + std::vector start = {1}; + std::vector counts = {size_t(n)}; + + var.getVar(start, counts, data); + + return true; +} + +bool Ncxx4::read(char *var, const std::string &name, int n) { + return read(var, name.c_str(), n); +} + bool Ncxx4::read(BoutReal *data, const char *name, int lx, int ly, int lz) { TRACE("Ncxx4::read(BoutReal)"); @@ -642,6 +708,46 @@ bool Ncxx4::write(int *var, const std::string &name, int lx, int ly, int lz) { return write(var, name.c_str(), lx, ly, lz); } +bool Ncxx4::write(char *data, const char *name, int n) { + TRACE("Ncxx4::write(char)"); + +#ifdef NCDF_VERBOSE + output.write("Ncxx4:: write(char, %s)\n", name); +#endif + if (!is_valid()) { + return false; + } + + if (n < 0) { + return false; + } + + NcVar var = dataFile->getVar(name); + if (var.isNull()) { + output_error.write("ERROR: NetCDF char variable '%s' has not been added to file '%s'\n", name, fname); + return false; + } + +#ifdef NCDF_VERBOSE + output.write("Ncxx4:: write { Writing Variable } \n"); +#endif + + std::vector start = {0}; + std::vector counts = {size_t(n)}; + + var.putVar(start, counts, data); + +#ifdef NCDF_VERBOSE + output.write("Ncxx4:: write { Done } \n"); +#endif + + return true; +} + +bool Ncxx4::write(char *var, const std::string &name, int n) { + return write(var, name.c_str(), n); +} + bool Ncxx4::write(BoutReal *data, const char *name, int lx, int ly, int lz) { TRACE("Ncxx4::write(BoutReal)"); @@ -775,6 +881,38 @@ bool Ncxx4::read_rec(int *var, const std::string &name, int lx, int ly, int lz) return read_rec(var, name.c_str(), lx, ly, lz); } +bool Ncxx4::read_rec(char *data, const char *name, int n) { +#ifdef NCDF_VERBOSE + output.write("Ncxx4:: read_rec(char, %s)\n", name); +#endif + if (!is_valid()) { + return false; + } + + if (n < 0) { + return false; + } + + NcVar var = dataFile->getVar(name); + + if (var.isNull()) { + return false; + } + + // NOTE: Probably should do something here to check t0 + + std::vector start = {size_t(t0), 0}; + std::vector counts = {1, size_t(n)}; + + var.getVar(start, counts, data); + + return true; +} + +bool Ncxx4::read_rec(char *var, const std::string &name, int n) { + return read_rec(var, name.c_str(), n); +} + bool Ncxx4::read_rec(BoutReal *data, const char *name, int lx, int ly, int lz) { #ifdef NCDF_VERBOSE output.write("Ncxx4:: read_rec(BoutReal, %s)\n", name); @@ -877,6 +1015,50 @@ bool Ncxx4::write_rec(int *var, const std::string &name, int lx, int ly, int lz) return write_rec(var, name.c_str(), lx, ly, 
lz); } +bool Ncxx4::write_rec(char *data, const char *name, int n) { +#ifdef NCDF_VERBOSE + output.write("Ncxx4:: write_rec(char, %s)\n", name); +#endif + if (!is_valid()) { + return false; + } + + if (n < 0) { + return false; + } + + // Try to find variable + NcVar var = dataFile->getVar(name); + if (var.isNull()) { + output_error.write("ERROR: NetCDF char variable '%s' has not been added to file '%s'\n", name, fname); + return false; + } else { + // Get record number + if (rec_nr.find(name) == rec_nr.end()) { + // Add to map + rec_nr[name] = default_rec; + } + } + + std::vector start = {size_t(rec_nr[name]), 0}; + std::vector counts = {1, size_t(n)}; + +#ifdef NCDF_VERBOSE + output.write("Ncxx4:: write_rec { Writing variable } \n"); +#endif + + var.putVar(start, counts, data); + + // Increment record number + rec_nr[name] = rec_nr[name] + 1; + + return true; +} + +bool Ncxx4::write_rec(char *var, const std::string &name, int n) { + return write_rec(var, name.c_str(), n); +} + bool Ncxx4::write_rec(BoutReal *data, const char *name, int lx, int ly, int lz) { TRACE("Ncxx4::write_rec(BoutReal)"); diff --git a/src/fileio/impls/netcdf4/ncxx4.hxx b/src/fileio/impls/netcdf4/ncxx4.hxx index 293de2ef08..501ecbafa7 100644 --- a/src/fileio/impls/netcdf4/ncxx4.hxx +++ b/src/fileio/impls/netcdf4/ncxx4.hxx @@ -89,6 +89,7 @@ class Ncxx4 : public DataFormat { // Add a variable to the file bool addVarInt(const std::string &name, bool repeat) override; bool addVarIntVec(const std::string &name, bool repeat, size_t size) override; + bool addVarCharVec(const std::string &name, bool repeat, size_t size) override; bool addVarBoutReal(const std::string &name, bool repeat) override; bool addVarField2D(const std::string &name, bool repeat) override; bool addVarField3D(const std::string &name, bool repeat) override; @@ -98,12 +99,16 @@ class Ncxx4 : public DataFormat { bool read(int *var, const char *name, int lx = 1, int ly = 0, int lz = 0) override; bool read(int *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) override; + bool read(char *var, const char *name, int n = 1) override; + bool read(char *var, const std::string &name, int n = 1) override; bool read(BoutReal *var, const char *name, int lx = 1, int ly = 0, int lz = 0) override; bool read(BoutReal *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) override; bool read_perp(BoutReal *var, const std::string &name, int lx = 1, int lz = 0) override; bool write(int *var, const char *name, int lx = 0, int ly = 0, int lz = 0) override; bool write(int *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) override; + bool write(char *var, const char *name, int n = 1) override; + bool write(char *var, const std::string &name, int n = 1) override; bool write(BoutReal *var, const char *name, int lx = 0, int ly = 0, int lz = 0) override; bool write(BoutReal *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) override; bool write_perp(BoutReal *var, const std::string &name, int lx = 0, int lz = 0) override; @@ -112,12 +117,16 @@ class Ncxx4 : public DataFormat { bool read_rec(int *var, const char *name, int lx = 1, int ly = 0, int lz = 0) override; bool read_rec(int *var, const std::string &name, int lx = 1, int ly = 0, int lz = 0) override; + bool read_rec(char *var, const char *name, int n = 1) override; + bool read_rec(char *var, const std::string &name, int n = 1) override; bool read_rec(BoutReal *var, const char *name, int lx = 1, int ly = 0, int lz = 0) override; bool read_rec(BoutReal *var, const 
std::string &name, int lx = 1, int ly = 0, int lz = 0) override; bool read_rec_perp(BoutReal *var, const std::string &name, int lx = 1, int lz = 0) override; bool write_rec(int *var, const char *name, int lx = 0, int ly = 0, int lz = 0) override; bool write_rec(int *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) override; + bool write_rec(char *var, const char *name, int n = 1) override; + bool write_rec(char *var, const std::string &name, int n = 1) override; bool write_rec(BoutReal *var, const char *name, int lx = 0, int ly = 0, int lz = 0) override; bool write_rec(BoutReal *var, const std::string &name, int lx = 0, int ly = 0, int lz = 0) override; bool write_rec_perp(BoutReal *var, const std::string &name, int lx = 0, int lz = 0) override; From 39003329109c31a9f82e4dcccea3cb99c34f0704 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 23 Nov 2020 14:58:01 +0000 Subject: [PATCH 094/428] Support std::string in Datafile --- include/datafile.hxx | 6 +- include/dataformat.hxx | 2 +- src/fileio/datafile.cxx | 79 +++++++++++++++------------ src/fileio/impls/hdf5/h5_format.cxx | 2 +- src/fileio/impls/hdf5/h5_format.hxx | 2 +- src/fileio/impls/netcdf/nc_format.cxx | 17 +++++- src/fileio/impls/netcdf/nc_format.hxx | 2 +- src/fileio/impls/netcdf4/ncxx4.cxx | 17 +++++- src/fileio/impls/netcdf4/ncxx4.hxx | 2 +- 9 files changed, 84 insertions(+), 45 deletions(-) diff --git a/include/datafile.hxx b/include/datafile.hxx index d0c0df4ccb..9013b718c7 100644 --- a/include/datafile.hxx +++ b/include/datafile.hxx @@ -70,7 +70,7 @@ class Datafile { } void add(int &i, const char *name, bool save_repeat = false); void add(std::vector &ivec, const char *name, bool save_repeat = false); - void add(std::vector &cvec, const char *name, bool save_repeat = false); + void add(std::string &s, const char *name, bool save_repeat = false); void add(BoutReal &r, const char *name, bool save_repeat = false); void add(bool &b, const char* name, bool save_repeat = false); void add(Field2D &f, const char *name, bool save_repeat = false); @@ -130,7 +130,7 @@ class Datafile { // one set per variable type std::vector> int_arr; std::vector>> int_vec_arr; - std::vector>> char_vec_arr; + std::vector> string_arr; std::vector> BoutReal_arr; std::vector> bool_arr; std::vector> f2d_arr; @@ -145,7 +145,7 @@ class Datafile { bool write_int(const std::string &name, int *f, bool save_repeat); bool write_int_vec(const std::string &name, std::vector *f, bool save_repeat); - bool write_char_vec(const std::string &name, std::vector *f, bool save_repeat); + bool write_string(const std::string &name, std::string *f, bool save_repeat); bool write_real(const std::string &name, BoutReal *f, bool save_repeat); bool write_f2d(const std::string &name, Field2D *f, bool save_repeat); bool write_f3d(const std::string &name, Field3D *f, bool save_repeat); diff --git a/include/dataformat.hxx b/include/dataformat.hxx index 14ac18c2ec..afda265969 100644 --- a/include/dataformat.hxx +++ b/include/dataformat.hxx @@ -78,7 +78,7 @@ class DataFormat { // Add a variable to the file virtual bool addVarInt(const std::string &name, bool repeat) = 0; virtual bool addVarIntVec(const std::string &name, bool repeat, size_t size) = 0; - virtual bool addVarCharVec(const std::string &name, bool repeat, size_t size) = 0; + virtual bool addVarString(const std::string &name, bool repeat, size_t size) = 0; virtual bool addVarBoutReal(const std::string &name, bool repeat) = 0; virtual bool addVarField2D(const std::string &name, bool repeat) = 0; virtual 
bool addVarField3D(const std::string &name, bool repeat) = 0; diff --git a/src/fileio/datafile.cxx b/src/fileio/datafile.cxx index b7c1cd30db..b5c4cf088e 100644 --- a/src/fileio/datafile.cxx +++ b/src/fileio/datafile.cxx @@ -80,6 +80,7 @@ Datafile::Datafile(Datafile &&other) noexcept flushFrequency(other.flushFrequency), file(std::move(other.file)), writable(other.writable), appending(other.appending), first_time(other.first_time), int_arr(std::move(other.int_arr)), int_vec_arr(std::move(other.int_vec_arr)), + string_arr(std::move(other.string_arr)), BoutReal_arr(std::move(other.BoutReal_arr)), bool_arr(std::move(other.bool_arr)), f2d_arr(std::move(other.f2d_arr)), f3d_arr(std::move(other.f3d_arr)), v2d_arr(std::move(other.v2d_arr)), v3d_arr(std::move(other.v3d_arr)) { @@ -95,7 +96,8 @@ Datafile::Datafile(const Datafile &other) : floats(other.floats), openclose(other.openclose), Lx(other.Lx), Ly(other.Ly), Lz(other.Lz), enabled(other.enabled), shiftOutput(other.shiftOutput), shiftInput(other.shiftInput), flushFrequencyCounter(other.flushFrequencyCounter), flushFrequency(other.flushFrequency), file(nullptr), writable(other.writable), appending(other.appending), first_time(other.first_time), - int_arr(other.int_arr), int_vec_arr(other.int_vec_arr), BoutReal_arr(other.BoutReal_arr), + int_arr(other.int_arr), int_vec_arr(other.int_vec_arr), + string_arr(other.string_arr), BoutReal_arr(other.BoutReal_arr), bool_arr(other.bool_arr), f2d_arr(other.f2d_arr), f3d_arr(other.f3d_arr), v2d_arr(other.v2d_arr), v3d_arr(other.v3d_arr) { @@ -124,6 +126,7 @@ Datafile& Datafile::operator=(Datafile &&rhs) noexcept { first_time = rhs.first_time; int_arr = std::move(rhs.int_arr); int_vec_arr = std::move(rhs.int_vec_arr); + string_arr = std::move(rhs.string_arr); BoutReal_arr = std::move(rhs.BoutReal_arr); bool_arr = std::move(rhs.bool_arr); f2d_arr = std::move(rhs.f2d_arr); @@ -233,10 +236,10 @@ bool Datafile::openw(const char *format, ...) { } } - // Add vectors of chars - for(const auto& var : char_vec_arr) { - if (!file->addVarCharVec(var.name, var.save_repeat, var.ptr->size())) { - throw BoutException("Failed to add char vector variable %s to Datafile", var.name.c_str()); + // Add strings + for(const auto& var : string_arr) { + if (!file->addVarString(var.name, var.save_repeat, var.ptr->size())) { + throw BoutException("Failed to add string variable %s to Datafile", var.name.c_str()); } } @@ -364,10 +367,10 @@ bool Datafile::opena(const char *format, ...) 
{ } } - // Add vectors of chars - for(const auto& var : char_vec_arr) { - if (!file->addVarCharVec(var.name, var.save_repeat, var.ptr->size())) { - throw BoutException("Failed to add char vector variable %s to Datafile", var.name.c_str()); + // Add strings + for(const auto& var : string_arr) { + if (!file->addVarString(var.name, var.save_repeat, var.ptr->size())) { + throw BoutException("Failed to add string variable %s to Datafile", var.name.c_str()); } } @@ -584,14 +587,14 @@ void Datafile::add(std::vector &i, const char *name, bool save_repeat) { } } -void Datafile::add(std::vector &cvec, const char *name, bool save_repeat) { - TRACE("DataFile::add(std::vector)"); +void Datafile::add(std::string &s, const char *name, bool save_repeat) { + TRACE("DataFile::add(std::string)"); if (!enabled) { return; } if (varAdded(name)) { // Check if it's the same variable - if (&cvec == varPtr(name)) { + if (&s == varPtr(name)) { output_warn.write("WARNING: variable '%s' already added to Datafile, skipping...\n", name); return; @@ -600,14 +603,15 @@ void Datafile::add(std::vector &cvec, const char *name, bool save_repeat) } } - VarStr> d; + VarStr d; - d.ptr = &cvec; + d.ptr = &s; d.name = name; d.save_repeat = save_repeat; d.covar = false; + d.size = s.size(); - char_vec_arr.push_back(d); + string_arr.push_back(d); if (writable) { // Otherwise will add variables when Datafile is opened for writing/appending @@ -634,8 +638,8 @@ void Datafile::add(std::vector &cvec, const char *name, bool save_repeat) } // Add variable to file - if (!file->addVarCharVec(name, save_repeat, cvec.size())) { - throw BoutException("Failed to add int variable %s to Datafile", name); + if (!file->addVarString(name, save_repeat, s.size())) { + throw BoutException("Failed to add string variable %s to Datafile", name); } if (openclose) { @@ -1145,29 +1149,30 @@ bool Datafile::read() { } } - // Read vectors of chars - for (const auto& var : char_vec_arr) { + // Read strings + for (const auto& var : string_arr) { + if (var.ptr->size() != var.size) { + throw BoutException("Size of std::string '%s' has changed since being " + "added to Datafile. Cannot read.", var.name.c_str()); + } + var.ptr->resize(var.size); if (var.save_repeat) { - if (!file->read_rec(&(*var.ptr)[0], var.name.c_str(), var.ptr->size())) { + if (!file->read_rec(&(*var.ptr)[0], var.name.c_str(), var.size)) { if (!init_missing) { throw BoutException( - "Missing data for %s in input. Set init_missing=true to create empty vector.", + "Missing data for %s in input. Set init_missing=true to create empty string.", var.name.c_str()); } - output_warn.write("\tWARNING: Could not read char vector %s. Creating empty vector\n", var.name.c_str()); - *(var.ptr) = {}; - continue; + output_warn.write("\tWARNING: Could not read string %s. Creating empty string\n", var.name.c_str()); } } else { - if (!file->read(&(*var.ptr)[0], var.name.c_str(), var.ptr->size())) { + if (!file->read(&(*var.ptr)[0], var.name.c_str(), var.size)) { if (!init_missing) { throw BoutException( - "Missing data for %s in input. Set init_missing=true to create empty vector.", + "Missing data for %s in input. Set init_missing=true to create empty string.", var.name.c_str()); } - output_warn.write("\tWARNING: Could not read char vector %s. Creating empty vector\n", var.name.c_str()); - *(var.ptr) = {}; - continue; + output_warn.write("\tWARNING: Could not read string %s. 
Creating empty string\n", var.name.c_str()); } } } @@ -1363,9 +1368,13 @@ bool Datafile::write() { write_int_vec(var.name, var.ptr, var.save_repeat); } - // Write vectors of chars - for (const auto& var : char_vec_arr) { - write_char_vec(var.name, var.ptr, var.save_repeat); + // Write strings + for (const auto& var : string_arr) { + if (var.ptr->size() != var.size) { + throw BoutException("Size of string '%s' has changed since being " + "added to Datafile. Cannot write.", var.name.c_str()); + } + write_string(var.name, var.ptr, var.save_repeat); } // Write BoutReals @@ -1726,7 +1735,7 @@ bool Datafile::write_int_vec(const std::string &name, std::vector<int> *f, bool } } -bool Datafile::write_char_vec(const std::string &name, std::vector<char> *f, bool save_repeat) { +bool Datafile::write_string(const std::string &name, std::string *f, bool save_repeat) { if (save_repeat) { return file->write_rec(&(*f)[0], name, f->size()); } else { return file->write(&(*f)[0], name, f->size()); } @@ -1816,7 +1825,7 @@ bool Datafile::varAdded(const std::string &name) { return true; } - for (const auto& var : char_vec_arr) { + for (const auto& var : string_arr) { if(name == var.name) { return true; } @@ -1872,7 +1881,7 @@ void *Datafile::varPtr(const std::string &name) { } } - for (const auto &var : char_vec_arr) { + for (const auto &var : string_arr) { if (name == var.name) { return static_cast<void*>(var.ptr); } diff --git a/src/fileio/impls/hdf5/h5_format.cxx b/src/fileio/impls/hdf5/h5_format.cxx index eab2ca96e7..11e5092ed9 100644 --- a/src/fileio/impls/hdf5/h5_format.cxx +++ b/src/fileio/impls/hdf5/h5_format.cxx @@ -397,7 +397,7 @@ bool H5Format::addVarIntVec(const std::string &name, bool repeat, size_t size) { return addVar(name, repeat, H5T_NATIVE_INT, "vector", size); } -bool H5Format::addVarCharVec(const std::string &name, bool repeat, size_t size) { +bool H5Format::addVarString(const std::string &name, bool repeat, size_t size) { return addVar(name, repeat, H5T_C_S1, "vector", size); } diff --git a/src/fileio/impls/hdf5/h5_format.hxx b/src/fileio/impls/hdf5/h5_format.hxx index 83e3a766b0..e53bb71ab2 100644 --- a/src/fileio/impls/hdf5/h5_format.hxx +++ b/src/fileio/impls/hdf5/h5_format.hxx @@ -84,7 +84,7 @@ class H5Format : public DataFormat { // Add a variable to the file bool addVarInt(const std::string &name, bool repeat) override; bool addVarIntVec(const std::string &name, bool repeat, size_t size) override; - bool addVarCharVec(const std::string &name, bool repeat, size_t size) override; + bool addVarString(const std::string &name, bool repeat, size_t size) override; bool addVarBoutReal(const std::string &name, bool repeat) override; bool addVarField2D(const std::string &name, bool repeat) override; bool addVarField3D(const std::string &name, bool repeat) override; diff --git a/src/fileio/impls/netcdf/nc_format.cxx b/src/fileio/impls/netcdf/nc_format.cxx index 7c508cdfab..3c595661ed 100644 --- a/src/fileio/impls/netcdf/nc_format.cxx +++ b/src/fileio/impls/netcdf/nc_format.cxx @@ -440,7 +440,7 @@ bool NcFormat::addVarIntVec(const string &name, bool repeat, size_t size) { return true; } -bool NcFormat::addVarCharVec(const string &name, bool repeat, size_t size) { +bool NcFormat::addVarString(const string &name, bool repeat, size_t size) { if (!is_valid()) { return false; } @@ -472,6 +472,21 @@ bool NcFormat::addVarCharVec(const string &name, bool repeat, size_t size) { output_error.write("ERROR: NetCDF could not add char vector '%s' to file '%s'\n", name.c_str(), fname); return false; } + } else { + // Check the existing variable is consistent with what's being
added + if (repeat) { + ASSERT0(var->num_dims() == 2); + if (size_t(var->get_dim(1)->size()) != size) { + throw BoutException("Found existing variable '%s' with size %lu. Trying to add " + "with size %lu.", name.c_str(), var->get_dim(1)->size(), size); + } + } else { + ASSERT0(var->num_dims() == 1); + if (size_t(var->get_dim(0)->size()) != size) { + throw BoutException("Found existing variable '%s' with size %lu. Trying to add " + "with size %lu.", name.c_str(), var->get_dim(0)->size(), size); + } + } } return true; } diff --git a/src/fileio/impls/netcdf/nc_format.hxx b/src/fileio/impls/netcdf/nc_format.hxx index 1ea3fca5ba..4571f73d82 100644 --- a/src/fileio/impls/netcdf/nc_format.hxx +++ b/src/fileio/impls/netcdf/nc_format.hxx @@ -88,7 +88,7 @@ class NcFormat : public DataFormat { // Add a variable to the file bool addVarInt(const std::string &name, bool repeat) override; bool addVarIntVec(const std::string &name, bool repeat, size_t size) override; - bool addVarCharVec(const std::string &name, bool repeat, size_t size) override; + bool addVarString(const std::string &name, bool repeat, size_t size) override; bool addVarBoutReal(const std::string &name, bool repeat) override; bool addVarField2D(const std::string &name, bool repeat) override; bool addVarField3D(const std::string &name, bool repeat) override; diff --git a/src/fileio/impls/netcdf4/ncxx4.cxx b/src/fileio/impls/netcdf4/ncxx4.cxx index 15237ca0d2..93b6b7a7e4 100644 --- a/src/fileio/impls/netcdf4/ncxx4.cxx +++ b/src/fileio/impls/netcdf4/ncxx4.cxx @@ -395,7 +395,7 @@ bool Ncxx4::addVarIntVec(const string &name, bool repeat, size_t size) { return true; } -bool Ncxx4::addVarCharVec(const string &name, bool repeat, size_t size) { +bool Ncxx4::addVarString(const string &name, bool repeat, size_t size) { if (!is_valid()) { return false; } @@ -422,6 +422,21 @@ bool Ncxx4::addVarCharVec(const string &name, bool repeat, size_t size) { output_error.write("ERROR: NetCDF could not add char '%s' to file '%s'\n", name.c_str(), fname); return false; } + } else { + // Check the existing variable is consistent with what's being added + if (repeat) { + ASSERT0(var.getDimCount() == 2); + if (var.getDim(1).getSize() != size) { + throw BoutException("Found existing variable '%s' with size %lu. Trying to add " + "with size %lu.", name.c_str(), var.getDim(1).getSize(), size); + } + } else { + ASSERT0(var.getDimCount() == 1); + if (var.getDim(0).getSize() != size) { + throw BoutException("Found existing variable '%s' with size %lu. 
Trying to add " + "with size %lu.", name.c_str(), var.getDim(0).getSize(), size); + } + } } return true; } diff --git a/src/fileio/impls/netcdf4/ncxx4.hxx b/src/fileio/impls/netcdf4/ncxx4.hxx index 501ecbafa7..17f90ceb44 100644 --- a/src/fileio/impls/netcdf4/ncxx4.hxx +++ b/src/fileio/impls/netcdf4/ncxx4.hxx @@ -89,7 +89,7 @@ class Ncxx4 : public DataFormat { // Add a variable to the file bool addVarInt(const std::string &name, bool repeat) override; bool addVarIntVec(const std::string &name, bool repeat, size_t size) override; - bool addVarCharVec(const std::string &name, bool repeat, size_t size) override; + bool addVarString(const std::string &name, bool repeat, size_t size) override; bool addVarBoutReal(const std::string &name, bool repeat) override; bool addVarField2D(const std::string &name, bool repeat) override; bool addVarField3D(const std::string &name, bool repeat) override; From 8c21e3f16e412e41c36949f24182d1830a8e7e3f Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 24 Nov 2020 22:03:18 +0000 Subject: [PATCH 095/428] Test std::vector and std::string in test-io and test-io_hdf5 --- .../test-io/data/benchmark.out.0.nc | Bin 145800 -> 159445 bytes tests/integrated/test-io/runtest | 35 ++++++++++++++---- tests/integrated/test-io/test_io.cxx | 26 +++++++++++++ .../test-io_hdf5/data/benchmark.out.0.hdf5 | Bin 666120 -> 727950 bytes tests/integrated/test-io_hdf5/runtest | 35 ++++++++++++++---- .../integrated/test-io_hdf5/test_io_hdf5.cxx | 26 +++++++++++++ 6 files changed, 106 insertions(+), 16 deletions(-) diff --git a/tests/integrated/test-io/data/benchmark.out.0.nc b/tests/integrated/test-io/data/benchmark.out.0.nc index 35e74423c4020be025fca00476965e6284481f2b..bae42030c77c72c226479e73bb9e4aa0f4b5e178 100644 GIT binary patch delta 2667 zcmb7G4@{J07=PaHzWeI|hrr!|2zSVz!>~M{C5nHJe}W<)7^Y(?-krh_K_mzb|D+HL zOOB_&H5HkaG)F@ZLk|}BugwE0k{6ms45HUsQhfN7Wz zf7Ni{?H9`6x`q}9AI8rTJNF2QG=4i4EYbMCPZ%K~Hs!2IDry6lqPJZ2f0gKqnr~MG zi>V+Cl9la<$SQp~f$!}FOZbbaC7svD9C_oH`!wy(2@8VmC8Od%?EdAHqanDiQ|&Lw zkC`55FRS8q2|Mp--=)GE1k0*Q45x2(`M^MrZ^4sQnYLdq{PrPyL62_*+n)A5NT(7T zcr54o;27iLXw_B2cXxh#0QcxN5-)7Bh>ec|qG^tKEEtyLFZZ9R?hKKzQ-!B@n+?Oc z*|mk4xl4_^kQ_cW7ZDuI%XCNyH3( zVosgm z;X>%Dr~k@S=Tv$(d5XMswOk%i=ebpwn^i0XloDbEbQMp-m7*hvogEtALzl%5zw_3t3O)-y7Ah<0wQfF&$aYM@Gxz7uQS)P{%Bc3a2W zomo>;=c(LWUhmzCu4S_f^kM^+E(G~pAu+3+N3SrjDtY`RlTHp|93P{($}r0k=y`Oc zS2CraAexJyip}Q-WQAc-*lJuySKR%|va}|dGb&*gfPI3-^13cQDY}u$7#RoYY~sxw zAIB=A&-nm!)TLLc3G^-T?#Nv%z*Ow%T@wog&e@yzcru>}OJy?Y^-jsXy>W2jWEQH7thOfFBi^P(pf)iM11RJ0(!efJ z(woRp+T6E_>sXn839izYf2z-Ajyb!!=cE5@$~YtnCwJxB+a)7PN903DNMKR;EhA%( z?s;zi*yEOiVS1hty$=24{4p)7t-(!*gSj9Anu^S9f;O@(rKeVcPSM1?pHK+}wTC>9 zk;DMN%>L(01u#37&p6tx&2+~qp`e}pXv%H=sRA!opTV{^8y5C<#7@Sk0&1!&QFldG sXLoHVg(WXW?K10ZH@o}j$@0m^WJ44vIV1$^1{!sfadJ#{@Z??D zPLpMGE=rv;jQYX=v3+^#nqNRu85l$uW*s~126T4Aa J*7c0cT>u;unuGuV diff --git a/tests/integrated/test-io/runtest b/tests/integrated/test-io/runtest index beaf1ffe23..77d21abf51 100755 --- a/tests/integrated/test-io/runtest +++ b/tests/integrated/test-io/runtest @@ -14,6 +14,7 @@ except: from boututils.run_wrapper import shell, shell_safe, launch_safe from boutdata.collect import collect +from boututils.datafile import DataFile import numpy as np from sys import stdout, exit @@ -25,7 +26,8 @@ shell_safe("make > make.log") # Read benchmark values vars = ['ivar', 'rvar', 'bvar', 'f2d', 'f3d', 'fperp', 'fperp2', 'ivar_evol', 'rvar_evol', - 'bvar_evol', 'v2d_evol_x', 'v2d_evol_y', 'v2d_evol_z', 'fperp2_evol'] + 'bvar_evol', 'v2d_evol_x', 'v2d_evol_y', 'v2d_evol_z', 
'fperp2_evol', + "ivar_vec", "svar", "ivar_vec_evol", "svar_evol"] field_vars = ['f2d', 'f3d', 'fperp', 'fperp2', 'v2d_evol_x', 'v2d_evol_y', 'v2d_evol_z', 'fperp2_evol'] # Field quantities, not scalars @@ -35,13 +37,21 @@ tol = 1e-10 print("Reading benchmark data") bmk = {} for v in vars: - bmk[v] = collect(v, path="data", prefix="benchmark.out", info=False) + if "ivar_vec" in v or "svar" in v: + f = DataFile("data/benchmark.out.0.nc") + bmk[v] = f[v][...] + else: + bmk[v] = collect(v, path="data", prefix="benchmark.out", info=False) def check_output(): success = True for v in vars: stdout.write(" Checking variable "+v+" ... ") - result = collect(v, path="data", info=False) + if "ivar_vec" in v or "svar" in v: + f = DataFile("data/BOUT.dmp.0.nc") + result = f[v][...] + else: + result = collect(v, path="data", info=False) # Compare benchmark and output if np.shape(bmk[v]) != np.shape(result): @@ -49,11 +59,18 @@ def check_output(): success = False continue - diff = np.max(np.abs(bmk[v] - result)) - if diff > tol: - print("Fail, maximum difference = "+str(diff)) - success = False - continue + if result.dtype.kind not in np.typecodes["AllFloat"]: + # Non-float types should be identical + if not np.all(bmk[v] == result): + print("Fail") + success = False + continue + else: + diff = np.max(np.abs(bmk[v] - result)) + if diff > tol: + print("Fail, maximum difference = " + str(diff)) + success = False + continue if v in field_vars: # Check cell location @@ -122,12 +139,14 @@ if not check_output(): # Test incorrectly double-adding variables - should throw exception for check_incorrect_add in [ "ivar", + "ivar_vec", "rvar", "bvar", "f2d", "f3d", "fperp", "ivar_evol", + "ivar_vec_evol", "rvar_evol", "bvar_evol", "v2d_evol", diff --git a/tests/integrated/test-io/test_io.cxx b/tests/integrated/test-io/test_io.cxx index 5789767c89..53188846fd 100644 --- a/tests/integrated/test-io/test_io.cxx +++ b/tests/integrated/test-io/test_io.cxx @@ -16,6 +16,10 @@ int main(int argc, char **argv) { // Variables to be read and written int ivar, ivar_evol; + std::vector ivar_vec = {1, 2, 3}; + std::vector ivar_vec_evol = {4, 5, 6}; + std::string svar = "ab"; + std::string svar_evol = "cde"; BoutReal rvar, rvar_evol; bool bvar, bvar_evol; Field2D f2d; @@ -45,6 +49,8 @@ int main(int argc, char **argv) { // Non-evolving variables dump.add(ivar, "ivar", false); + dump.add(ivar_vec, "ivar_vec"); + dump.add(svar, "svar"); dump.add(rvar, "rvar", false); dump.add(bvar, "bvar", false); dump.add(f2d, "f2d", false); @@ -54,6 +60,8 @@ int main(int argc, char **argv) { // Evolving variables dump.add(ivar_evol, "ivar_evol", true); + dump.add(ivar_vec_evol, "ivar_vec_evol", true); + dump.add(svar_evol, "svar_evol", true); dump.add(rvar_evol, "rvar_evol", true); dump.add(bvar_evol, "bvar_evol", true); dump.add(v2d, "v2d_evol", true); @@ -63,6 +71,8 @@ int main(int argc, char **argv) { if (check_double_add) { // Add all variables twice to check this does not cause an error dump.add(ivar, "ivar", false); + dump.add(ivar_vec, "ivar_vec"); + dump.add(svar, "svar"); dump.add(rvar, "rvar", false); dump.add(bvar, "bvar", false); dump.add(f2d, "f2d", false); @@ -70,6 +80,8 @@ int main(int argc, char **argv) { dump.add(fperp, "fperp", false); dump.add(fperp2, "fperp2", false); dump.add(ivar_evol, "ivar_evol", true); + dump.add(ivar_vec_evol, "ivar_vec_evol"); + dump.add(svar_evol, "svar_evol"); dump.add(rvar_evol, "rvar_evol", true); dump.add(bvar_evol, "bvar_evol", true); dump.add(v2d, "v2d_evol", true); @@ -81,6 +93,12 @@ int main(int 
argc, char **argv) { if (check_incorrect_add == "ivar") { int dummy = 0; dump.add(dummy, "ivar", false); + } else if (check_incorrect_add == "ivar_vec") { + std::vector dummy = {-1}; + dump.add(dummy, "ivar_vec", false); + } else if (check_incorrect_add == "svar") { + std::string dummy = "y"; + dump.add(dummy, "svar", false); } else if (check_incorrect_add == "rvar") { BoutReal dummy = 0.0; dump.add(dummy, "rvar", false); @@ -99,6 +117,12 @@ int main(int argc, char **argv) { } else if (check_incorrect_add == "ivar_evol") { int dummy = 0; dump.add(dummy, "ivar_evol", true); + } else if (check_incorrect_add == "ivar_vec_evol") { + std::vector dummy = {-1}; + dump.add(dummy, "ivar_vec_evol", false); + } else if (check_incorrect_add == "svar_evol") { + std::string dummy = "y"; + dump.add(dummy, "svar_evol", false); } else if (check_incorrect_add == "rvar_evol") { BoutReal dummy = 0.0; dump.add(dummy, "rvar_evol", true); @@ -119,6 +143,8 @@ int main(int argc, char **argv) { bvar_evol = bvar; for(int i=0;i<3;i++) { ivar_evol = ivar + i; + ivar_vec_evol[0] += i; ivar_vec_evol[1] += i; ivar_vec_evol[2] += i; + svar_evol[0] += i; svar_evol[1] += i; svar_evol[2] += i; rvar_evol = rvar + 0.5 * i; bvar_evol = !bvar_evol; v2d.x = v2d.y = v2d.z = f2d; diff --git a/tests/integrated/test-io_hdf5/data/benchmark.out.0.hdf5 b/tests/integrated/test-io_hdf5/data/benchmark.out.0.hdf5 index a4f4b905ff43eeef121eb3c9354be253813375b0..80c74f7ad0e3a0984b82062c4c618a2215bece43 100644 GIT binary patch delta 2243 zcmds2T}TvB6h3$E%~pzI(oN_S~6` z*sODEE{12r#>=t?N^V92{=8P)wQto=%I2G=f_OTJkrTv9(PiYd&b2G8Mn>3VO8sOQ zMT|Ki&y{i6nQ2!VJ+^6>!RNK3%KK&nUO+4?|F9*-L6G7qunP%?uR3?lrRInX;)w6= zx}W>_$isM^ui@6aL=H59N*3c0gsk;1YN2n|Ft~|Ae&V>*au#>gTpLf0B*s%+0uH2{ zNR1B-P-U0c-zx*#x4)}#)iGCbYSZCkBZ zt&QP?wPLH+ovlUfm&Y-e)uIvPu^P&?5@Shl)Nkh2)n z0qBoDwWJ>x_%aplu|u587kO{8xg_iHLcCewC*R_AG}?)}f34iXn2!Z!BgcHT8B zKtcH-U0zH|zvi(%Jkj)4!PIuUsN4=fbTD`hk-b7MSC<3;AIUR{OK4w=4UGWE=yKQ9 zC&q*`tk|uF6R1KIW_M4AxCKIPPs=cJ#So&s`w@qu8fBl^D2f|Er#tMyX!DYrvdLKS zj{aCq08G%5-@CXU?ch4>pz)B~$s7e8uDIwO9Q{5=@3WK9W>172w_|Fj9@#P?5hFb5 zCJoT(;%);=eNyOUGfI6*=w%J1zDMX~O{Kn9=w*$Cz8<>8hAgghka}58sox#~!tV?% zYjNNz+m@L!mu#7Z&X(C3vSpTAw#;(MmNm*$bYGvzOl1F2C;qTL`xLO#(Je1AqoywV z&~7w72Z(8e{uj>aSTQ5Aapj7|pM;NmYw)X*uZxkd@R3o!d*rh*6_693aSl*T|2Q>& zkIk=>o(0j{JoaSe+r1+&&?hWJY&wl19=)r(JJr*h?4;E&kEyO1q&wA>ctWX{mlIEh zexxifr%`rU@r%p7yqL^v-4g<=DfEyRw3Ers{s9=gF_g~wbakib$$NVH(!*RkObU#` d{tc*?NBaN( delta 223 zcmeC%uG4WvV}b@VhZfgH?N%n?156BH&;X?kOqeHYPw{OQU~U&+X522o%%m2{$U5CT zhAD9SjTj~mw#f~Q0h<-L6$F+$6f+%~?l70pVf&6FTn3Dsj1ZF;95#PsNoU!vaG0rD zpnbMC^Y+=^EbZyjyYyHErW>7R$(TNAGmFynJ*Qbbr~lo|qPkt{49gEOu=IAIv`7*o z%XD{LR?X=LbXiY;Y@4p2%^I=&fgbCG)aeVRvRX{PGmn*{eZv%1AZ7z%b|B{1zF`Vy G#8d#2+DHBX diff --git a/tests/integrated/test-io_hdf5/runtest b/tests/integrated/test-io_hdf5/runtest index 1db181dcc9..1dc9ac1e4f 100755 --- a/tests/integrated/test-io_hdf5/runtest +++ b/tests/integrated/test-io_hdf5/runtest @@ -9,6 +9,7 @@ from boututils.run_wrapper import shell, shell_safe, launch_safe from boutdata.collect import collect +from boututils.datafile import DataFile import numpy as np from sys import stdout, exit @@ -20,7 +21,8 @@ shell_safe("make > make.log") # Read benchmark values vars = ['ivar', 'rvar', 'bvar', 'f2d', 'f3d', 'fperp', 'fperp2', 'ivar_evol', 'rvar_evol', - 'bvar_evol', 'v2d_evol_x', 'v2d_evol_y', 'v2d_evol_z', 'fperp2_evol'] + 'bvar_evol', 'v2d_evol_x', 'v2d_evol_y', 'v2d_evol_z', 'fperp2_evol', + "ivar_vec", "svar", "ivar_vec_evol", "svar_evol"] field_vars = ['f2d', 'f3d', 'fperp', 'fperp2', 
'v2d_evol_x', 'v2d_evol_y', 'v2d_evol_z', 'fperp2_evol'] # Field quantities, not scalars @@ -30,13 +32,21 @@ tol = 1e-10 print("Reading benchmark data") bmk = {} for v in vars: - bmk[v] = collect(v, path="data", prefix="benchmark.out", info=False) + if "ivar_vec" in v or "svar" in v: + f = DataFile("data/benchmark.out.0.hdf5") + bmk[v] = f[v][...] + else: + bmk[v] = collect(v, path="data", prefix="benchmark.out", info=False) def check_output(): success = True for v in vars: stdout.write(" Checking variable "+v+" ... ") - result = collect(v, path="data", info=False) + if "ivar_vec" in v or "svar" in v: + f = DataFile("data/BOUT.dmp.0.hdf5") + result = f[v][...] + else: + result = collect(v, path="data", info=False) # Compare benchmark and output if np.shape(bmk[v]) != np.shape(result): @@ -44,11 +54,18 @@ def check_output(): success = False continue - diff = np.max(np.abs(bmk[v] - result)) - if diff > tol: - print("Fail, maximum difference = "+str(diff)) - success = False - continue + if result.dtype.kind not in np.typecodes["AllFloat"]: + # Non-float types should be identical + if not np.all(bmk[v] == result): + print("Fail") + success = False + continue + else: + diff = np.max(np.abs(bmk[v] - result)) + if diff > tol: + print("Fail, maximum difference = " + str(diff)) + success = False + continue if v in field_vars: # Check cell location @@ -117,12 +134,14 @@ if not check_output(): # Test incorrectly double-adding variables - should throw exception for check_incorrect_add in [ "ivar", + "ivar_vec", "rvar", "bvar", "f2d", "f3d", "fperp", "ivar_evol", + "ivar_vec_evol", "rvar_evol", "bvar_evol", "v2d_evol", diff --git a/tests/integrated/test-io_hdf5/test_io_hdf5.cxx b/tests/integrated/test-io_hdf5/test_io_hdf5.cxx index 5789767c89..53188846fd 100644 --- a/tests/integrated/test-io_hdf5/test_io_hdf5.cxx +++ b/tests/integrated/test-io_hdf5/test_io_hdf5.cxx @@ -16,6 +16,10 @@ int main(int argc, char **argv) { // Variables to be read and written int ivar, ivar_evol; + std::vector ivar_vec = {1, 2, 3}; + std::vector ivar_vec_evol = {4, 5, 6}; + std::string svar = "ab"; + std::string svar_evol = "cde"; BoutReal rvar, rvar_evol; bool bvar, bvar_evol; Field2D f2d; @@ -45,6 +49,8 @@ int main(int argc, char **argv) { // Non-evolving variables dump.add(ivar, "ivar", false); + dump.add(ivar_vec, "ivar_vec"); + dump.add(svar, "svar"); dump.add(rvar, "rvar", false); dump.add(bvar, "bvar", false); dump.add(f2d, "f2d", false); @@ -54,6 +60,8 @@ int main(int argc, char **argv) { // Evolving variables dump.add(ivar_evol, "ivar_evol", true); + dump.add(ivar_vec_evol, "ivar_vec_evol", true); + dump.add(svar_evol, "svar_evol", true); dump.add(rvar_evol, "rvar_evol", true); dump.add(bvar_evol, "bvar_evol", true); dump.add(v2d, "v2d_evol", true); @@ -63,6 +71,8 @@ int main(int argc, char **argv) { if (check_double_add) { // Add all variables twice to check this does not cause an error dump.add(ivar, "ivar", false); + dump.add(ivar_vec, "ivar_vec"); + dump.add(svar, "svar"); dump.add(rvar, "rvar", false); dump.add(bvar, "bvar", false); dump.add(f2d, "f2d", false); @@ -70,6 +80,8 @@ int main(int argc, char **argv) { dump.add(fperp, "fperp", false); dump.add(fperp2, "fperp2", false); dump.add(ivar_evol, "ivar_evol", true); + dump.add(ivar_vec_evol, "ivar_vec_evol"); + dump.add(svar_evol, "svar_evol"); dump.add(rvar_evol, "rvar_evol", true); dump.add(bvar_evol, "bvar_evol", true); dump.add(v2d, "v2d_evol", true); @@ -81,6 +93,12 @@ int main(int argc, char **argv) { if (check_incorrect_add == "ivar") { int dummy = 0; 
dump.add(dummy, "ivar", false); + } else if (check_incorrect_add == "ivar_vec") { + std::vector dummy = {-1}; + dump.add(dummy, "ivar_vec", false); + } else if (check_incorrect_add == "svar") { + std::string dummy = "y"; + dump.add(dummy, "svar", false); } else if (check_incorrect_add == "rvar") { BoutReal dummy = 0.0; dump.add(dummy, "rvar", false); @@ -99,6 +117,12 @@ int main(int argc, char **argv) { } else if (check_incorrect_add == "ivar_evol") { int dummy = 0; dump.add(dummy, "ivar_evol", true); + } else if (check_incorrect_add == "ivar_vec_evol") { + std::vector dummy = {-1}; + dump.add(dummy, "ivar_vec_evol", false); + } else if (check_incorrect_add == "svar_evol") { + std::string dummy = "y"; + dump.add(dummy, "svar_evol", false); } else if (check_incorrect_add == "rvar_evol") { BoutReal dummy = 0.0; dump.add(dummy, "rvar_evol", true); @@ -119,6 +143,8 @@ int main(int argc, char **argv) { bvar_evol = bvar; for(int i=0;i<3;i++) { ivar_evol = ivar + i; + ivar_vec_evol[0] += i; ivar_vec_evol[1] += i; ivar_vec_evol[2] += i; + svar_evol[0] += i; svar_evol[1] += i; svar_evol[2] += i; rvar_evol = rvar + 0.5 * i; bvar_evol = !bvar_evol; v2d.x = v2d.y = v2d.z = f2d; From 67240e61668d68d13aaebec5d45f498910abf4d1 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 25 Nov 2020 13:32:02 +0000 Subject: [PATCH 096/428] Add missing .c_str() calls in CheckGridValue() --- src/fileio/datafile.cxx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/fileio/datafile.cxx b/src/fileio/datafile.cxx index 42b40bcdba..08caa4005c 100644 --- a/src/fileio/datafile.cxx +++ b/src/fileio/datafile.cxx @@ -934,12 +934,12 @@ void checkGridValue(DataFormat* file, const std::string& name, const std::string& filename, const int reference_value) { int file_value; if (!file->read(&file_value, name)) { - throw BoutException("Could not read %s from file '%s'", name, filename); + throw BoutException("Could not read %s from file '%s'", name.c_str(), filename.c_str()); } if (file_value != reference_value) { - throw BoutException("%s (%i) in file '%s' does not match value in mesh (%i)", name, - file_value, filename, reference_value); + throw BoutException("%s (%i) in file '%s' does not match value in mesh (%i)", name.c_str(), + file_value, filename.c_str(), reference_value); } } From d1245c37d68c7a97dbf93b926caf7adbf0102a3f Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 25 Nov 2020 13:48:25 +0000 Subject: [PATCH 097/428] Fix expected value of ny in Datafile checks --- src/fileio/datafile.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fileio/datafile.cxx b/src/fileio/datafile.cxx index 08caa4005c..6de9489167 100644 --- a/src/fileio/datafile.cxx +++ b/src/fileio/datafile.cxx @@ -950,7 +950,7 @@ void checkFileGrid(DataFormat* file, const std::string& filename, const Mesh* me checkGridValue(file, "MZG", filename, mesh->zstart); // nx includes boundaries checkGridValue(file, "nx", filename, mesh->GlobalNx); - checkGridValue(file, "ny", filename, mesh->GlobalNy); + checkGridValue(file, "ny", filename, mesh->GlobalNy - 2*mesh->ystart); checkGridValue(file, "nz", filename, mesh->LocalNz); } } // namespace From 750fdcc818bce9f84372069a21e1cefc7563c3bf Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 10 Dec 2020 14:32:38 +0000 Subject: [PATCH 098/428] Fix yindex_global in benchmark data for test-io and test-io_hdf5 --- .../test-io/data/benchmark.out.0.nc | Bin 159445 -> 159617 bytes .../test-io_hdf5/data/benchmark.out.0.hdf5 | Bin 727950 -> 728190 bytes 2 files changed, 
0 insertions(+), 0 deletions(-) diff --git a/tests/integrated/test-io/data/benchmark.out.0.nc b/tests/integrated/test-io/data/benchmark.out.0.nc index bae42030c77c72c226479e73bb9e4aa0f4b5e178..a8746aa0b821e89b744605a7b19d00687927a7a8 100644 GIT binary patch delta 219 zcmcb5m$UIc=L8L=#{7+1GA)d3lMTbNCvR#IV~O~1^62I>Et1k;F{w!lDp^eT{`S~h zyWk|hgn$480}I2|JSGMZ2m_NqLT~az86y_|WLl=52xF3-ygw>%@}?Fsmi$Sai#DHWk(34tPg+pPqI6%I zXLId>ll-haE({DT(Bn9*gguL$_rykYD14OEv8m2=B-*RKTb_o+RWm{+$YUFJ$o~Y`t(_+S(I2x z3@^n`KX95QYx|3}tpC`yE9_=r5?HSEn5BY|e|onbtM2rGXjY5q6QWsNSSo>PCvRYO zn4VC~$~s*(hLx9@gMneYY7A?&<@ACntPVl~3<3-wz$U{DrYjhx|C-AB4J4_=s4%&Z zQFXdPH>*~A)O1!LW&>h&Am#vK&h1gtxlCH7b7*m?hyqOo0tYb32qqaAc&9sRaoH1+ F2LSFCLU8~9 delta 180 zcmeyjL#J=M&IApnKIulSRxQR>Ev8m2=B-*RKTb{W(qk2vZnT+2Z~6vTR?W$k%!1P= zon}#*ZlllYIsNZx7S+w1Cg0e$E9_=r5?HSEn5BY|cY3!TtM2rGXjY5q9onqQ(-%hr zHLhWHnJy5+DzsfIhPBL+m50HCfnhqY7MC$AHv->k=d>M(%da>8wD^ T2E^<@%mKul+cl?irM3V7w!$`H From a1800a32e2f46eddacf9958291d6aa295e4f7557 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 10 Dec 2020 22:10:01 +0000 Subject: [PATCH 099/428] Fix a couple of clang-tidy warnings --- include/bout/solver.hxx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index 39529d3a5e..6f1396f969 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -382,14 +382,16 @@ protected: /// Vectors of diagnostic variables to save std::vector> diagnostic_int; std::vector> diagnostic_BoutReal; - void add_int_diagnostic(int &i, std::string name, std::string description = "") { + void add_int_diagnostic(int &i, const std::string &name, + const std::string &description = "") { VarStr v; v.var = &i; v.name = name; v.description = description; diagnostic_int.emplace_back(std::move(v)); }; - void add_BoutReal_diagnostic(BoutReal &r, std::string name, std::string description = "") { + void add_BoutReal_diagnostic(BoutReal &r, const std::string &name, + const std::string &description = "") { VarStr v; v.var = &r; v.name = name; From 66a1f881ada48aa211690bb1348edd2a0bbe58fe Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 2 Jan 2021 23:33:53 +0100 Subject: [PATCH 100/428] Fix reading of char* in Ncxx4 --- src/fileio/impls/netcdf4/ncxx4.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fileio/impls/netcdf4/ncxx4.cxx b/src/fileio/impls/netcdf4/ncxx4.cxx index 93b6b7a7e4..aab1855d75 100644 --- a/src/fileio/impls/netcdf4/ncxx4.cxx +++ b/src/fileio/impls/netcdf4/ncxx4.cxx @@ -611,7 +611,7 @@ bool Ncxx4::read(char *data, const char *name, int n) { return false; } - std::vector start = {1}; + std::vector start = {0}; std::vector counts = {size_t(n)}; var.getVar(start, counts, data); From 0e54e49d06756a89e29ad0e2c92b20db3e14e2bf Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 3 Jan 2021 00:53:54 +0100 Subject: [PATCH 101/428] Write descriptions for std::vector and std::string variables --- src/fileio/datafile.cxx | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/fileio/datafile.cxx b/src/fileio/datafile.cxx index 5dda96d361..240861e536 100644 --- a/src/fileio/datafile.cxx +++ b/src/fileio/datafile.cxx @@ -1367,6 +1367,20 @@ bool Datafile::write() { } } + // Vectors of integers + for(const auto& var : int_vec_arr) { + if (not var.description.empty()) { + file->setAttribute(var.name, "description", var.description); + } + } + + // String variables + for (const auto& var : string_arr) { + if (not var.description.empty()) { + file->setAttribute(var.name, 
"description", var.description); + } + } + // BoutReal variables for(const auto& var : BoutReal_arr) { if (not var.description.empty()) { From 8a935d8ecf11d7243ae7985bd0e9937caa10fc52 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 3 Jan 2021 17:38:09 +0100 Subject: [PATCH 102/428] Use bout_type="string" for strings in H5Format ... to be consistent with type definitions in the Python tools introduced in https://github.com/boutproject/boututils/pull/15 --- src/fileio/impls/hdf5/h5_format.cxx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/fileio/impls/hdf5/h5_format.cxx b/src/fileio/impls/hdf5/h5_format.cxx index 11e5092ed9..f56da7046e 100644 --- a/src/fileio/impls/hdf5/h5_format.cxx +++ b/src/fileio/impls/hdf5/h5_format.cxx @@ -260,6 +260,7 @@ bool H5Format::addVar(const std::string &name, bool repeat, hid_t write_hdf5_typ int nd = 0; if (datatype == "scalar") nd = 0; else if (datatype == "vector") nd = 1; + else if (datatype == "string") nd = 1; else if (datatype == "FieldX") nd = 1; else if (datatype == "Field2D") nd = 2; else if (datatype == "FieldPerp") nd = 2; @@ -398,7 +399,7 @@ bool H5Format::addVarIntVec(const std::string &name, bool repeat, size_t size) { } bool H5Format::addVarString(const std::string &name, bool repeat, size_t size) { - return addVar(name, repeat, H5T_C_S1, "vector", size); + return addVar(name, repeat, H5T_C_S1, "string", size); } bool H5Format::addVarBoutReal(const std::string &name, bool repeat) { From 25364f95aae0914d63cc5b6a6b6ec6cb64a42284 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 3 Jan 2021 17:51:20 +0100 Subject: [PATCH 103/428] clang-tidy fixes --- src/fileio/impls/hdf5/h5_format.cxx | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/fileio/impls/hdf5/h5_format.cxx b/src/fileio/impls/hdf5/h5_format.cxx index f56da7046e..e16743feb1 100644 --- a/src/fileio/impls/hdf5/h5_format.cxx +++ b/src/fileio/impls/hdf5/h5_format.cxx @@ -258,14 +258,19 @@ bool H5Format::addVar(const std::string &name, bool repeat, hid_t write_hdf5_typ } int nd = 0; - if (datatype == "scalar") nd = 0; - else if (datatype == "vector") nd = 1; - else if (datatype == "string") nd = 1; - else if (datatype == "FieldX") nd = 1; - else if (datatype == "Field2D") nd = 2; - else if (datatype == "FieldPerp") nd = 2; - else if (datatype == "Field3D") nd = 3; - else throw BoutException("Unrecognized datatype '"+datatype+"'"); + if (datatype == "scalar") { + nd = 0; + } else if (datatype == "vector" or datatype == "string" or datatype == "FieldX") { + nd = 1; + } else if (datatype == "Field2D") { + nd = 2; + } else if (datatype == "FieldPerp") { + nd = 2; + } else if (datatype == "Field3D") { + nd = 3; + } else throw { + BoutException("Unrecognized datatype '"+datatype+"'"); + } if (repeat) { // add time dimension From 51ad88d8ab261c10df2ba6506849bded41cbbcac Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 3 Jan 2021 18:07:42 +0100 Subject: [PATCH 104/428] Fix typo --- src/fileio/impls/hdf5/h5_format.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/fileio/impls/hdf5/h5_format.cxx b/src/fileio/impls/hdf5/h5_format.cxx index e16743feb1..76748b71cd 100644 --- a/src/fileio/impls/hdf5/h5_format.cxx +++ b/src/fileio/impls/hdf5/h5_format.cxx @@ -268,8 +268,8 @@ bool H5Format::addVar(const std::string &name, bool repeat, hid_t write_hdf5_typ nd = 2; } else if (datatype == "Field3D") { nd = 3; - } else throw { - BoutException("Unrecognized datatype '"+datatype+"'"); + } else { + throw 
BoutException("Unrecognized datatype '"+datatype+"'"); } if (repeat) { From 63d6aced0c9993637d0c08b3ba2c59c64bdeccc3 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 4 Jan 2021 14:22:09 +0100 Subject: [PATCH 105/428] Replace boutdata and boututils directories with submodules Get boutdata and boututils from the separate git repos (github.com/boutproject/boutdata and github.com/boutproject/boututils), including them as submodules --- .gitmodules | 6 + externalpackages/boutdata | 1 + externalpackages/boututils | 1 + tools/pylib/boutdata | 1 + tools/pylib/boutdata/__init__.py | 11 - tools/pylib/boutdata/cbdtoeqdsk.py | 30 - tools/pylib/boutdata/collect.py | 782 ---------------- tools/pylib/boutdata/data.py | 891 ------------------ tools/pylib/boutdata/gen_surface.py | 154 ---- tools/pylib/boutdata/griddata.py | 493 ---------- tools/pylib/boutdata/input.py | 58 -- tools/pylib/boutdata/mayavi2.py | 118 --- tools/pylib/boutdata/mms.py | 591 ------------ tools/pylib/boutdata/pol_slice.py | 110 --- tools/pylib/boutdata/processor_rearrange.py | 161 ---- tools/pylib/boutdata/restart.py | 828 ----------------- tools/pylib/boutdata/settings.py | 92 -- tools/pylib/boutdata/shiftz.py | 91 -- tools/pylib/boutdata/squashoutput.py | 154 ---- tools/pylib/boututils | 1 + tools/pylib/boututils/View3D.py | 390 -------- tools/pylib/boututils/__init__.py | 39 - tools/pylib/boututils/analyse_equil_2.py | 272 ------ tools/pylib/boututils/anim.py | 115 --- tools/pylib/boututils/ask.py | 53 -- tools/pylib/boututils/boutarray.py | 73 -- tools/pylib/boututils/boutgrid.py | 139 --- tools/pylib/boututils/boutwarnings.py | 19 - tools/pylib/boututils/calculus.py | 252 ------ tools/pylib/boututils/check_scaling.py | 90 -- tools/pylib/boututils/closest_line.py | 14 - tools/pylib/boututils/contour.py | 82 -- tools/pylib/boututils/crosslines.py | 1 - tools/pylib/boututils/datafile.py | 954 -------------------- tools/pylib/boututils/efit_analyzer.py | 420 --------- tools/pylib/boututils/fft_deriv.py | 46 - tools/pylib/boututils/fft_integrate.py | 64 -- tools/pylib/boututils/file_import.py | 27 - tools/pylib/boututils/geqdsk.py | 1 - tools/pylib/boututils/idl_tabulate.py | 15 - tools/pylib/boututils/int_func.py | 53 -- tools/pylib/boututils/linear_regression.py | 27 - tools/pylib/boututils/local_min_max.py | 1 - tools/pylib/boututils/mode_structure.py | 417 --------- tools/pylib/boututils/moment_xyzt.py | 80 -- tools/pylib/boututils/options.py | 165 ---- tools/pylib/boututils/plotdata.py | 90 -- tools/pylib/boututils/plotpolslice.py | 143 --- tools/pylib/boututils/radial_grid.py | 68 -- tools/pylib/boututils/read_geqdsk.py | 92 -- tools/pylib/boututils/run_wrapper.py | 279 ------ tools/pylib/boututils/showdata.py | 702 -------------- tools/pylib/boututils/spectrogram.py | 163 ---- tools/pylib/boututils/surface_average.py | 103 --- tools/pylib/boututils/volume_integral.py | 107 --- tools/pylib/boututils/watch.py | 84 -- 56 files changed, 10 insertions(+), 10204 deletions(-) create mode 160000 externalpackages/boutdata create mode 160000 externalpackages/boututils create mode 120000 tools/pylib/boutdata delete mode 100644 tools/pylib/boutdata/__init__.py delete mode 100644 tools/pylib/boutdata/cbdtoeqdsk.py delete mode 100644 tools/pylib/boutdata/collect.py delete mode 100644 tools/pylib/boutdata/data.py delete mode 100644 tools/pylib/boutdata/gen_surface.py delete mode 100644 tools/pylib/boutdata/griddata.py delete mode 100644 tools/pylib/boutdata/input.py delete mode 100644 tools/pylib/boutdata/mayavi2.py delete mode 
100644 tools/pylib/boutdata/mms.py delete mode 100644 tools/pylib/boutdata/pol_slice.py delete mode 100644 tools/pylib/boutdata/processor_rearrange.py delete mode 100644 tools/pylib/boutdata/restart.py delete mode 100644 tools/pylib/boutdata/settings.py delete mode 100644 tools/pylib/boutdata/shiftz.py delete mode 100644 tools/pylib/boutdata/squashoutput.py create mode 120000 tools/pylib/boututils delete mode 100644 tools/pylib/boututils/View3D.py delete mode 100644 tools/pylib/boututils/__init__.py delete mode 100644 tools/pylib/boututils/analyse_equil_2.py delete mode 100755 tools/pylib/boututils/anim.py delete mode 100644 tools/pylib/boututils/ask.py delete mode 100644 tools/pylib/boututils/boutarray.py delete mode 100755 tools/pylib/boututils/boutgrid.py delete mode 100644 tools/pylib/boututils/boutwarnings.py delete mode 100644 tools/pylib/boututils/calculus.py delete mode 100644 tools/pylib/boututils/check_scaling.py delete mode 100644 tools/pylib/boututils/closest_line.py delete mode 100644 tools/pylib/boututils/contour.py delete mode 120000 tools/pylib/boututils/crosslines.py delete mode 100644 tools/pylib/boututils/datafile.py delete mode 100644 tools/pylib/boututils/efit_analyzer.py delete mode 100644 tools/pylib/boututils/fft_deriv.py delete mode 100644 tools/pylib/boututils/fft_integrate.py delete mode 100644 tools/pylib/boututils/file_import.py delete mode 120000 tools/pylib/boututils/geqdsk.py delete mode 100644 tools/pylib/boututils/idl_tabulate.py delete mode 100644 tools/pylib/boututils/int_func.py delete mode 100644 tools/pylib/boututils/linear_regression.py delete mode 120000 tools/pylib/boututils/local_min_max.py delete mode 100644 tools/pylib/boututils/mode_structure.py delete mode 100644 tools/pylib/boututils/moment_xyzt.py delete mode 100644 tools/pylib/boututils/options.py delete mode 100644 tools/pylib/boututils/plotdata.py delete mode 100644 tools/pylib/boututils/plotpolslice.py delete mode 100644 tools/pylib/boututils/radial_grid.py delete mode 100644 tools/pylib/boututils/read_geqdsk.py delete mode 100644 tools/pylib/boututils/run_wrapper.py delete mode 100644 tools/pylib/boututils/showdata.py delete mode 100644 tools/pylib/boututils/spectrogram.py delete mode 100644 tools/pylib/boututils/surface_average.py delete mode 100644 tools/pylib/boututils/volume_integral.py delete mode 100644 tools/pylib/boututils/watch.py diff --git a/.gitmodules b/.gitmodules index 27c98fc7be..81e3dd0948 100644 --- a/.gitmodules +++ b/.gitmodules @@ -7,3 +7,9 @@ [submodule "mpark.variant"] path = externalpackages/mpark.variant url = https://github.com/mpark/variant.git +[submodule "externalpackages/boututils"] + path = externalpackages/boututils + url = https://github.com/boutproject/boututils.git +[submodule "externalpackages/boutdata"] + path = externalpackages/boutdata + url = https://github.com/boutproject/boutdata.git diff --git a/externalpackages/boutdata b/externalpackages/boutdata new file mode 160000 index 0000000000..0b849bd326 --- /dev/null +++ b/externalpackages/boutdata @@ -0,0 +1 @@ +Subproject commit 0b849bd3263574afd1d468e56a116d2956896fac diff --git a/externalpackages/boututils b/externalpackages/boututils new file mode 160000 index 0000000000..1db58c0701 --- /dev/null +++ b/externalpackages/boututils @@ -0,0 +1 @@ +Subproject commit 1db58c0701823ca5ddb67c9b29be1643b3c604b6 diff --git a/tools/pylib/boutdata b/tools/pylib/boutdata new file mode 120000 index 0000000000..1aa0a53311 --- /dev/null +++ b/tools/pylib/boutdata @@ -0,0 +1 @@ 
+../../externalpackages/boutdata/boutdata/ \ No newline at end of file diff --git a/tools/pylib/boutdata/__init__.py b/tools/pylib/boutdata/__init__.py deleted file mode 100644 index 17b97e7aca..0000000000 --- a/tools/pylib/boutdata/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -""" Routines for exchanging data to/from BOUT++ """ - -try: - from builtins import str -except ImportError: - raise ImportError("Please install the future module to use Python 2") - -# Import this, as this almost always used when calling this package -from boutdata.collect import collect, attributes - -__all__ = ["attributes", "collect", "gen_surface", "pol_slice"] diff --git a/tools/pylib/boutdata/cbdtoeqdsk.py b/tools/pylib/boutdata/cbdtoeqdsk.py deleted file mode 100644 index 90d54e29b5..0000000000 --- a/tools/pylib/boutdata/cbdtoeqdsk.py +++ /dev/null @@ -1,30 +0,0 @@ -from __future__ import print_function -from boututils.file_import import file_import -from bunch import Bunch -import numpy as np - - -def cbmtogeqdsk(g): - gg=Bunch() - gg.r=g['Rxy'] - gg.z=g['Zxy'] - gg.psi=g['psi'] - gg.pres=g['mu0p'] - gg.qpsi=g['qsafe'] - gg.fpol=g['f'] - gg.nx=g['nx'] - gg.ny=g['ny'] - i=np.argwhere(g['mu0p']==0) - - gg.simagx=gg.psi.min() - gg.sibdry=gg.psi[i[0]] - gg.xlim=0 - gg.ylim=0 - gg.nlim=0 - - return gg - -if __name__ == '__main__': - gfile='../cbm18_dens8.dskgato.cdl' - g=file_import(gfile) - print(cbmtogeqdsk(g)) diff --git a/tools/pylib/boutdata/collect.py b/tools/pylib/boutdata/collect.py deleted file mode 100644 index 0a965c8e80..0000000000 --- a/tools/pylib/boutdata/collect.py +++ /dev/null @@ -1,782 +0,0 @@ -from __future__ import print_function -from __future__ import division - -from builtins import str, range - -import os -import sys -import glob - -import numpy as np - -from boututils.datafile import DataFile -from boututils.boutarray import BoutArray - - -def findVar(varname, varlist): - """Find variable name in a list - - First does case insensitive comparison, then - checks for abbreviations. - - Returns the matched string, or raises a ValueError - - Parameters - ---------- - varname : str - Variable name to look for - varlist : list of str - List of possible variable names - - Returns - ------- - str - The closest match to varname in varlist - - """ - # Try a variation on the case - v = [name for name in varlist if name.lower() == varname.lower()] - if len(v) == 1: - # Found case match - print("Variable '%s' not found. Using '%s' instead" % (varname, v[0])) - return v[0] - elif len(v) > 1: - print("Variable '"+varname + - "' not found, and is ambiguous. Could be one of: "+str(v)) - raise ValueError("Variable '"+varname+"' not found") - - # None found. Check if it's an abbreviation - v = [name for name in varlist - if name[:len(varname)].lower() == varname.lower()] - if len(v) == 1: - print("Variable '%s' not found. Using '%s' instead" % (varname, v[0])) - return v[0] - - if len(v) > 1: - print("Variable '"+varname + - "' not found, and is ambiguous. Could be one of: "+str(v)) - raise ValueError("Variable '"+varname+"' not found") - - -def _convert_to_nice_slice(r, N, name="range"): - """Convert r to a "sensible" slice in range [0, N] - - If r is None, the slice corresponds to the full range. - - Lists or tuples of one or two ints are converted to slices. - - Slices with None for one or more arguments have them replaced with - sensible values. 
- - Private helper function for collect - - Parameters - ---------- - r : None, int, slice or list of int - Range-like to check/convert to slice - N : int - Size of range - name : str, optional - Name of range for error message - - Returns - ------- - slice - "Sensible" slice with no Nones for start, stop or step - """ - - if N == 0: - raise ValueError("No data available in %s"%name) - if r is None: - temp_slice = slice(N) - elif isinstance(r, slice): - temp_slice = r - elif isinstance(r, (int, np.integer)): - if r >= N or r <-N: - # raise out of bounds error as if we'd tried to index the array with r - # without this, would return an empty array instead - raise IndexError(name+" index out of range, value was "+str(r)) - elif r == -1: - temp_slice = slice(r, None) - else: - temp_slice = slice(r, r + 1) - elif len(r) == 0: - return _convert_to_nice_slice(None, N, name) - elif len(r) == 1: - return _convert_to_nice_slice(r[0], N, name) - elif len(r) == 2: - r2 = list(r) - if r2[0] < 0: - r2[0] += N - if r2[1] < 0: - r2[1] += N - if r2[0] > r2[1]: - raise ValueError("{} start ({}) is larger than end ({})" - .format(name, *r2)) - # Lists uses inclusive end, we need exclusive end - temp_slice = slice(r2[0], r2[1] + 1) - else: - raise ValueError("Couldn't convert {} ('{}') to slice. Please pass a " - "slice(start, stop, step) if you need to set a step." - .format(name, r)) - - # slice.indices converts None to actual values - return slice(*temp_slice.indices(N)) - - -def collect(varname, xind=None, yind=None, zind=None, tind=None, path=".", - yguards=False, xguards=True, info=True, prefix="BOUT.dmp", - strict=False, tind_auto=False, datafile_cache=None): - """Collect a variable from a set of BOUT++ outputs. - - Parameters - ---------- - varname : str - Name of the variable - xind, yind, zind, tind : int, slice or list of int, optional - Range of X, Y, Z or time indices to collect. Either a single - index to collect, a list containing [start, end] (inclusive - end), or a slice object (usual python indexing). Default is to - fetch all indices - path : str, optional - Path to data files (default: ".") - prefix : str, optional - File prefix (default: "BOUT.dmp") - yguards : bool, optional - Collect Y boundary guard cells? (default: False) - xguards : bool, optional - Collect X boundary guard cells? (default: True) - (Set to True to be consistent with the definition of nx) - info : bool, optional - Print information about collect? (default: True) - strict : bool, optional - Fail if the exact variable name is not found? (default: False) - tind_auto : bool, optional - Read all files, to get the shortest length of time_indices. - Useful if writing got interrupted (default: False) - datafile_cache : datafile_cache_tuple, optional - Optional cache of open DataFile instances: namedtuple as returned - by create_cache. 
Used by BoutOutputs to pass in a cache so that we - do not have to re-open the dump files to read another variable - (default: None) - - Examples - -------- - - >>> collect(name) - BoutArray([[[[...]]]]) - - """ - - if datafile_cache is None: - # Search for BOUT++ dump files - file_list, parallel, _ = findFiles(path, prefix) - else: - parallel = datafile_cache.parallel - file_list = datafile_cache.file_list - - def getDataFile(i): - """Get the DataFile from the cache, if present, otherwise open the - DataFile - - """ - if datafile_cache is not None: - return datafile_cache.datafile_list[i] - else: - return DataFile(file_list[i]) - - if parallel: - if info: - print("Single (parallel) data file") - - f = getDataFile(0) - - dimensions = f.dimensions(varname) - - try: - mxg = f["MXG"] - except KeyError: - mxg = 0 - print("MXG not found, setting to {}".format(mxg)) - try: - myg = f["MYG"] - except KeyError: - myg = 0 - print("MYG not found, setting to {}".format(myg)) - - if xguards: - nx = f["nx"] - else: - nx = f["nx"] - 2*mxg - if yguards: - ny = f["ny"] + 2*myg - else: - ny = f["ny"] - nz = f["MZ"] - t_array = f.read("t_array") - if t_array is None: - nt = 1 - t_array = np.zeros(1) - else: - try: - nt = len(t_array) - except TypeError: - # t_array is not an array here, which probably means it was a - # one-element array and has been read as a scalar. - nt = 1 - - xind = _convert_to_nice_slice(xind, nx, "xind") - yind = _convert_to_nice_slice(yind, ny, "yind") - zind = _convert_to_nice_slice(zind, nz, "zind") - tind = _convert_to_nice_slice(tind, nt, "tind") - - if not xguards: - xind = slice(xind.start+mxg, xind.stop+mxg, xind.step) - if not yguards: - yind = slice(yind.start+myg, yind.stop+myg, yind.step) - - if len(dimensions) == (): - ranges = [] - elif dimensions == ('t'): - ranges = [tind] - elif dimensions == ('x', 'y'): - # Field2D - ranges = [xind, yind] - elif dimensions == ('x', 'z'): - # FieldPerp - ranges = [xind, zind] - elif dimensions == ('t', 'x', 'y'): - # evolving Field2D - ranges = [tind, xind, yind] - elif dimensions == ('t', 'x', 'z'): - # evolving FieldPerp - ranges = [tind, xind, zind] - elif dimensions == ('x', 'y', 'z'): - # Field3D - ranges = [xind, yind, zind] - elif dimensions == ('t', 'x', 'y', 'z'): - # evolving Field3D - ranges = [tind, xind, yind, zind] - else: - raise ValueError("Variable has incorrect dimensions ({})" - .format(dimensions)) - - data = f.read(varname, ranges) - var_attributes = f.attributes(varname) - return BoutArray(data, attributes=var_attributes) - - nfiles = len(file_list) - - # Read data from the first file - f = getDataFile(0) - - if varname not in f.keys(): - if strict: - raise ValueError("Variable '{}' not found".format(varname)) - else: - varname = findVar(varname, f.list()) - - dimensions = f.dimensions(varname) - - var_attributes = f.attributes(varname) - ndims = len(dimensions) - - # ndims is 0 for reals, and 1 for f.ex. 
t_array - if ndims == 0: - # Just read from file - data = f.read(varname) - if datafile_cache is None: - # close the DataFile if we are not keeping it in a cache - f.close() - return BoutArray(data, attributes=var_attributes) - - if ndims > 4: - raise ValueError("ERROR: Too many dimensions") - - def load_and_check(varname): - var = f.read(varname) - if var is None: - raise ValueError("Missing " + varname + " variable") - return var - - mxsub = load_and_check("MXSUB") - mysub = load_and_check("MYSUB") - mz = load_and_check("MZ") - mxg = load_and_check("MXG") - myg = load_and_check("MYG") - t_array = f.read("t_array") - if t_array is None: - nt = 1 - t_array = np.zeros(1) - else: - try: - nt = len(t_array) - except TypeError: - # t_array is not an array here, which probably means it was a - # one-element array and has been read as a scalar. - nt = 1 - if tind_auto: - for i in range(nfiles): - t_array_ = getDataFile(i).read("t_array") - nt = min(len(t_array_), nt) - - if info: - print("mxsub = %d mysub = %d mz = %d\n" % (mxsub, mysub, mz)) - - # Get the version of BOUT++ (should be > 0.6 for NetCDF anyway) - try: - version = f["BOUT_VERSION"] - except KeyError: - print("BOUT++ version : Pre-0.2") - version = 0 - if version < 3.5: - # Remove extra point - nz = mz-1 - else: - nz = mz - - # Fallback to sensible (?) defaults - try: - nxpe = f["NXPE"] - except KeyError: - nxpe = 1 - print("NXPE not found, setting to {}".format(nxpe)) - try: - nype = f["NYPE"] - except KeyError: - nype = nfiles - print("NYPE not found, setting to {}".format(nype)) - - npe = nxpe * nype - if info: - print("nxpe = %d, nype = %d, npe = %d\n" % (nxpe, nype, npe)) - if npe < nfiles: - print("WARNING: More files than expected (" + str(npe) + ")") - elif npe > nfiles: - print("WARNING: Some files missing. 
Expected " + str(npe)) - - if xguards: - nx = nxpe * mxsub + 2*mxg - else: - nx = nxpe * mxsub - - if yguards: - ny = mysub * nype + 2*myg - else: - ny = mysub * nype - - xind = _convert_to_nice_slice(xind, nx, "xind") - yind = _convert_to_nice_slice(yind, ny, "yind") - zind = _convert_to_nice_slice(zind, nz, "zind") - tind = _convert_to_nice_slice(tind, nt, "tind") - - xsize = xind.stop - xind.start - ysize = yind.stop - yind.start - zsize = int(np.ceil(float(zind.stop - zind.start)/zind.step)) - tsize = int(np.ceil(float(tind.stop - tind.start)/tind.step)) - - if ndims == 1: - if tind is None: - data = f.read(varname) - else: - data = f.read(varname, ranges=[tind]) - if datafile_cache is None: - # close the DataFile if we are not keeping it in a cache - f.close() - return BoutArray(data, attributes=var_attributes) - - if datafile_cache is None: - # close the DataFile if we are not keeping it in a cache - f.close() - - # Map between dimension names and output size - sizes = {'x': xsize, 'y': ysize, 'z': zsize, 't': tsize} - - # Create a list with size of each dimension - ddims = [sizes[d] for d in dimensions] - - # Create the data array - data = np.zeros(ddims) - - if dimensions == ('t', 'x', 'z') or dimensions == ('x', 'z'): - yindex_global = None - # The pe_yind that this FieldPerp is going to be read from - fieldperp_yproc = None - - for i in range(npe): - # Get X and Y processor indices - pe_yind = int(i/nxpe) - pe_xind = i % nxpe - - inrange = True - - if yguards: - # Get local ranges - ystart = yind.start - pe_yind*mysub - ystop = yind.stop - pe_yind*mysub - - # Check lower y boundary - if pe_yind == 0: - # Keeping inner boundary - if ystop <= 0: - inrange = False - if ystart < 0: - ystart = 0 - else: - if ystop < myg-1: - inrange = False - if ystart < myg: - ystart = myg - - # Upper y boundary - if pe_yind == (nype - 1): - # Keeping outer boundary - if ystart >= (mysub + 2*myg): - inrange = False - if ystop > (mysub + 2*myg): - ystop = (mysub + 2*myg) - else: - if ystart >= (mysub + myg): - inrange = False - if ystop > (mysub + myg): - ystop = (mysub + myg) - - # Calculate global indices - ygstart = ystart + pe_yind * mysub - ygstop = ystop + pe_yind * mysub - - else: - # Get local ranges - ystart = yind.start - pe_yind*mysub + myg - ystop = yind.stop - pe_yind*mysub + myg - - if (ystart >= (mysub + myg)) or (ystop <= myg): - inrange = False # Y out of range - - if ystart < myg: - ystart = myg - if ystop > mysub + myg: - ystop = myg + mysub - - # Calculate global indices - ygstart = ystart + pe_yind * mysub - myg - ygstop = ystop + pe_yind * mysub - myg - - if xguards: - # Get local ranges - xstart = xind.start - pe_xind*mxsub - xstop = xind.stop - pe_xind*mxsub - - # Check lower x boundary - if pe_xind == 0: - # Keeping inner boundary - if xstop <= 0: - inrange = False - if xstart < 0: - xstart = 0 - else: - if xstop <= mxg: - inrange = False - if xstart < mxg: - xstart = mxg - - # Upper x boundary - if pe_xind == (nxpe - 1): - # Keeping outer boundary - if xstart >= (mxsub + 2*mxg): - inrange = False - if xstop > (mxsub + 2*mxg): - xstop = (mxsub + 2*mxg) - else: - if xstart >= (mxsub + mxg): - inrange = False - if xstop > (mxsub + mxg): - xstop = (mxsub+mxg) - - # Calculate global indices - xgstart = xstart + pe_xind * mxsub - xgstop = xstop + pe_xind * mxsub - - else: - # Get local ranges - xstart = xind.start - pe_xind*mxsub + mxg - xstop = xind.stop - pe_xind*mxsub + mxg - - if (xstart >= (mxsub + mxg)) or (xstop <= mxg): - inrange = False # X out of range - - if xstart < 
mxg: - xstart = mxg - if xstop > mxsub + mxg: - xstop = mxg + mxsub - - # Calculate global indices - xgstart = xstart + pe_xind * mxsub - mxg - xgstop = xstop + pe_xind * mxsub - mxg - - # Number of local values - nx_loc = xstop - xstart - ny_loc = ystop - ystart - - if not inrange: - continue # Don't need this file - - if info: - sys.stdout.write("\rReading from " + file_list[i] + ": [" + - str(xstart) + "-" + str(xstop-1) + "][" + - str(ystart) + "-" + str(ystop-1) + "] -> [" + - str(xgstart) + "-" + str(xgstop-1) + "][" + - str(ygstart) + "-" + str(ygstop-1) + "]") - - f = getDataFile(i) - - if dimensions == ('t', 'x', 'y', 'z'): - d = f.read(varname, ranges=[tind, - slice(xstart, xstop), - slice(ystart, ystop), - zind]) - data[:, (xgstart-xind.start):(xgstart-xind.start+nx_loc), - (ygstart-yind.start):(ygstart-yind.start+ny_loc), :] = d - elif dimensions == ('x', 'y', 'z'): - d = f.read(varname, ranges=[slice(xstart, xstop), - slice(ystart, ystop), - zind]) - data[(xgstart-xind.start):(xgstart-xind.start+nx_loc), - (ygstart-yind.start):(ygstart-yind.start+ny_loc), :] = d - elif dimensions == ('t', 'x', 'y'): - d = f.read(varname, ranges=[tind, - slice(xstart, xstop), - slice(ystart, ystop)]) - data[:, (xgstart-xind.start):(xgstart-xind.start+nx_loc), - (ygstart-yind.start):(ygstart-yind.start+ny_loc)] = d - elif dimensions == ('t', 'x', 'z'): - # FieldPerp should only be defined on processors which contain its yindex_global - f_attributes = f.attributes(varname) - temp_yindex = f_attributes["yindex_global"] - - if temp_yindex >= 0: - if yindex_global is None: - yindex_global = temp_yindex - - # we have found a file with containing the FieldPerp, get the attributes from here - var_attributes = f_attributes - assert temp_yindex == yindex_global - - if temp_yindex >= 0: - # Check we only read from one pe_yind - assert fieldperp_yproc is None or fieldperp_yproc == pe_yind - - fieldperp_yproc = pe_yind - - d = f.read(varname, ranges=[tind, - slice(xstart, xstop), - zind]) - data[:, (xgstart-xind.start):(xgstart-xind.start+nx_loc), :] = d - elif dimensions == ('x', 'y'): - d = f.read(varname, ranges=[slice(xstart, xstop), - slice(ystart, ystop)]) - data[(xgstart-xind.start):(xgstart-xind.start+nx_loc), - (ygstart-yind.start):(ygstart-yind.start+ny_loc)] = d - elif dimensions == ('x', 'z'): - # FieldPerp should only be defined on processors which contain its yindex_global - f_attributes = f.attributes(varname) - temp_yindex = f_attributes["yindex_global"] - - if temp_yindex >= 0: - if yindex_global is None: - yindex_global = temp_yindex - - # we have found a file with containing the FieldPerp, get the attributes from here - var_attributes = f_attributes - assert temp_yindex == yindex_global - - if temp_yindex >= 0: - # Check we only read from one pe_yind - assert fieldperp_yproc is None or fieldperp_yproc == pe_yind - - fieldperp_yproc = pe_yind - - d = f.read(varname, ranges=[slice(xstart, xstop), zind]) - data[(xgstart-xind.start):(xgstart-xind.start+nx_loc), :] = d - else: - raise ValueError('Incorrect dimensions '+str(dimensions)+' in collect') - - if datafile_cache is None: - # close the DataFile if we are not keeping it in a cache - f.close() - - # if a step was requested in x or y, need to apply it here - if xind.step is not None or yind.step is not None: - if dimensions == ('t', 'x', 'y', 'z'): - data = data[:, ::xind.step, ::yind.step] - elif dimensions == ('x', 'y', 'z'): - data = data[::xind.step, ::yind.step, :] - elif dimensions == ('t', 'x', 'y'): - data = data[:, ::xind.step, 
::yind.step] - elif dimensions == ('t', 'x', 'z'): - data = data[:, ::xind.step, :] - elif dimensions == ('x', 'y'): - data = data[::xind.step, ::yind.step] - elif dimensions == ('x', 'z'): - data = data[::xind.step, :] - else: - raise ValueError('Incorrect dimensions '+str(dimensions)+' applying steps in collect') - - # Force the precision of arrays of dimension>1 - if ndims > 1: - try: - data = data.astype(t_array.dtype, copy=False) - except TypeError: - data = data.astype(t_array.dtype) - - # Finished looping over all files - if info: - sys.stdout.write("\n") - return BoutArray(data, attributes=var_attributes) - - -def attributes(varname, path=".", prefix="BOUT.dmp"): - """Return a dictionary of variable attributes in an output file - - Parameters - ---------- - varname : str - Name of the variable - path : str, optional - Path to data files (default: ".") - prefix : str, optional - File prefix (default: "BOUT.dmp") - - Returns - ------- - dict - A dictionary of attributes of varname - """ - # Search for BOUT++ dump files in NetCDF format - file_list, _, _ = findFiles(path, prefix) - - # Read data from the first file - f = DataFile(file_list[0]) - - return f.attributes(varname) - - -def dimensions(varname, path=".", prefix="BOUT.dmp"): - """Return the names of dimensions of a variable in an output file - - Parameters - ---------- - varname : str - Name of the variable - path : str, optional - Path to data files (default: ".") - prefix : str, optional - File prefix (default: "BOUT.dmp") - - Returns - ------- - tuple of strs - The elements of the tuple give the names of corresponding variable - dimensions - - """ - file_list, _, _ = findFiles(path, prefix) - return DataFile(file_list[0]).dimensions(varname) - - -def findFiles(path, prefix): - """Find files matching prefix in path. - - Netcdf (".nc", ".ncdf", ".cdl") and HDF5 (".h5", ".hdf5", ".hdf") - files are searched. - - Parameters - ---------- - path : str - Path to data files - prefix : str - File prefix - - Returns - ------- - tuple : (list of str, bool, str) - The first element of the tuple is the list of files, the second is - whether the files are a parallel dump file and the last element is - the file suffix. - - """ - - # Make sure prefix does not have a trailing . - if prefix[-1] == ".": - prefix = prefix[:-1] - - # Look for parallel dump files - suffixes = [".nc", ".ncdf", ".cdl", ".h5", ".hdf5", ".hdf"] - file_list_parallel = None - suffix_parallel = "" - for test_suffix in suffixes: - files = glob.glob(os.path.join(path, prefix+test_suffix)) - if files: - if file_list_parallel: # Already had a list of files - raise IOError("Parallel dump files with both {0} and {1} extensions are present. Do not know which to read.".format( - suffix, test_suffix)) - suffix_parallel = test_suffix - file_list_parallel = files - - file_list = None - suffix = "" - for test_suffix in suffixes: - files = glob.glob(os.path.join(path, prefix+".*"+test_suffix)) - if files: - if file_list: # Already had a list of files - raise IOError("Dump files with both {0} and {1} extensions are present. Do not know which to read.".format( - suffix, test_suffix)) - suffix = test_suffix - file_list = files - - if file_list_parallel and file_list: - raise IOError("Both regular (with suffix {0}) and parallel (with suffix {1}) dump files are present. 
Do not know which to read.".format( - suffix_parallel, suffix)) - elif file_list_parallel: - return file_list_parallel, True, suffix_parallel - elif file_list: - # make sure files are in the right order - nfiles = len(file_list) - file_list = [os.path.join(path, prefix+"."+str(i)+suffix) - for i in range(nfiles)] - return file_list, False, suffix - else: - raise IOError("ERROR: No data files found in path {0}".format(path)) - - -def create_cache(path, prefix): - """Create a list of DataFile objects to be passed repeatedly to - collect. - - Parameters - ---------- - path : str - Path to data files - prefix : str - File prefix - - Returns - ------- - namedtuple : (list of str, bool, str, list of :py:obj:`~boututils.datafile.DataFile`) - The cache of DataFiles in a namedtuple along with the file_list, - and parallel and suffix attributes - - """ - - # define namedtuple to return as the result - from collections import namedtuple - datafile_cache_tuple = namedtuple( - "datafile_cache", ["file_list", "parallel", "suffix", "datafile_list"]) - - file_list, parallel, suffix = findFiles(path, prefix) - - cache = [] - for f in file_list: - cache.append(DataFile(f)) - - return datafile_cache_tuple(file_list=file_list, parallel=parallel, suffix=suffix, datafile_list=cache) diff --git a/tools/pylib/boutdata/data.py b/tools/pylib/boutdata/data.py deleted file mode 100644 index 0025ba3034..0000000000 --- a/tools/pylib/boutdata/data.py +++ /dev/null @@ -1,891 +0,0 @@ -"""Provides a class BoutData which makes access to code inputs and -outputs easier. Creates a tree of maps, inspired by approach used in -OMFIT - -""" - -import os -import glob -import numpy -import re - -from boutdata.collect import collect, create_cache -from boututils.boutwarnings import alwayswarn -from boututils.datafile import DataFile - -# These are imported to be used by 'eval' in -# BoutOptions.evaluate_scalar() and BoutOptionsFile.evaluate(). -# Change the names to match those used by C++/BOUT++ -from numpy import (pi, sin, cos, tan, arccos as acos, arcsin as asin, - arctan as atan, arctan2 as atan2, sinh, cosh, tanh, - arcsinh as asinh, arccosh as acosh, arctanh as atanh, - exp, log, log10, power as pow, sqrt, ceil, floor, - round, abs) - - -class BoutOptions(object): - """This class represents a tree structure. Each node (BoutOptions - object) can have several sub-nodes (sections), and several - key-value pairs. - - Parameters - ---------- - name : str, optional - Name of the root section (default: "root") - parent : BoutOptions, optional - A parent BoutOptions object (default: None) - - Examples - -------- - - >>> optRoot = BoutOptions() # Create a root - - Specify value of a key in a section "test" - If the section does not exist then it is created - - >>> optRoot.getSection("test")["key"] = 4 - - Get the value of a key in a section "test" - If the section does not exist then a KeyError is raised - - >>> print(optRoot["test"]["key"]) - 4 - - To pretty print the options - - >>> print(optRoot) - root - |- test - | |- key = 4 - - """ - - def __init__(self, name="root", parent=None): - self._sections = {} - self._keys = {} - self._name = name - self._parent = parent - - def getSection(self, name): - """Return a section object. 
If the section does not exist then it is - created - - Parameters - ---------- - name : str - Name of the section to get/create - - Returns - ------- - BoutOptions - A new section with the original object as the parent - - """ - name = name.lower() - - if name in self._sections: - return self._sections[name] - else: - newsection = BoutOptions(name, self) - self._sections[name] = newsection - return newsection - - def __getitem__(self, key): - """ - First check if it's a section, then a value - """ - key = key.lower() - if key in self._sections: - return self._sections[key] - - if key not in self._keys: - raise KeyError("Key '%s' not in section '%s'" % (key, self.path())) - return self._keys[key] - - def __setitem__(self, key, value): - """ - Set a key - """ - if len(key) == 0: - return - self._keys[key.lower()] = value - - def path(self): - """Returns the path of this section, joining together names of - parents - - """ - - if self._parent: - return self._parent.path() + ":" + self._name - return self._name - - def keys(self): - """Returns all keys, including sections and values - - """ - return list(self._sections) + list(self._keys) - - def sections(self): - """Return a list of sub-sections - - """ - return self._sections.keys() - - def values(self): - """Return a list of values - - """ - return self._keys.keys() - - def as_dict(self): - """Return a nested dictionary of all the options. - - """ - dicttree = {name:self[name] for name in self.values()} - dicttree.update({name:self[name].as_dict() for name in self.sections()}) - return dicttree - - def __len__(self): - return len(self._sections) + len(self._keys) - - def __iter__(self): - """Iterates over all keys. First values, then sections - - """ - for k in self._keys: - yield k - for s in self._sections: - yield s - - def __str__(self, indent=""): - """Print a pretty version of the options tree - - """ - text = self._name + "\n" - - for k in self._keys: - text += indent + " |- " + k + " = " + str(self._keys[k]) + "\n" - - for s in self._sections: - text += indent + " |- " + self._sections[s].__str__(indent+" | ") - return text - - def evaluate_scalar(self, name): - """ - Evaluate (recursively) scalar expressions - """ - expression = self._substitute_expressions(name) - - # replace ^ with ** so that Python evaluates exponentiation - expression = expression.replace("^", "**") - - return eval(expression) - - def _substitute_expressions(self, name): - expression = str(self[name]).lower() - expression = self._evaluate_section(expression, "") - parent = self._parent - while parent is not None: - sectionname = parent._name - if sectionname is "root": - sectionname = "" - expression = parent._evaluate_section(expression, sectionname) - parent = parent._parent - - return expression - - def _evaluate_section(self, expression, nested_sectionname): - # pass a nested section name so that we can traverse the options tree - # rooted at our own level and each level above us so that we can use - # relatively qualified variable names, e.g. if we are in section - # 'foo:bar:baz' then a variable 'x' from section 'bar' could be called - # 'bar:x' (found traversing the tree starting from 'bar') or - # 'foo:bar:x' (found when traversing tree starting from 'foo'). - for var in self.values(): - if nested_sectionname is not "": - nested_name = nested_sectionname + ":" + var - else: - nested_name = var - if re.search(r"(?>> opts = BoutOptionsFile("BOUT.inp") - >>> print(opts) # Print all options in a tree - root - |- nout = 100 - |- timestep = 2 - ... 
- - >>> opts["All"]["scale"] # Value "scale" in section "All" - 1.0 - - """ - - def __init__(self, filename="BOUT.inp", name="root", gridfilename=None, nx=None, ny=None, nz=None): - BoutOptions.__init__(self, name) - # Open the file - with open(filename, "r") as f: - # Go through each line in the file - section = self # Start with root section - for linenr, line in enumerate(f.readlines()): - # First remove comments, either # or ; - startpos = line.find("#") - if startpos != -1: - line = line[:startpos] - startpos = line.find(";") - if startpos != -1: - line = line[:startpos] - - # Check section headers - startpos = line.find("[") - endpos = line.find("]") - if startpos != -1: - # A section heading - if endpos == -1: - raise SyntaxError("Missing ']' on line %d" % (linenr,)) - line = line[(startpos+1):endpos].strip() - - section = self - while True: - scorepos = line.find(":") - if scorepos == -1: - break - sectionname = line[0:scorepos] - line = line[(scorepos+1):] - section = section.getSection(sectionname) - section = section.getSection(line) - else: - # A key=value pair - - eqpos = line.find("=") - if eqpos == -1: - # No '=', so just set to true - section[line.strip()] = True - else: - value = line[(eqpos+1):].strip() - try: - # Try to convert to an integer - value = int(value) - except ValueError: - try: - # Try to convert to float - value = float(value) - except ValueError: - # Leave as a string - pass - - section[line[:eqpos].strip()] = value - - try: - # define arrays of x, y, z to be used for substitutions - gridfile = None - nzfromfile = None - if gridfilename: - if nx is not None or ny is not None: - raise ValueError("nx or ny given as inputs even though " - "gridfilename was given explicitly, " - "don't know which parameters to choose") - with DataFile(gridfilename) as gridfile: - self.nx = float(gridfile["nx"]) - self.ny = float(gridfile["ny"]) - try: - nzfromfile = gridfile["MZ"] - except KeyError: - pass - elif nx or ny: - if nx is None: - raise ValueError("nx not specified. If either nx or ny are given, then both must be.") - if ny is None: - raise ValueError("ny not specified. If either nx or ny are given, then both must be.") - self.nx = nx - self.ny = ny - else: - try: - self.nx = self["mesh"].evaluate_scalar("nx") - self.ny = self["mesh"].evaluate_scalar("ny") - except KeyError: - try: - # get nx, ny, nz from output files - from boutdata.collect import findFiles - file_list = findFiles(path=os.path.dirname(), prefix="BOUT.dmp") - with DataFile(file_list[0]) as f: - self.nx = f["nx"] - self.ny = f["ny"] - nzfromfile = f["MZ"] - except (IOError, KeyError): - try: - gridfilename = self["mesh"]["file"] - except KeyError: - gridfilename = self["grid"] - with DataFile(gridfilename) as gridfile: - self.nx = float(gridfile["nx"]) - self.ny = float(gridfile["ny"]) - try: - nzfromfile = float(gridfile["MZ"]) - except KeyError: - pass - if nz is not None: - self.nz = nz - else: - try: - self.nz = self["mesh"].evaluate_scalar("nz") - except KeyError: - try: - self.nz = self.evaluate_scalar("mz") - except KeyError: - if nzfromfile is not None: - self.nz = nzfromfile - mxg = self._keys.get("MXG", 2) - myg = self._keys.get("MYG", 2) - - # make self.x, self.y, self.z three dimensional now so - # that expressions broadcast together properly. - self.x = numpy.linspace((0.5 - mxg)/(self.nx - 2*mxg), - 1. 
- (0.5 - mxg)/(self.nx - 2*mxg), - self.nx)[:, numpy.newaxis, numpy.newaxis] - self.y = 2.*numpy.pi*numpy.linspace((0.5 - myg)/self.ny, - 1.-(0.5 - myg)/self.ny, - self.ny + 2*myg)[numpy.newaxis, :, numpy.newaxis] - self.z = 2.*numpy.pi*numpy.linspace(0.5/self.nz, - 1.-0.5/self.nz, - self.nz)[numpy.newaxis, numpy.newaxis, :] - except Exception as e: - alwayswarn("While building x, y, z coordinate arrays, an " - "exception occured: " + str(e) + - "\nEvaluating non-scalar options not available") - - def evaluate(self, name): - """Evaluate (recursively) expressions - - Sections and subsections must be given as part of 'name', - separated by colons - - Parameters - ---------- - name : str - Name of variable to evaluate, including sections and - subsections - - """ - section = self - split_name = name.split(":") - for subsection in split_name[:-1]: - section = section.getSection(subsection) - expression = section._substitute_expressions(split_name[-1]) - - # replace ^ with ** so that Python evaluates exponentiation - expression = expression.replace("^", "**") - - # substitute for x, y and z coordinates - for coord in ["x", "y", "z"]: - expression = re.sub(r"\b"+coord.lower()+r"\b", "self."+coord, expression) - - return eval(expression) - - def write(self, filename=None, overwrite=False): - """ Write to BOUT++ options file - - This method will throw an error rather than overwriting an existing - file unless the overwrite argument is set to true. - Note, no comments from the original input file are transferred to the - new one. - - Parameters - ---------- - filename : str - Path of the file to write - (defaults to path of the file that was read in) - overwrite : bool - If False then throw an exception if 'filename' already exists. - Otherwise, just overwrite without asking. - (default False) - """ - if filename is None: - filename = self.filename - - if not overwrite and os.path.exists(filename): - raise ValueError("Not overwriting existing file, cannot write output to "+filename) - - def write_section(basename, opts, f): - if basename: - f.write("["+basename+"]\n") - for key, value in opts._keys.items(): - f.write(key+" = "+str(value)+"\n") - for section in opts.sections(): - section_name = basename+":"+section if basename else section - write_section(section_name, opts[section], f) - - with open(filename, "w") as f: - write_section("", self, f) - - -class BoutOutputs(object): - """Emulates a map class, represents the contents of a BOUT++ dmp - files. Does not allow writing, only reading of data. By default - there is no cache, so each time a variable is read it is - collected; if caching is set to True variables are stored once - they are read. Extra keyword arguments are passed through to - collect. - - Parameters - ---------- - path : str, optional - Path to data files (default: ".") - prefix : str, optional - File prefix (default: "BOUT.dmp") - suffix : str, optional - File suffix (default: None, searches all file extensions) - caching : bool, float, optional - Switches on caching of data, so it is only read into memory - when first accessed (default False) If caching is set to a - number, it gives the maximum size of the cache in GB, after - which entries will be discarded in first-in-first-out order to - prevent the cache getting too big. 
If the variable being - returned is bigger than the maximum cache size, then the - variable will be returned without being added to the cache, - and the rest of the cache will be left (default: False) - DataFileCaching : bool, optional - Switch for creation of a cache of DataFile objects to be - passed to collect so that DataFiles do not need to be - re-opened to read each variable (default: True) - - **kwargs - keyword arguments that are passed through to _caching_collect() - - Examples - -------- - - >>> d = BoutOutputs(".") # Current directory - >> d.keys() # List all valid keys - ['iteration', - 'zperiod', - 'MYSUB', - ... - ] - - >>> d.dimensions["ne"] # Get the dimensions of the field ne - ('t', 'x', 'y', 'z') - - >>> d["ne"] # Read "ne" from data files - BoutArray([[[[...]]]]) - - >>> d = BoutOutputs(".", prefix="BOUT.dmp", caching=True) # Turn on caching - - """ - - def __init__(self, path=".", prefix="BOUT.dmp", suffix=None, caching=False, - DataFileCaching=True, **kwargs): - """ - Initialise BoutOutputs object - """ - self._path = path - # normalize prefix by removing trailing '.' if present - self._prefix = prefix.rstrip('.') - if suffix == None: - temp_file_list = glob.glob( - os.path.join(self._path, self._prefix + "*")) - latest_file = max(temp_file_list, key=os.path.getctime) - self._suffix = latest_file.split(".")[-1] - else: - # normalize suffix by removing leading '.' if present - self._suffix = suffix.lstrip('.') - self._caching = caching - self._DataFileCaching = DataFileCaching - self._kwargs = kwargs - - # Label for this data - self.label = path - - self._file_list = glob.glob(os.path.join( - path, self._prefix + "*" + self._suffix)) - if not suffix == None: - latest_file = max(self._file_list, key=os.path.getctime) - # if suffix==None we already found latest_file - - # Check that the path contains some data - if len(self._file_list) == 0: - raise ValueError("ERROR: No data files found") - - # Available variables - self.varNames = [] - self.dimensions = {} - self.evolvingVariableNames = [] - - with DataFile(latest_file) as f: - npes = f.read("NXPE")*f.read("NYPE") - if len(self._file_list) != npes: - alwayswarn("Too many data files, reading most recent ones") - if npes == 1: - # single output file - # do like this to catch, e.g. either 'BOUT.dmp.nc' or 'BOUT.dmp.0.nc' - self._file_list = [latest_file] - else: - self._file_list = [os.path.join( - path, self._prefix + "." + str(i) + "." + self._suffix) for i in range(npes)] - - # Get variable names - self.varNames = f.keys() - for name in f.keys(): - dimensions = f.dimensions(name) - self.dimensions[name] = dimensions - if name != "t_array" and "t" in dimensions: - self.evolvingVariableNames.append(name) - - # Private variables - if self._caching: - from collections import OrderedDict - self._datacache = OrderedDict() - if self._caching is not True: - # Track the size of _datacache and limit it to a maximum of _caching - try: - # Check that _caching is a number of some sort - float(self._caching) - except ValueError: - raise ValueError( - "BoutOutputs: Invalid value for caching argument. 
Caching should be either a number (giving the maximum size of the cache in GB), True for unlimited size or False for no caching.") - self._datacachesize = 0 - self._datacachemaxsize = self._caching*1.e9 - - self._DataFileCache = None - - def keys(self): - """Return a list of available variable names - - """ - return self.varNames - - def evolvingVariables(self): - """Return a list of names of time-evolving variables - - """ - return self.evolvingVariableNames - - def redistribute(self, npes, nxpe=None, mxg=2, myg=2, include_restarts=True): - """Create a new set of dump files for npes processors. - - Useful for restarting simulations using more or fewer processors. - - Existing data and restart files are kept in the directory - "redistribution_backups". redistribute() will fail if this - directory already exists, to avoid overwriting anything - - Parameters - ---------- - npes : int - Number of new files to create - nxpe : int, optional - If nxpe is None (the default), then an 'optimal' number will be - selected automatically - mxg, myg : int, optional - Number of guard cells in x, y (default: 2) - include_restarts : bool, optional - If True, then restart.redistribute will be used to - redistribute the restart files also (default: True) - - """ - from boutdata.processor_rearrange import get_processor_layout, create_processor_layout - from os import rename, path, mkdir - - # use get_processor_layout to get nx, ny - old_processor_layout = get_processor_layout( - DataFile(self._file_list[0]), has_t_dimension=True, mxg=mxg, myg=myg) - old_nxpe = old_processor_layout.nxpe - old_nype = old_processor_layout.nype - old_npes = old_processor_layout.npes - old_mxsub = old_processor_layout.mxsub - old_mysub = old_processor_layout.mysub - nx = old_processor_layout.nx - ny = old_processor_layout.ny - mz = old_processor_layout.mz - mxg = old_processor_layout.mxg - myg = old_processor_layout.myg - - # calculate new processor layout - new_processor_layout = create_processor_layout( - old_processor_layout, npes, nxpe=nxpe) - nxpe = new_processor_layout.nxpe - nype = new_processor_layout.nype - mxsub = new_processor_layout.mxsub - mysub = new_processor_layout.mysub - - # move existing files to backup directory - # don't overwrite backup: os.mkdir will raise exception if directory already exists - backupdir = path.join(self._path, "redistribution_backups") - mkdir(backupdir) - for f in self._file_list: - rename(f, path.join(backupdir, path.basename(f))) - - # create new output files - outfile_list = [] - this_prefix = self._prefix - if not this_prefix[-1] == '.': - # ensure prefix ends with a '.' - this_prefix = this_prefix + "." 
- for i in range(npes): - outpath = os.path.join( - self._path, this_prefix+str(i)+"."+self._suffix) - if self._suffix.split(".")[-1] in ["nc", "ncdf", "cdl"]: - # set format option to DataFile explicitly to avoid creating netCDF3 files, which can only contain up to 2GB of data - outfile_list.append( - DataFile(outpath, write=True, create=True, format='NETCDF4')) - else: - outfile_list.append(DataFile(outpath, write=True, create=True)) - - # Create a DataFileCache, if needed - if self._DataFileCaching: - DataFileCache = create_cache(backupdir, self._prefix) - else: - DataFileCache = None - # read and write the data - for v in self.varNames: - print("processing "+v) - data = collect(v, path=backupdir, prefix=self._prefix, xguards=True, - yguards=True, info=False, datafile_cache=DataFileCache) - ndims = len(data.shape) - - # write data - for i in range(npes): - ix = i % nxpe - iy = int(i/nxpe) - outfile = outfile_list[i] - if v == "NPES": - outfile.write(v, npes) - elif v == "NXPE": - outfile.write(v, nxpe) - elif v == "NYPE": - outfile.write(v, nype) - elif v == "MXSUB": - outfile.write(v, mxsub) - elif v == "MYSUB": - outfile.write(v, mysub) - elif ndims == 0: - # scalar - outfile.write(v, data) - elif ndims == 1: - # time evolving scalar - outfile.write(v, data) - elif ndims == 2: - # Field2D - if data.shape != (nx + 2*mxg, ny + 2*myg): - # FieldPerp? - # check is not perfect, fails if ny=nz - raise ValueError( - "Error: Found FieldPerp '"+v+"'. This case is not currently handled by BoutOutputs.redistribute().") - outfile.write( - v, data[ix*mxsub:(ix+1)*mxsub+2*mxg, iy*mysub:(iy+1)*mysub+2*myg]) - elif ndims == 3: - # Field3D - if data.shape[:2] != (nx + 2*mxg, ny + 2*myg): - # evolving Field2D, but this case is not handled - # check is not perfect, fails if ny=nx and nx=nt - raise ValueError("Error: Found evolving Field2D '"+v + - "'. This case is not currently handled by BoutOutputs.redistribute().") - outfile.write( - v, data[ix*mxsub:(ix+1)*mxsub+2*mxg, iy*mysub:(iy+1)*mysub+2*myg, :]) - elif ndims == 4: - outfile.write( - v, data[:, ix*mxsub:(ix+1)*mxsub+2*mxg, iy*mysub:(iy+1)*mysub+2*myg, :]) - else: - print( - "ERROR: variable found with unexpected number of dimensions,", ndims) - - for outfile in outfile_list: - outfile.close() - - if include_restarts: - print("processing restarts") - from boutdata import restart - from glob import glob - restart_prefix = "BOUT.restart" - restarts_list = glob(path.join(self._path, restart_prefix+"*")) - - # Move existing restart files to backup directory - for f in restarts_list: - rename(f, path.join(backupdir, path.basename(f))) - - # Redistribute restarts - restart.redistribute(npes, path=backupdir, - nxpe=nxpe, output=self._path, mxg=mxg, myg=myg) - - def _collect(self, *args, **kwargs): - """Wrapper for collect to pass self._DataFileCache if necessary. 
- - """ - if self._DataFileCaching and self._DataFileCache is None: - # Need to create the cache - self._DataFileCache = create_cache(self._path, self._prefix) - return collect(*args, datafile_cache=self._DataFileCache, **kwargs) - - def __len__(self): - return len(self.varNames) - - def __getitem__(self, name): - """Reads a variable - - Caches result and returns later if called again, if caching is - turned on for this instance - - """ - - if self._caching: - if name not in self._datacache.keys(): - item = self._collect(name, path=self._path, - prefix=self._prefix, **self._kwargs) - if self._caching is not True: - itemsize = item.nbytes - if itemsize > self._datacachemaxsize: - return item - self._datacache[name] = item - self._datacachesize += itemsize - while self._datacachesize > self._datacachemaxsize: - self._removeFirstFromCache() - else: - self._datacache[name] = item - return item - else: - return self._datacache[name] - else: - # Collect the data from the repository - data = self._collect(name, path=self._path, - prefix=self._prefix, **self._kwargs) - return data - - def _removeFirstFromCache(self): - """Pop the first item from the OrderedDict _datacache - - """ - item = self._datacache.popitem(last=False) - self._datacachesize -= item[1].nbytes - - def __iter__(self): - """Iterate through all keys, starting with "options" then going - through all variables for _caching_collect - - """ - for k in self.varNames: - yield k - - def __str__(self, indent=""): - """Print a pretty version of the tree - - """ - text = "" - for k in self.varNames: - text += indent+k+"\n" - - return text - - -def BoutData(path=".", prefix="BOUT.dmp", caching=False, **kwargs): - """Returns a dictionary, containing the contents of a BOUT++ output - directory. - - Does not allow writing, only reading of data. By default there is - no cache, so each time a variable is read it is collected; if - caching is set to True variables are stored once they are read. - - Parameters - ---------- - path : str, optional - Path to data files (default: ".") - prefix : str, optional - File prefix (default: "BOUT.dmp") - caching : bool, float, optional - Switches on caching of data, so it is only read into memory - when first accessed (default False) If caching is set to a - number, it gives the maximum size of the cache in GB, after - which entries will be discarded in first-in-first-out order to - prevent the cache getting too big. 
If the variable being - returned is bigger than the maximum cache size, then the - variable will be returned without being added to the cache, - and the rest of the cache will be left (default: False) - DataFileCaching : bool, optional - Switch for creation of a cache of DataFile objects to be - passed to collect so that DataFiles do not need to be - re-opened to read each variable (default: True) - **kwargs - Keyword arguments that are passed through to collect() - - Returns - ------- - dict - Contents of a BOUT++ output directory, including options and - output files - - Examples - -------- - - >>> d = BoutData(".") # Current directory - - >>> d.keys() # List all valid keys - - >>> print(d["options"]) # Prints tree of options - - >>> d["options"]["nout"] # Value of nout in BOUT.inp file - - >>> print(d["outputs"]) # Print available outputs - - >>> d["outputs"]["ne"] # Read "ne" from data files - - >>> d = BoutData(".", prefix="BOUT.dmp", caching=True) # Turn on caching - - """ - - data = {} # Map for the result - - data["path"] = path - - # Options from BOUT.inp file - data["options"] = BoutOptionsFile( - os.path.join(path, "BOUT.inp"), name="options") - - # Output from .dmp.* files - data["outputs"] = BoutOutputs( - path, prefix=prefix, caching=caching, **kwargs) - - return data diff --git a/tools/pylib/boutdata/gen_surface.py b/tools/pylib/boutdata/gen_surface.py deleted file mode 100644 index 9b1caf81a0..0000000000 --- a/tools/pylib/boutdata/gen_surface.py +++ /dev/null @@ -1,154 +0,0 @@ -"""Flux surface generator for tokamak grid files - -""" -from __future__ import print_function - -import numpy as np - - -def gen_surface(grid): - """Generator for iterating over flux surfaces - - Parameters - ---------- - grid : DataFile - An input grid file to read to find flux surfaces - - Yields - ------ - tuple : (int, list of int, bool) - A tuple containing the x index, list of y indices and whether - this flux surface is periodic - - """ - # Read the grid data - nx = grid.read("nx") - ny = grid.read("ny") - - npol = grid.read("npol") - if npol is None: - # Domains not stored in file (BOUT style input) - ixseps1 = grid.read("ixseps1") - ixseps2 = grid.read("ixseps2") - jyseps1_1 = grid.read("jyseps1_1") - jyseps1_2 = grid.read("jyseps1_2") - jyseps2_1 = grid.read("jyseps2_1") - jyseps2_2 = grid.read("jyseps2_2") - - if ixseps1 == ixseps2: - # Single null - ndomains = 3 - else: - # Double null - ndomains = 6 - - yup_xsplit = np.zeros(ndomains) - ydown_xsplit = np.zeros(ndomains) - yup_xin = np.zeros(ndomains) - yup_xout = np.zeros(ndomains) - ydown_xin = np.zeros(ndomains) - ydown_xout = np.zeros(ndomains) - - ystart = np.zeros(ndomains+1) - ystart[ndomains] = ny - - # Inner lower leg - ydown_xsplit[0] = -1 - ydown_xout[0] = -1 - yup_xsplit[0] = ixseps1 - yup_xin[0] = ndomains-1 # Outer lower leg - yup_xout[0] = 1 - - # Outer lower leg - ydown_xsplit[ndomains-1] = ixseps1 - ydown_xin[ndomains-1] = 0 - ydown_xout[ndomains-1] = ndomains-2 - yup_xsplit[ndomains-1] = -1 - yup_xout[ndomains-1] = -1 - ystart[ndomains-1] = jyseps2_2+1 - - if ixseps1 == ixseps2: - # Single null - - ydown_xsplit[1] = ixseps1 - ydown_xin[1] = 1 - ydown_xout[1] = 0 - yup_xsplit[1] = ixseps1 - yup_xin[1] = 1 - yup_xout[1] = 2 - ystart[1] = jyseps1_1+1 - else: - # Double null - raise RuntimeError("SORRY - NO DOUBLE NULL YET") - else: - # Use domains stored in the file - ndomains = npol.size # Number of domains - yup_xsplit = grid.read("yup_xsplit") - ydown_xsplit = grid.read("ydown_xsplit") - yup_xin = grid.read("yup_xin") - 
yup_xout = grid.read("yup_xout") - ydown_xin = grid.read("ydown_xin") - ydown_xout = grid.read("ydown_xout") - - # Calculate starting positions - ystart = np.zeros(ndomains+1) - for i in np.arange(1,ndomains): - ystart[i] = ystart[i-1] + npol[i-1] - ystart[ndomains] = ny - - # Record whether a domain has been visited - visited = np.zeros(ndomains) - - x = 0 # X index - while True: - yinds = None # Y indices result - - # Find a domain which hasn't been visited - domain = None - for i in np.arange(ndomains): - if visited[i] == 0: - domain = i - break - - if domain is None: - # All domains visited - x = x + 1 # Go to next x surface - visited = np.zeros(ndomains) # Clear the visited array - if x == nx: - break # Finished - continue - - # Follow surface back until it hits a boundary - while True: - if x < ydown_xsplit[domain]: - d = ydown_xin[domain] - else: - d = ydown_xout[domain] - if d < 0: - break # Hit boundary - domain = d # Keep going - - # Starting from domain, follow surface - - periodic = False - while domain >= 0: - if visited[domain] == 1: - # Already visited domain -> periodic - periodic = True - break; - # Get range of y indices in this domain - yi = np.arange(ystart[domain], ystart[domain+1]) - if yinds is None: - yinds = yi - else: - yinds = np.concatenate((yinds, yi)) - # mark this domain as visited - visited[domain] = 1 - # Get next domain - if x < yup_xsplit[domain]: - domain = yup_xin[domain] - else: - domain = yup_xout[domain] - - # Finished this surface - yield x, yinds, periodic diff --git a/tools/pylib/boutdata/griddata.py b/tools/pylib/boutdata/griddata.py deleted file mode 100644 index 8246ddd8ab..0000000000 --- a/tools/pylib/boutdata/griddata.py +++ /dev/null @@ -1,493 +0,0 @@ -"""Routines for manipulating grid files - -""" -from __future__ import print_function - -from numpy import ndarray, zeros, concatenate, linspace, amin, amax -import matplotlib.pyplot as plt - -from boututils.datafile import DataFile - - -def slice(infile, outfile, region=None, xind=None, yind=None): - """Copy an X-Y slice from one DataFile to another - - Parameters - ---------- - infile : str - Name of DataFile to read slice from - outfile : str - Name of DataFile to write slice to. File will be created, and - will be overwritten if it already exists - region : {0, 1, 2, 3, 4, 5, None}, optional - Copy a whole region. The available regions are: - - 0: Lower inner leg - - 1: Inner core - - 2: Upper inner leg - - 3: Upper outer leg - - 4: Outer core - - 5: Lower outer leg - xind, yind : (int, int), optional - Index ranges for x and y. Range includes first point, but not - last point - - TODO - ---- - - Rename to not clobber builtin `slice` - - Better regions? 
- - """ - - # Open input and output files - indf = DataFile(infile) - outdf = DataFile(outfile, create=True) - - nx = indf["nx"][0] - ny = indf["ny"][0] - - if region: - # Select a region of the mesh - - xind = [0, nx] - if region == 0: - # Lower inner leg - yind = [0, indf["jyseps1_1"][0]+1] - elif region == 1: - # Inner core - yind = [indf["jyseps1_1"][0]+1, indf["jyseps2_1"][0]+1] - elif region == 2: - # Upper inner leg - yind = [indf["jyseps2_1"][0]+1, indf["ny_inner"][0]] - elif region == 3: - # Upper outer leg - yind = [indf["ny_inner"][0], indf["jyseps1_2"][0]+1] - elif region == 4: - # Outer core - yind = [indf["jyseps1_2"][0]+1, indf["jyseps2_2"][0]+1] - else: - # Lower outer leg - yind = [indf["jyseps2_2"][0]+1, ny] - else: - # Use indices - if not xind: - xind = [0, nx] - if not yind: - yind = [0, ny] - - print("Indices: [%d:%d, %d:%d]" % (xind[0], xind[1], yind[0], yind[1])) - # List of variables requiring special handling - special = ["nx", "ny", "ny_inner", - "ixseps1", "ixseps2", - "jyseps1_1", "jyseps1_2", "jyseps2_1", "jyseps2_2", - "ShiftAngle"] - - outdf["nx"] = xind[1] - xind[0] - outdf["ny"] = yind[1] - yind[0] - outdf["ny_inner"] = indf["ny_inner"][0] - yind[0] - - outdf["ixseps1"] = indf["ixseps1"][0] - outdf["ixseps2"] = indf["ixseps2"][0] - - outdf["jyseps1_1"] = indf["jyseps1_1"][0] - yind[0] - outdf["jyseps2_1"] = indf["jyseps2_1"][0] - yind[0] - outdf["jyseps1_2"] = indf["jyseps1_2"][0] - yind[0] - outdf["jyseps2_2"] = indf["jyseps2_2"][0] - yind[0] - - outdf["ShiftAngle"] = indf["ShiftAngle"][xind[0]:xind[1]] - - # Loop over all variables - for v in list(indf.keys()): - if v in special: - continue # Skip these variables - - ndims = indf.ndims(v) - if ndims == 0: - # Copy scalars - print("Copying variable: " + v) - outdf[v] = indf[v][0] - elif ndims == 2: - # Assume [x,y] - print("Slicing variable: " + v); - outdf[v] = indf[v][xind[0]:xind[1], yind[0]:yind[1]] - else: - # Skip - print("Skipping variable: " + v) - - indf.close() - outdf.close() - - -def rotate(gridfile, yshift, output=None): - """Shifts a grid file by the specified number of points in y - - This moves the branch cut around, and can be used to change the - limiter location - - Parameters - ---------- - gridfile : str - Name of DataFile to rotate - yshift : int - Number of points in y to shift by - output : str, optional - Name of DataFile to write to. 
If None, will write to a new - file with the same name as `gridfile` + '_rot' - - """ - - if output is None: - output = gridfile + "_rot" - - print("Rotating grid file '%s' -> '%s'" % (gridfile, output)) - - # Open input grid file - with DataFile(gridfile) as d: - # Open output file - with DataFile(output, write=True, create=True) as out: - # Loop over variables - for varname in d.list(): - # Number of dimensions - ndims = d.ndims(varname) - - if ndims == 2: - print("Shifting '%s' (x,y)" % (varname,)) - # 2D, assume X-Y - - var = d[varname] # Read - ny = var.shape[1] - - # Make sure yshift is positive and in range - yshift = ((yshift % ny) + ny) % ny - - newvar = ndarray(var.shape) - - # Rotate - newvar[:,0:(ny-yshift)] = var[:,yshift:ny] - newvar[:,(ny-yshift):] = var[:,:yshift] - - # Write to output - #out[varname] = newvar # Write - out.write(varname, newvar) - elif ndims == 3: - print("Shifting '%s' (x,y,z)" % (varname,)) - # 3D, assume X-Y-Z - - var = d[varname] # Read - ny = var.shape[1] - - # Make sure yshift is positive and in range - yshift = ((yshift % ny) + ny) % ny - - newvar = ndarray(var.shape) - - newvar[:,0:(ny-yshift),:] = var[:,yshift:ny,:] - newvar[:,(ny-yshift):,:] = var[:,:yshift,:] - - # Write to output - out.write(varname, newvar) - else: - # Just copy - print("Copying '%s' (%d dimensions)" % (varname, ndims)) - out.write(varname, d[varname]) - - - -def gridcontourf(grid, data2d, nlevel=31, show=True, - mind=None, maxd=None, symmetric=False, - cmap=None, ax=None, - xlabel="Major radius [m]", ylabel="Height [m]", - separatrix=False): - """Plots a 2D contour plot, taking into account branch cuts - (X-points). - - Parameters - ---------- - grid : DataFile - A DataFile object - data2d : array_like - A 2D (x,y) NumPy array of data to plot - nlevel : int, optional - Number of levels in the contour plot - show : bool, optional - If True, will immediately show the plot - mind : float, optional - Minimum data level - maxd : float, optional - Maximum data level - symmetric : bool, optional - Make mind, maxd symmetric about zero - cmap : Colormap, optional - A matplotlib colormap to use. If None, use the current default - ax : Axes, optional - A matplotlib axes instance to plot to. 
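A short usage sketch for the rotate helper above, assuming an existing grid file (the file name is illustrative); by default the output is written to the input name with '_rot' appended:

>>> from boutdata.griddata import rotate
>>> rotate("bout.grd.nc", 16)          # writes "bout.grd.nc_rot"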
If None, create a new - figure and axes, and plot to that - xlabel, ylabel : str, optional - Labels for the x/y axes - separatrix : bool, optional - Add separatrix - - Returns - ------- - con - The contourf instance - - Examples - -------- - - To put a plot into an axis with a color bar: - - >>> fig, axis = plt.subplots() - >>> c = gridcontourf(grid, data, show=False, ax=axis) - >>> fig.colorbar(c, ax=axis) - >>> plt.show() - - TODO - ---- - - Move into a plotting module - - """ - - if cmap is None: - cmap = plt.cm.get_cmap("YlOrRd") - - if len(data2d.shape) != 2: - raise ValueError("data2d must be 2D (x,y)") - - j11 = grid["jyseps1_1"] - j12 = grid["jyseps1_2"] - j21 = grid["jyseps2_1"] - j22 = grid["jyseps2_2"] - ix1 = grid["ixseps1"] - ix2 = grid["ixseps2"] - try: - nin = grid["ny_inner"] - except: - nin = j12 - - nx = grid["nx"] - ny = grid["ny"] - - if (data2d.shape[0] != nx) or (data2d.shape[1] != ny): - raise ValueError("data2d has wrong size: (%d,%d), expected (%d,%d)" % (data2d.shape[0], data2d.shape[1], nx, ny)) - - if hasattr(j11, "__len__"): - # Arrays rather than scalars - try: - j11 = j11[0] - j12 = j12[0] - j21 = j21[0] - j22 = j22[0] - ix1 = ix1[0] - ix2 = ix2[0] - nin = nin[0] - nx = nx[0] - ny = ny[0] - except: - pass - - R = grid["Rxy"] - Z = grid["Zxy"] - - if data2d.shape != (nx, ny): - raise ValueError("Dimensions do not match") - - add_colorbar = False - if ax is None: - fig = plt.figure() - ax = fig.add_subplot(111) - add_colorbar = True - - if mind is None: - mind = amin(data2d) - if maxd is None: - maxd = amax(data2d) - - if symmetric: - # Make mind, maxd symmetric about zero - maxd = max([maxd, abs(mind)]) - mind = -maxd - - levels = linspace(mind, maxd, nlevel, endpoint=True) - - ystart = 0 # Y index to start the next section - if j11 >= 0: - # plot lower inner leg - ax.contourf(R[:,ystart:(j11+1)], Z[:,ystart:(j11+1)], data2d[:,ystart:(j11+1)], levels,cmap=cmap) - - yind = [j11, j22+1] - ax.contourf(R[:ix1, yind].transpose(), Z[:ix1, yind].transpose(), data2d[:ix1, yind].transpose(), levels,cmap=cmap) - - ax.contourf(R[ix1:,j11:(j11+2)], Z[ix1:,j11:(j11+2)], data2d[ix1:,j11:(j11+2)], levels,cmap=cmap) - ystart = j11+1 - - yind = [j22, j11+1] - ax.contourf(R[:ix1, yind].transpose(), Z[:ix1, yind].transpose(), data2d[:ix1, yind].transpose(), levels, cmap=cmap) - - # Inner SOL - con = ax.contourf(R[:,ystart:(j21+1)], Z[:,ystart:(j21+1)], data2d[:,ystart:(j21+1)], levels, cmap=cmap) - ystart = j21+1 - - if j12 > j21: - # Contains upper PF region - - # Inner leg - ax.contourf(R[ix1:,j21:(j21+2)], Z[ix1:,j21:(j21+2)], data2d[ix1:,j21:(j21+2)], levels, cmap=cmap) - ax.contourf(R[:,ystart:nin], Z[:,ystart:nin], data2d[:,ystart:nin], levels, cmap=cmap) - - # Outer leg - ax.contourf(R[:,nin:(j12+1)], Z[:,nin:(j12+1)], data2d[:,nin:(j12+1)], levels, cmap=cmap) - ax.contourf(R[ix1:,j12:(j12+2)], Z[ix1:,j12:(j12+2)], data2d[ix1:,j12:(j12+2)], levels, cmap=cmap) - ystart = j12+1 - - yind = [j21, j12+1] - ax.contourf(R[:ix1, yind].transpose(), Z[:ix1, yind].transpose(), data2d[:ix1, yind].transpose(), levels, cmap=cmap) - - yind = [j21+1, j12] - ax.contourf(R[:ix1, yind].transpose(), Z[:ix1, yind].transpose(), data2d[:ix1, yind].transpose(), levels, cmap=cmap) - else: - ystart -= 1 - # Outer SOL - ax.contourf(R[:,ystart:(j22+1)], Z[:,ystart:(j22+1)], data2d[:,ystart:(j22+1)], levels, cmap=cmap) - - ystart = j22+1 - - if j22+1 < ny: - # Outer leg - ax.contourf(R[ix1:,j22:(j22+2)], Z[ix1:,j22:(j22+2)], data2d[ix1:,j22:(j22+2)], levels, cmap=cmap) - ax.contourf(R[:,ystart:ny], 
Z[:,ystart:ny], data2d[:,ystart:ny], levels, cmap=cmap) - - # X-point - Rx = [ [R[ix1-1,j11], R[ix1,j11], R[ix1,j11+1], R[ix1-1,j11+1]], - [R[ix1-1,j22+1], R[ix1,j22+1], R[ix1,j22], R[ix1-1,j22]] ] - - - Zx = [ [Z[ix1-1,j11], Z[ix1,j11], Z[ix1,j11+1], Z[ix1-1,j11+1]], - [Z[ix1-1,j22+1], Z[ix1,j22+1], Z[ix1,j22], Z[ix1-1,j22]] ] - Dx = [ [data2d[ix1-1,j11], data2d[ix1,j11], data2d[ix1,j11+1], data2d[ix1-1,j11+1]], - [data2d[ix1-1,j22+1], data2d[ix1,j22+1], data2d[ix1,j22], data2d[ix1-1,j22]] ] - ax.contourf(Rx, Zx, Dx, levels, cmap=cmap) - - if add_colorbar: - fig.colorbar(con) - - ax.set_aspect("equal") - if xlabel is not None: - ax.set_xlabel(xlabel) - if ylabel is not None: - ax.set_ylabel(ylabel) - - if separatrix: - # Plot separatrix - - # Lower X-point location - Rx = 0.125*(R[ix1-1,j11] + R[ix1,j11] + R[ix1,j11+1] + R[ix1-1,j11+1] - + R[ix1-1,j22+1] + R[ix1,j22+1] + R[ix1,j22] + R[ix1-1,j22]) - Zx = 0.125*(Z[ix1-1,j11] + Z[ix1,j11] + Z[ix1,j11+1] + Z[ix1-1,j11+1] - + Z[ix1-1,j22+1] + Z[ix1,j22+1] + Z[ix1,j22] + Z[ix1-1,j22]) - # Lower inner leg - ax.plot( concatenate( (0.5*(R[ix1-1,0:(j11+1)] + R[ix1,0:(j11+1)]), [Rx]) ), concatenate( (0.5*(Z[ix1-1,0:(j11+1)] + Z[ix1,0:(j11+1)]), [Zx]) ), 'k-') - # Lower outer leg - ax.plot( concatenate( ([Rx],0.5*(R[ix1-1,(j22+1):] + R[ix1,(j22+1):])) ), concatenate( ([Zx], 0.5*(Z[ix1-1,(j22+1):] + Z[ix1,(j22+1):])) ), 'k-') - # Core - - ax.plot( concatenate( ([Rx], 0.5*(R[ix1-1,(j11+1):(j21+1)] + R[ix1,(j11+1):(j21+1)]), 0.5*(R[ix1-1,(j12+1):(j22+1)] + R[ix1,(j12+1):(j22+1)]), [Rx]) ), - concatenate( ([Zx], 0.5*(Z[ix1-1,(j11+1):(j21+1)] + Z[ix1,(j11+1):(j21+1)]), 0.5*(Z[ix1-1,(j12+1):(j22+1)] + Z[ix1,(j12+1):(j22+1)]), [Zx]) ), 'k-') - if show: - plt.show() - - return con - - -def bout2sonnet(grdname, outf): - """Creates a Sonnet format grid from a BOUT++ grid. - - NOTE: Branch cuts are not yet supported - - Parameters - ---------- - grdname : str - Filename of BOUT++ grid file - outf : File - The file-like object to write to - - Examples - -------- - - >>> with open("output.sonnet", "w") as f: - ... bout2sonnet("BOUT.grd.nc", f) - - """ - - with DataFile(grdname) as g: - Rxy = g["Rxy"] - Zxy = g["Zxy"] - Bpxy = g["Bpxy"] - Btxy = g["Btxy"] - Bxy = g["Bxy"] - - # Now iterate over cells in the order Eirene expects - - nx, ny = Rxy.shape - - # Extrapolate values in Y - R = zeros([nx,ny+2]) - Z = zeros([nx,ny+2]) - - R[:,1:-1] = Rxy - Z[:,1:-1] = Zxy - - R[:,0] = 2.*R[:,1] - R[:,2] - Z[:,0] = 2.*Z[:,1] - Z[:,2] - - R[:,-1] = 2.*R[:,-2] - R[:,-3] - Z[:,-1] = 2.*Z[:,-2] - Z[:,-3] - - element = 1 # Element number - - outf.write("BOUT++: "+grdname+"\n\n") - - outf.write("=====================================\n") - - for i in range(2, nx-2): - # Loop in X, excluding guard cells - for j in range(1,ny+1): - # Loop in Y. 
Guard cells not in grid file - - # Lower left (low Y, low X) - ll = ( 0.25*(R[i-1,j-1] + R[i-1,j] + R[i,j-1] + R[i,j]), - 0.25*(Z[i-1,j-1] + Z[i-1,j] + Z[i,j-1] + Z[i,j]) ) - - # Lower right (low Y, upper X) - lr = ( 0.25*(R[i+1,j-1] + R[i+1,j] + R[i,j-1] + R[i,j]), - 0.25*(Z[i+1,j-1] + Z[i+1,j] + Z[i,j-1] + Z[i,j]) ) - - # Upper left (upper Y, lower X) - ul = ( 0.25*(R[i-1,j+1] + R[i-1,j] + R[i,j+1] + R[i,j]), - 0.25*(Z[i-1,j+1] + Z[i-1,j] + Z[i,j+1] + Z[i,j]) ) - - # Upper right (upper Y, upper X) - ur = ( 0.25*(R[i+1,j+1] + R[i+1,j] + R[i,j+1] + R[i,j]), - 0.25*(Z[i+1,j+1] + Z[i+1,j] + Z[i,j+1] + Z[i,j]) ) - - # Element number - outf.write(" ELEMENT %d = ( %d, %d): (%e, %e) (%e, %e)\n" % ( - element, - j-1, i-2, - ll[0], ll[1], - ul[0], ul[1])) - - # Ratio Bt / Bp at cell centre. Note j-1 because - # Bpxy and Btxy have not had extra points added - outf.write(" FIELD RATIO = %e (%e, %e)\n" % (Bpxy[i,j-1] / Btxy[i,j-1], R[i,j], Z[i,j]) ) - - outf.write(" (%e, %e) (%e, %e)\n" % ( - lr[0], lr[1], - ur[0], ur[1])) - - if (i == nx-3) and (j == ny+1): - # Last element - outf.write("=====================================\n") - else: - outf.write("-------------------------------------\n") - - element += 1 diff --git a/tools/pylib/boutdata/input.py b/tools/pylib/boutdata/input.py deleted file mode 100644 index 46759cfaa5..0000000000 --- a/tools/pylib/boutdata/input.py +++ /dev/null @@ -1,58 +0,0 @@ -"""Fourier transform data for input to BOUT++ - -""" -from builtins import range - -from numpy.fft import rfft -from numpy import ndarray - - -def transform3D(arr): - """Fourier transforms a 3D array in the Z dimension - - BOUT++ can take 3D inputs to be Fourier transformed in the Z - direction. - - Parameters - ---------- - arr : array_like - Input 3-D array - - Returns - ------- - array_like - A 3D array [x,y,kz] where kz is organised in the standard FFT - order, with constant (DC, kz=0) component first, followed by - real/imaginary pairs. - - kz = [0, (real, imag), (real, imag), ...] 
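A small worked sketch of the packing described above, using transform3D from the removed boutdata.input module (the array sizes are arbitrary):

>>> import numpy as np
>>> from boutdata.input import transform3D
>>> arr = np.random.rand(4, 3, 8)      # [x, y, z] with nz = 8
>>> transform3D(arr).shape             # 1 DC entry + 2*(nz/2) real/imag pairs
(4, 3, 9)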
- - """ - - if len(arr.shape) != 3: - raise ValueError("Input array must be 3D") - - # Take FFT over z (last index), returning a complex array - fa = rfft(arr, axis=-1) - - nmodes = fa.shape[-1] - - # scipy fft normalises to N, but fftw doesn't - fa /= arr.shape[-1] - # Unpack complex array into a real array - - shape = list(arr.shape) - shape[-1] = 1 + (nmodes-1)*2 # One for DC + 2 for other modes - - result = ndarray(shape) - - # kz = 0 (DC) component only has real part - result[:,:,0] = fa[:,:,0].real - - # All other components have both real and imaginary parts - for k in range(1,nmodes): - result[:,:,2*k-1] = fa[:,:,k].real - result[:,:,2*k] = fa[:,:,k].imag - - return result - diff --git a/tools/pylib/boutdata/mayavi2.py b/tools/pylib/boutdata/mayavi2.py deleted file mode 100644 index a32b7433a1..0000000000 --- a/tools/pylib/boutdata/mayavi2.py +++ /dev/null @@ -1,118 +0,0 @@ -from __future__ import print_function -from builtins import range - -import numpy as np -from numpy import cos, sin, pi - -from enthought.tvtk.api import tvtk -from enthought.mayavi.scripts import mayavi2 - -def aligned_points(grid, nz=1, period=1.0, maxshift=0.4): - try: - nx = grid["nx"] - ny = grid["ny"] - zshift = grid["zShift"] - Rxy = grid["Rxy"] - Zxy = grid["Zxy"] - except: - print("Missing required data") - return None - - dz = 2.*pi / (period * (nz-1)) - phi0 = np.linspace(0,2.*pi / period, nz) - - # Need to insert additional points in Y so mesh looks smooth - #for y in range(1,ny): - # ms = np.max(np.abs(zshift[:,y] - zshift[:,y-1])) - # if( - - # Create array of points, structured - points = np.zeros([nx*ny*nz, 3]) - - start = 0 - for y in range(ny): - end = start + nx*nz - - phi = zshift[:,y] + phi0[:,None] - r = Rxy[:,y] + (np.zeros([nz]))[:,None] - - xz_points = points[start:end] - xz_points[:,0] = (r*cos(phi)).ravel() # X - xz_points[:,1] = (r*sin(phi)).ravel() # Y - xz_points[:,2] = (Zxy[:,y]+(np.zeros([nz]))[:,None]).ravel() # Z - - start = end - - return points - -def create_grid(grid, data, period=1): - - s = np.shape(data) - - nx = grid["nx"] - ny = grid["ny"] - nz = s[2] - - print("data: %d,%d,%d grid: %d,%d\n" % (s[0],s[1],s[2], nx,ny)) - - dims = (nx, nz, ny) - sgrid = tvtk.StructuredGrid(dimensions=dims) - pts = aligned_points(grid, nz, period) - print(np.shape(pts)) - sgrid.points = pts - - scalar = np.zeros([nx*ny*nz]) - start = 0 - for y in range(ny): - end = start + nx*nz - scalar[start:end] = (data[:,y,:]).transpose().ravel() - print(y, " = " , np.max(scalar[start:end])) - start = end - - sgrid.point_data.scalars = np.ravel(scalar.copy()) - sgrid.point_data.scalars.name = "data" - - return sgrid - -@mayavi2.standalone -def view3d(sgrid): - from enthought.mayavi.sources.vtk_data_source import VTKDataSource - from enthought.mayavi.modules.api import Outline, GridPlane - - mayavi.new_scene() - src = VTKDataSource(data=sgrid) - mayavi.add_source(src) - mayavi.add_module(Outline()) - g = GridPlane() - g.grid_plane.axis = 'x' - mayavi.add_module(g) - -if __name__ == '__main__': - from boutdata.collect import collect - from boututils.file_import import file_import - - path = "/media/449db594-b2fe-4171-9e79-2d9b76ac69b6/runs/data_33/" - #path="/home/ben/run4" - - #g = file_import("../cbm18_dens8.grid_nx68ny64.nc") - g = file_import("data/cbm18_8_y064_x516_090309.nc") - #g = file_import("/home/ben/run4/reduced_y064_x256.nc") - - data = collect("P", tind=50, path=path) - data = data[0,:,:,:] - s = np.shape(data) - nz = s[2] - - bkgd = collect("P0", path=path) - for z in range(nz): - 
data[:,:,z] += bkgd - - # Create a structured grid - sgrid = create_grid(g, data, 10) - - - w = tvtk.XMLStructuredGridWriter(input=sgrid, file_name='sgrid.vts') - w.write() - - # View the structured grid - view3d(sgrid) diff --git a/tools/pylib/boutdata/mms.py b/tools/pylib/boutdata/mms.py deleted file mode 100644 index b95ff175e1..0000000000 --- a/tools/pylib/boutdata/mms.py +++ /dev/null @@ -1,591 +0,0 @@ -""" Functions for calculating sources for the - Method of Manufactured Solutions (MMS) - -""" -from __future__ import print_function -from __future__ import division - -from sympy import symbols, cos, sin, diff, sqrt, pi, simplify, trigsimp, Wild - -from numpy import arange, zeros - -# Constants -qe = 1.602e-19 -Mp = 1.67262158e-27 -mu0 = 4.e-7*3.141592653589793 - -# Define symbols - -x = symbols('x') -y = symbols('y') -z = symbols('z') -t = symbols('t') - -class Metric(object): - def __init__(self): - # Create an identity metric - self.x = symbols('x\'') - self.y = symbols('y\'') - self.z = symbols('z\'') - - self.g11 = self.g22 = self.g33 = 1.0 - self.g12 = self.g23 = self.g13 = 0.0 - - self.g_11 = self.g_22 = self.g_33 = 1.0 - self.g_12 = self.g_23 = self.g_13 = 0.0 - - self.J = 1.0 - self.B = 1.0 - -identity = Metric() - -# Basic differencing -def ddt(f): - """Time derivative""" - return diff(f, t) - - -def DDX(f, metric = identity): - return diff(f, metric.x) - -def DDY(f, metric = identity): - return diff(f, metric.y) - -def DDZ(f, metric = identity): - return diff(f, metric.z) - - -def D2DX2(f, metric = identity): - return diff(f, metric.x, 2) - -def D2DY2(f, metric = identity): - return diff(f, metric.y, 2) - -def D2DZ2(f, metric = identity): - return diff(f, metric.z, 2) - - -def D2DXDY(f, metric = identity): - message = "* WARNING: D2DXDY is currently not set in BOUT++."+\ - " Check src/sys/derivs.cxx if situation has changed. *" - print("\n"*3) - print("*"*len(message)) - print(message) - print("*"*len(message)) - print("\n"*3) - return DDX(DDY(f, metric), metric) - -def D2DXDZ(f, metric = identity): - return DDX(DDZ(f, metric), metric) - -def D2DYDZ(f, metric = identity): - return DDY(DDZ(f, metric), metric) - -# Operators - -def bracket(f, g, metric = identity): - """ - Calculates [f,g] symbolically - """ - - dfdx = diff(f, metric.x) - dfdz = diff(f, metric.z) - - dgdx = diff(g, metric.x) - dgdz = diff(g, metric.z) - - return dfdz * dgdx - dfdx * dgdz - -def b0xGrad_dot_Grad(phi, A, metric = identity): - """ - Perpendicular advection operator, including - derivatives in y - - Note: If y derivatives are neglected, then this reduces - to bracket(f, g, metric) * metric.B - (in a Clebsch coordinate system) - - """ - dpdx = DDX(phi, metric) - dpdy = DDY(phi, metric) - dpdz = DDZ(phi, metric) - - vx = metric.g_22*dpdz - metric.g_23*dpdy; - vy = metric.g_23*dpdx - metric.g_12*dpdz; - vz = metric.g_12*dpdy - metric.g_22*dpdx; - - return (+ vx*DDX(A, metric) - + vy*DDY(A, metric) - + vz*DDZ(A, metric) ) / (metric.J*sqrt(metric.g_22)) - -def Delp2(f, metric = identity, all_terms=True): - """ Laplacian in X-Z - - If all_terms is false then first derivative terms are neglected. 
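As a rough consistency check for the operator above: with the identity metric, Delp2 reduces to d2f/dx2 + d2f/dz2 (note that the identity Metric defines its own primed symbols):

>>> from sympy import sin, cos
>>> from boutdata.mms import Metric, Delp2
>>> m = Metric()                       # identity metric with symbols x', y', z'
>>> Delp2(sin(m.x)*cos(m.z), m)        # expect -2.0*sin(x')*cos(z')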
- By default all_terms is true, but can be disabled - in the BOUT.inp file (laplace section) - - """ - d2fdx2 = diff(f, metric.x, 2) - d2fdz2 = diff(f, metric.z, 2) - d2fdxdz = diff(f, metric.x, metric.z) - - result = metric.g11*d2fdx2 + metric.g33*d2fdz2 + 2.*metric.g13*d2fdxdz - - if all_terms: - G1 = (DDX(metric.J*metric.g11, metric) + DDY(metric.J*metric.g12, metric) + DDZ(metric.J*metric.g13, metric)) / metric.J - G3 = (DDX(metric.J*metric.g13, metric) + DDY(metric.J*metric.g23, metric) + DDZ(metric.J*metric.g33, metric)) / metric.J - result += G1 * diff(f, metric.x) + G3 * diff(f, metric.z) - - return result - -def Delp4(f, metric = identity): - d4fdx4 = diff(f, metric.x, 4) - d4fdz4 = diff(f, metric.z, 4) - - return d4fdx4 + d4fdz4 - -def Grad_par(f, metric = identity): - """The parallel gradient""" - return diff(f, metric.y) / sqrt(metric.g_22) - -def Vpar_Grad_par(v, f, metric = identity): - """Parallel advection operator $$v_\parallel \cdot \nabla_\parallel (f)$$""" - return v * Grad_par(f, metric=metric) - -def Div_par(f, metric=identity): - ''' - Divergence of magnetic field aligned vector $$v = \hat{b} f - \nabla \cdot (\hat{b} f) = 1/J \partial_y (f/B) - = B Grad_par(f/B)$$ - ''' - return metric.B*Grad_par(f/metric.B, metric) - -def Laplace(f, metric=identity): - """The full Laplace operator""" - G1 = (DDX(metric.J*metric.g11, metric) + DDY(metric.J*metric.g12, metric) + DDZ(metric.J*metric.g13, metric)) / metric.J - G2 = (DDX(metric.J*metric.g12, metric) + DDY(metric.J*metric.g22, metric) + DDZ(metric.J*metric.g23, metric)) / metric.J - G3 = (DDX(metric.J*metric.g13, metric) + DDY(metric.J*metric.g23, metric) + DDZ(metric.J*metric.g33, metric)) / metric.J - - result = G1*DDX(f, metric) + G2*DDY(f, metric) + G3*DDZ(f, metric)\ - + metric.g11*D2DX2(f, metric) + metric.g22*D2DY2(f, metric) + metric.g33*D2DZ2(f, metric)\ - + 2.0*(metric.g12*D2DXDY(f, metric) + metric.g13*D2DXDZ(f, metric) + metric.g23*D2DYDZ(f, metric)) - - return result - -def Laplace_par(f, metric=identity): - """ - Div( b (b.Grad(f) ) ) = (1/J) d/dy ( J/g_22 * df/dy ) - """ - return diff( (metric.J/metric.g_22)*diff(f, metric.y), metric.y)/ metric.J - -def Laplace_perp(f, metric=identity): - """ - The perpendicular Laplace operator - - Laplace_perp = Laplace - Laplace_par - """ - return Laplace(f, metric) - Laplace_par(f, metric) - -# Convert expression to string - -def trySimplify(expr): - """ - Tries to simplify an expression - """ - try: - return simplify(expr) - except ValueError: - return expr - -def exprToStr(expr): - """ Convert a sympy expression to a string for BOUT++ input - """ - - s = str(expr).replace("**", "^") # Replace exponent operator - - # Try to remove lots of 1.0*... - s = s.replace("(1.0*", "(") - s = s.replace(" 1.0*", " ") - - return s - -def exprMag(expr): - """ - Estimate the magnitude of an expression - - """ - - # Replace all sin, cos with 1 - any = Wild('a') # Wildcard - expr = expr.replace(sin(any), 1.0) - expr = expr.replace(cos(any), 1.0) - - # Pick maximum values of x,y,z - expr = expr.subs(x, 1.0) - expr = expr.subs(y, 2.*pi) - expr = expr.subs(z, 2.*pi) - - return expr.evalf() - -################################## - -class SimpleTokamak(object): - """ - Simple tokamak - - NOTE: This is NOT an equilibrium calculation. 
The input - is intended solely for testing with MMS - """ - def __init__(self, R = 2, Bt = 1.0, eps = 0.1, dr=0.02, q = lambda x:2+x**2): - """ - R - Major radius [m] - - Bt - Toroidal field [T] - - eps - Inverse aspect ratio - - dr - Width of the radial region [m] - - q(x) - A function which returns the safety factor - as a function of x in range [0,1] - - - Coordinates: - x - Radial, [0,1] - y - Poloidal, [0,2pi]. Origin is at inboard midplane. - - - """ - # X has a range [0,1], and y [0,2pi] - #x, y = symbols("x y") - - self.x = x - self.y = y - - self.R = R - - self.dr = dr - - # Minor radius - self.r = R * eps - - # Get safety factor - self.q = q(x) - - # Toroidal angle of a field-line as function - # of poloidal angle y - self.zShift = self.q*(y + eps * sin(y)) - - # Field-line pitch - self.nu = self.q*(1 + eps*cos(y)) #diff(self.zShift, y) - - # Coordinates of grid points - self.Rxy = R - self.r * cos(y) - self.Zxy = self.r * sin(y) - - # Poloidal arc length - self.hthe = self.r + 0.*x - - # Toroidal magnetic field - self.Btxy = Bt * R / self.Rxy - - # Poloidal magnetic field - self.Bpxy = self.Btxy * self.hthe / (self.nu * self.Rxy) - - # Total magnetic field - self.Bxy = sqrt(self.Btxy**2 + self.Bpxy**2) - - # Approximate poloidal field for radial width calculation - Bp0 = Bt * self.r / (q(0.5) * R) - print("Bp0 = %e" % Bp0) - - # dx = Bp * R * dr -- width of the box in psi space - self.psiwidth = Bp0 * R * dr - print("psi width = %e" % self.psiwidth) - - # Integrated shear - self.sinty = diff(self.zShift, x) / self.psiwidth - - # Extra expressions to add to grid file - self._extra = {} - - def add(self, expr, name): - """ - Add an additional expression to be written to the grid files - - """ - self._extra[name] = expr - - - def write(self, nx, ny, output, MXG=2): - """ - Outputs a tokamak shape to a grid file - - nx - Number of radial grid points, not including guard cells - ny - Number of poloidal (parallel) grid points - output - boututils.datafile object, e.g., an open netCDF file - MXG, Number of guard cells in the x-direction - """ - - ngx = nx + 2*MXG - ngy = ny - - # Create an x and y grid to evaluate expressions on - xarr = (arange(nx + 2*MXG) - MXG + 0.5) / nx - yarr = 2.*pi*arange(ny)/ny - - output.write("nx", ngx) - output.write("ny", ngy) - - dx = self.psiwidth / nx + 0.*self.x - dy = 2.*pi / ny + 0.*self.x - - for name, var in [ ("dx", dx), - ("dy", dy), - ("Rxy", self.Rxy), - ("Zxy", self.Zxy), - ("Btxy", self.Btxy), - ("Bpxy", self.Bpxy), - ("Bxy", self.Bxy), - ("hthe", self.hthe), - ("sinty", self.sinty), - ("zShift", self.zShift)]: - - # Note: This is slow, and could be improved using something like lambdify - values = zeros([ngx, ngy]) - for i, x in enumerate(xarr): - for j, y in enumerate(yarr): - values[i,j] = var.evalf(subs={self.x:x, self.y:y}) - - output.write(name, values) - - for name, var in list(self._extra.items()): - values = zeros([ngx, ngy]) - for i, x in enumerate(xarr): - for j, y in enumerate(yarr): - values[i,j] = var.evalf(subs={self.x:x, self.y:y}) - - output.write(name, values) - - shiftAngle = zeros(ngx) - for i, x in enumerate(xarr): - shiftAngle[i] = 2.*pi*self.q.evalf(subs={self.x:x}) - - output.write("ShiftAngle", shiftAngle) - - def metric(self): - """ - Returns an analytic metric tensor - """ - m = Metric() - - # Set symbols for x and y directions - m.x = self.x - m.y = self.y - - # Calculate metric tensor - - m.g11 = (self.Rxy * self.Bpxy)**2 - m.g22 = 1./self.hthe**2 - m.g33 = self.sinty**2*m.g11 + self.Bxy**2/m.g11 - m.g12 = 0.0*x - 
m.g13 = -self.sinty*m.g11 - m.g23 = -self.Btxy / (self.hthe * self.Bpxy * self.R) - - m.g_11 = 1./m.g11 + (self.sinty*self.Rxy)**2 - m.g_22 = (self.Bxy * self.hthe / self.Bpxy)**2 - m.g_33 = self.Rxy**2 - m.g_12 = self.Btxy*self.hthe*self.sinty*self.Rxy / self.Bpxy - m.g_13 = self.sinty*self.Rxy**2 - m.g_23 = self.Btxy*self.hthe*self.Rxy / self.Bpxy - - m.J = self.hthe / self.Bpxy - m.B = self.Bxy - - # Convert all "x" symbols from [0,1] into flux - m.Lx = self.psiwidth - xsub = m.x / self.psiwidth - - m.g11 = m.g11.subs(x, xsub) - m.g22 = m.g22.subs(x, xsub) - m.g33 = m.g33.subs(x, xsub) - m.g12 = m.g12.subs(x, xsub) - m.g13 = m.g13.subs(x, xsub) - m.g23 = m.g23.subs(x, xsub) - - m.g_11 = m.g_11.subs(x, xsub) - m.g_22 = m.g_22.subs(x, xsub) - m.g_33 = m.g_33.subs(x, xsub) - m.g_12 = m.g_12.subs(x, xsub) - m.g_13 = m.g_13.subs(x, xsub) - m.g_23 = m.g_23.subs(x, xsub) - - m.J = m.J.subs(x, xsub) - m.B = m.B.subs(x, xsub) - - return m - -########################## -# Shaped tokamak - -class ShapedTokamak(object): - def __init__(self, Rmaj=6.0, rmin=2.0, dr=0.1, kappa=1.0, delta=0.0, b=0.0, ss=0.0, Bt0=1.0, Bp0 = 0.2): - """ - Rmaj - Major radius [m] - rmin - Minor radius [m] - dr - Radial width of region [m] - - kappa - Ellipticity, 1 for a circle - delta - Triangularity, 0 for circle - b - Indentation ("bean" shape), 0 for circle - - ss - Shafranov shift [m] - - Bt0 - Toroidal magnetic field on axis [T]. Varies as 1/R - Bp0 - Poloidal field at outboard midplane [T] - - Outputs - ------- - - Assigns member variables - - x, y - Symbols for x and y coordinates - - R (x,y) - Z (x,y) - - """ - - # X has a range [0,1], and y [0,2pi] - x, y = symbols("x y") - - # Minor radius as function of x - rminx = rmin + (x-0.5)*dr - - # Analytical expression for R and Z coordinates as function of x and y - Rxy = Rmaj - b + (rminx + b*cos(y))*cos(y + delta*sin(y)) + ss*(0.5-x)*(dr/rmin) - Zxy = kappa * rminx * sin(y) - - # Toroidal magnetic field - Btxy = Bt0 * Rmaj / Rxy - - # Poloidal field. dx constant, so set poloidal field - # at outboard midplane (y = 0) - # NOTE: Approximate calculation - - # Distance between flux surface relative to outboard midplane. 
- expansion = (1 - (old_div(ss,rmin))*cos(y))/(1 - (ss/rmin)) - - Bpxy = Bp0 * ((Rmaj + rmin) / Rxy) / expansion - - # Calculate hthe - hthe = sqrt(diff(Rxy, y)**2 + diff(Zxy, y)**2) - try: - hthe = trigsimp(hthe) - except ValueError: - pass - - # Field-line pitch - nu = Btxy * hthe / (Bpxy * Rxy) - - # Shift angle - # NOTE: Since x has a range [0,1] this could be done better - # than ignoring convergence conditions - self.zShift = integrate(nu, y, conds='none') - - # Safety factor - self.shiftAngle = self.zShift.subs(y, 2*pi) - self.zShift.subs(y, 0) - - # Integrated shear - self.I = diff(self.zShift, x) - - self.x = x - self.y = y - - self.R = Rxy - self.Z = Zxy - - self.Bt = Btxy - self.Bp = Bpxy - self.B = sqrt(Btxy**2 + Bpxy**2) - - self.hthe = hthe - - def write(self, nx, ny, filename, MXG=2): - """ - Outputs a tokamak shape to a grid file - - nx - Number of radial grid points, not including guard cells - ny - Number of poloidal (parallel) grid points - output - boututils.datafile object, e.g., an open netCDF file - MXG, Number of guard cells in the x-direction - """ - - ngx = nx + 2*MXG - ngy = ny - - # Create an x and y grid to evaluate expressions on - xarr = (arange(nx + 2*MXG) - MXG + 0.5) / nx - yarr = 2.*pi*arange(ny)/ny - - Rxy = zeros([ngx, ngy]) - Zxy = zeros([ngx, ngy]) - - Btxy = zeros([ngx, ngy]) - Bpxy = zeros([ngx, ngy]) - - hthe = zeros([ngx, ngy]) - - - I = zeros([ngx, ngy]) - - # Note: This is slow, and could be improved using something like lambdify - for i, x in enumerate(xarr): - for j, y in enumerate(yarr): - Rxy[i,j] = self.R.evalf(subs={self.x:x, self.y:y}) - Zxy[i,j] = self.Z.evalf(subs={self.x:x, self.y:y}) - - Btxy[i,j] = self.Bt.evalf(subs={self.x:x, self.y:y}) - Bpxy[i,j] = self.Bp.evalf(subs={self.x:x, self.y:y}) - - hthe[i,j] = self.hthe.evalf(subs={self.x:x, self.y:y}) - - - plt.plot(Rxy[i,:], Zxy[i,:]) - plt.show() - - Bxy = sqrt(Btxy**2 + Bpxy**2) - - def metric(self): - """ - Returns an analytic metric tensor - """ - m = Metric() - - # Set symbols for x and y directions - m.x = self.x - m.y = self.y - - # Calculate metric tensor - - m.g11 = (self.R * self.Bp)**2 - m.g22 = 1./self.hthe**2 - m.g33 = self.I**2*m.g11 + self.B**2 / m.g11 - m.g12 = 0.0 - m.g13 = -self.I*m.g11 - m.g23 = -self.Bt / (self.hthe * self.Bp * self.R) - - m.g_11 = 1./m.g11 + (self.I*self.R)**2 - m.g_22 = (self.B * self.hthe / self.Bpxy)**2 - m.g_33 = self.R**2 - m.g_12 = self.Bt*self.hthe*self.I*self.R / self.Bp - m.g_13 = self.I*self.R**2 - m.g_23 = self.Bt*self.hthe*self.R / self.Bp - - m.J = self.hthe / self.Bp - m.B = self.B - - return m - - diff --git a/tools/pylib/boutdata/pol_slice.py b/tools/pylib/boutdata/pol_slice.py deleted file mode 100644 index 7adea90c50..0000000000 --- a/tools/pylib/boutdata/pol_slice.py +++ /dev/null @@ -1,110 +0,0 @@ -from __future__ import print_function -from __future__ import division - -from boututils.datafile import DataFile -import numpy as np -from scipy.ndimage import map_coordinates - - -def pol_slice(var3d, gridfile, n=1, zangle=0.0, nyInterp=None): - """Takes a 3D variable, and returns a 2D slice at fixed toroidal angle - - Parameters - ---------- - var3d : array_like - The input array. Should be 3D - gridfile : str - The gridfile containing the coordinate system to used - n : int, optional - The number of times the data must be repeated for a full torus, - e.g. n=2 is half a torus - zangle : float, optional - The (real) toroidal angle of the result - nyInterp : int, optional - The number of y (theta) points to use in the final result. 
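A hedged usage sketch for pol_slice, assuming a dump variable collected with boutdata.collect and a matching grid file (paths and file names are illustrative):

>>> from boutdata.collect import collect
>>> from boutdata.pol_slice import pol_slice
>>> var3d = collect("P", path="data")[-1]            # last time point, [x, y, z]
>>> var2d = pol_slice(var3d, "bout.grd.nc", n=5, zangle=0.0)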
- - Returns - ------- - array - A 2D-slice of var3d interpolated at a fixed toroidal angle - """ - n = int(n) - zangle = float(zangle) - - s = np.shape(var3d) - if len(s) != 3: - raise ValueError("pol_slice expects a 3D variable (got {} dimensions)" - .format(len(s))) - - nx, ny, nz = s - - # Open the grid file - with DataFile(gridfile) as gf: - # Check the grid size is correct - grid_nx = gf.read("nx") - if grid_nx != nx: - raise ValueError("Grid X size ({}) is different to the variable ({})" - .format(grid_nx, nx)) - grid_ny = gf.read("ny") - if grid_ny != ny: - raise ValueError("Grid Y size ({}) is different to the variable ({})" - .format(grid_ny, ny)) - - # Get the toroidal shift - zShift = gf.read("qinty") - - if zShift is not None: - print("Using qinty as toroidal shift angle") - else: - zShift = gf.read("zShift") - if zShift is not None: - print("Using zShift as toroidal shift angle") - else: - raise ValueError("Neither qinty nor zShift found") - - # Decide if we've asked to do interpolation - if nyInterp is not None and nyInterp != ny: - varTmp = var3d - - # Interpolate to output positions and make the correct shape - # np.mgrid gives us an array of indices - # 0:ny-1:nyInterp*1j means use nyInterp points between 0 and ny-1 inclusive - var3d = map_coordinates(varTmp, np.mgrid[0:nx, 0:ny-1:nyInterp*1j, 0:nz], - cval=-999) - zShift = map_coordinates(zShift, np.mgrid[0:nx, 0:ny-1:nyInterp*1j], - cval=-999) - - # Update shape - ny = nyInterp - - var2d = np.zeros([nx, ny]) - - ###################################### - # Perform 2D slice - dz = 2.*np.pi / float(n * nz) - zind = (zangle - zShift) / dz - z0f = np.floor(zind) - z0 = z0f.astype(int) - p = zind - z0f - - # Make z0 between 0 and (nz-2) - z0 = ((z0 % (nz-1)) + (nz-1)) % (nz-1) - - # Get z+ and z- - zp = (z0 + 1) % (nz-1) - zm = (z0 - 1 + (nz-1)) % (nz-1) - - # For some reason numpy imposes a limit of 32 entries to choose - # so if nz>32 we have to use a different approach. 
This limit may change with numpy version - if nz >= 32: - for x in np.arange(nx): - for y in np.arange(ny): - var2d[x, y] = (0.5*p[x, y]*(p[x, y]-1.0) * var3d[x, y, zm[x, y]] + - (1.0 - p[x, y]*p[x, y]) * var3d[x, y, z0[x, y]] + - 0.5*p[x, y]*(p[x, y]+1.0) * var3d[x, y, zp[x, y]]) - else: - var2d = (0.5*p*(p-1.0) * np.choose(zm.T, var3d.T).T + - (1.0 - p*p) * np.choose(z0.T, var3d.T).T + - 0.5*p*(p+1.0) * np.choose(zp.T, var3d.T).T) - - return var2d diff --git a/tools/pylib/boutdata/processor_rearrange.py b/tools/pylib/boutdata/processor_rearrange.py deleted file mode 100644 index fb91af3763..0000000000 --- a/tools/pylib/boutdata/processor_rearrange.py +++ /dev/null @@ -1,161 +0,0 @@ -"""Routines for redistributing files over different numbers of -processors - -""" - -from math import sqrt -from collections import namedtuple - -processor_layout_ = namedtuple("BOUT_processor_layout", - ["nxpe", "nype", "npes", "mxsub", "mysub", - "nx", "ny", "mz", "mxg", "myg"]) - - -# Subclass the namedtuple above so we can add a docstring -class processor_layout(processor_layout_): - """A namedtuple describing the processor layout, including grid sizes - and guard cells - - Parameters - ---------- - - nxpe, nype : int - The number of processors in x and y - npes : int - The total number of procesors - mxsub, mysub : int - The size of the grid in x and y on a single processor - nx, ny, mz : int - The total size of the grid in x, y and z - mxg : int - The number of guard cells in x and y - - """ - pass - - -def get_processor_layout(boutfile, has_t_dimension=True, mxg=2, myg=2): - """Given a BOUT.restart.* or BOUT.dmp.* file (as a DataFile object), - return the processor layout for its data - - Parameters - ---------- - boutfile : DataFile - Restart or dump file to read - has_t_dimension : bool, optional - Does this file have a time dimension? - mxg, myg : int, optional - Number of x, y guard cells - - Returns - ------- - processor_layout - A description of the processor layout and grid sizes - - """ - - nxpe = boutfile.read('NXPE') - nype = boutfile.read("NYPE") - npes = nxpe * nype - - # Get list of variables - var_list = boutfile.list() - if len(var_list) == 0: - raise ValueError("ERROR: No data found") - - mxsub = 0 - mysub = 0 - mz = 0 - - if has_t_dimension: - maxdims = 4 - else: - maxdims = 3 - for v in var_list: - if boutfile.ndims(v) == maxdims: - s = boutfile.size(v) - mxsub = s[maxdims - 3] - 2 * mxg - if mxsub < 0: - if s[maxdims - 3] == 1: - mxsub = 1 - mxg = 0 - elif s[maxdims - 3] == 3: - mxsub = 1 - mxg = 1 - else: - print("Number of x points is wrong?") - return False - - mysub = s[maxdims - 2] - 2 * myg - if mysub < 0: - if s[maxdims - 2] == 1: - mysub = 1 - myg = 0 - elif s[maxdims - 2] == 3: - mysub = 1 - myg = 1 - else: - print("Number of y points is wrong?") - return False - - mz = s[maxdims - 1] - break - - # Calculate total size of the grid - nx = mxsub * nxpe - ny = mysub * nype - - result = processor_layout(nxpe=nxpe, nype=nype, npes=npes, mxsub=mxsub, mysub=mysub, nx=nx, ny=ny, mz=mz, mxg=mxg, myg=myg) - - return result - - -def create_processor_layout(old_processor_layout, npes, nxpe=None): - """Convert one processor layout into another one with a different - total number of processors - - If nxpe is None, use algorithm from BoutMesh to select optimal nxpe. 
- Otherwise, check nxpe is valid (divides npes) - - Parameters - ---------- - old_processor_layout : processor_layout - The processor layout to convert - npes : int - The new total number of procesors - nxpe : int, optional - The number of procesors in x to use - - Returns - ------- - processor_layout - A description of the processor layout and grid sizes - - """ - - if nxpe is None: # Copy algorithm from BoutMesh for selecting nxpe - ideal = sqrt(float(old_processor_layout.nx) * float(npes) / float(old_processor_layout.ny)) - # Results in square domain - - for i in range(1, npes + 1): - if npes % i == 0 and old_processor_layout.nx % i == 0 and int(old_processor_layout.nx / i) >= old_processor_layout.mxg and old_processor_layout.ny % (npes / i) == 0: - # Found an acceptable value - # Warning: does not check branch cuts! - - if nxpe is None or abs(ideal - i) < abs(ideal - nxpe): - nxpe = i # Keep value nearest to the ideal - - if nxpe is None: - raise ValueError("ERROR: could not find a valid value for nxpe") - elif npes % nxpe != 0: - raise ValueError( - "ERROR: requested nxpe is invalid, it does not divide npes") - - nype = int(npes / nxpe) - - mxsub = int(old_processor_layout.nx / nxpe) - mysub = int(old_processor_layout.ny / nype) - - result = processor_layout(nxpe=nxpe, nype=nype, npes=npes, mxsub=mxsub, mysub=mysub, nx=old_processor_layout.nx, ny=old_processor_layout.ny, mz=old_processor_layout.mz, mxg=old_processor_layout.mxg, myg=old_processor_layout.myg) - - return result diff --git a/tools/pylib/boutdata/restart.py b/tools/pylib/boutdata/restart.py deleted file mode 100644 index 89cd37c39d..0000000000 --- a/tools/pylib/boutdata/restart.py +++ /dev/null @@ -1,828 +0,0 @@ -"""Routines for manipulating restart files - -TODO ----- - -- Don't import ``numpy.random.normal`` directly, just the ``random`` - submodule, or sphinx includes the documentation for ``normal`` - -""" - -from __future__ import print_function -from __future__ import division -from builtins import str, range - -import os -import glob - -from boutdata.collect import collect, create_cache -from boututils.datafile import DataFile -from boututils.boutarray import BoutArray -from boutdata.processor_rearrange import get_processor_layout, create_processor_layout - -import multiprocessing -import numpy as np -from numpy import mean, zeros, arange -from numpy.random import normal - -from scipy.interpolate import interp1d -try: - from scipy.interpolate import RegularGridInterpolator -except ImportError: - pass - -def resize3DField(var, data, coordsAndSizesTuple, method, mute): - """Resize 3D fields - - To be called by resize. - - Written as a function in order to call it using multiprocess. Must - be defined as a top level function in order to be pickable by the - multiprocess. 
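A brief sketch of how the two helpers above combine to compute a new processor split (the restart file path and processor count are illustrative):

>>> from boututils.datafile import DataFile
>>> from boutdata.processor_rearrange import get_processor_layout, create_processor_layout
>>> with DataFile("data/BOUT.restart.0.nc") as f:
...     old = get_processor_layout(f, has_t_dimension=False)
>>> new = create_processor_layout(old, npes=16)
>>> new.nxpe * new.nype
16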
- - See the function resize for details - - """ - - # Unpack the tuple for better readability - xCoordOld, yCoordOld, zCoordOld,\ - xCoordNew, yCoordNew, zCoordNew,\ - newNx, newNy, newNz = coordsAndSizesTuple - - if not(mute): - print(" Resizing "+var + - ' to (nx,ny,nz) = ({},{},{})'.format(newNx, newNy, newNz)) - - # Make the regular grid function (see examples in - # http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RegularGridInterpolator.html - # for details) - gridInterpolator = RegularGridInterpolator( - (xCoordOld, yCoordOld, zCoordOld), data, method) - - # Need to fill with one exrta z plane (will only contain zeros) - newData = np.zeros((newNx, newNy, newNz)) - - # Interpolate to the new values - for xInd, x in enumerate(xCoordNew): - for yInd, y in enumerate(yCoordNew): - for zInd, z in enumerate(zCoordNew): - newData[xInd, yInd, zInd] = gridInterpolator([x, y, z]) - - return var, newData - - -def resize(newNx, newNy, newNz, mxg=2, myg=2, - path="data", output="./", informat="nc", outformat=None, - method='linear', maxProc=None, mute=False): - """Increase/decrease the number of points in restart files. - - NOTE: Can't overwrite - WARNING: Currently only implemented with uniform BOUT++ grid - - Parameters - ---------- - newNx, newNy, newNz : int - nx, ny, nz for the new file (including ghost points) - mxg, myg : int, optional - Number of ghost points in x, y (default: 2) - path : str, optional - Input path to data files - output : str, optional - Path to write new files - informat : str, optional - File extension of input - outformat : {None, str}, optional - File extension of output (default: use the same as `informat`) - method : {'linear', 'nearest'}, optional - What interpolation method to be used - maxProc : {None, int}, optional - Limits maximum processors to use when interpolating if set - mute : bool, optional - Whether or not output should be printed from this function - - Returns - ------- - return : bool - True on success, else False - - TODO - ---- - - Add 2D field interpolation - - Replace printing errors with raising `ValueError` - - Make informat work like `redistribute` - - """ - - if method is None: - # Make sure the method is set - method = 'linear' - - if outformat is None: - outformat = informat - - if path == output: - print("ERROR: Can't overwrite restart files when expanding") - return False - - def is_pow2(x): - """Returns true if x is a power of 2""" - return (x > 0) and ((x & (x-1)) == 0) - - if not is_pow2(newNz): - print("ERROR: New Z size {} must be a power of 2".format(newNz)) - return False - - file_list = glob.glob(os.path.join(path, "BOUT.restart.*."+informat)) - file_list.sort() - nfiles = len(file_list) - - if nfiles == 0: - print("ERROR: No data found in {}".format(path)) - return False - - if not(mute): - print("Number of files found: " + str(nfiles)) - - for f in file_list: - new_f = os.path.join(output, f.split('/')[-1]) - if not(mute): - print("Changing {} => {}".format(f, new_f)) - - # Open the restart file in read mode and create the new file - with DataFile(f) as old, DataFile(new_f, write=True, create=True) as new: - - # Find the dimension - for var in old.list(): - # Read the data - data = old.read(var) - # Find 3D variables - if old.ndims(var) == 3: - break - - nx, ny, nz = data.shape - # Make coordinates - # NOTE: The max min of the coordinates are irrelevant when - # interpolating (as long as old and new coordinates - # are consistent), so we just choose all variable to - # be between 0 and 1 Calculate the old 
coordinates - xCoordOld = np.linspace(0, 1, nx) - yCoordOld = np.linspace(0, 1, ny) - zCoordOld = np.linspace(0, 1, nz) - - # Calculate the new coordinates - xCoordNew = np.linspace(xCoordOld[0], xCoordOld[-1], newNx) - yCoordNew = np.linspace(yCoordOld[0], yCoordOld[-1], newNy) - zCoordNew = np.linspace(zCoordOld[0], zCoordOld[-1], newNz) - - # Make a pool of workers - pool = multiprocessing.Pool(maxProc) - # List of jobs and results - jobs = [] - # Pack input to resize3DField together - coordsAndSizesTuple = (xCoordOld, yCoordOld, zCoordOld, - xCoordNew, yCoordNew, zCoordNew, - newNx, newNy, newNz) - - # Loop over the variables in the old file - for var in old.list(): - # Read the data - data = old.read(var) - attributes = old.attributes(var) - - # Find 3D variables - if old.ndims(var) == 3: - - # Asynchronous call (locks first at .get()) - jobs.append(pool.apply_async(resize3DField, - args=(var, data, coordsAndSizesTuple, method, mute, ))) - - else: - if not(mute): - print(" Copying "+var) - newData = data.copy() - if not(mute): - print("Writing "+var) - new.write(var, newData) - - for job in jobs: - var, newData = job.get() - newData = BoutArray(newData, attributes=attributes) - if not(mute): - print("Writing "+var) - new.write(var, newData) - - # Close the pool of workers - pool.close() - # Wait for all processes to finish - pool.join() - - return True - - -def resizeZ(newNz, path="data", output="./", informat="nc", outformat=None): - """Increase the number of Z points in restart files - - NOTE: - * Can't overwrite - * Will not yield a result close to the original if there are - asymmetries in the z-direction - - Parameters - ---------- - newNz : int - nz for the new file - path : str, optional - Path to original restart files (default: "data") - output : str, optional - Path to write new restart files (default: current directory) - informat : str, optional - File extension of original files (default: "nc") - outformat : str, optional - File extension of new files (default: use the same as `informat`) - - Returns - ------- - True on success, else False - - TODO - ---- - - Replace printing errors with raising `ValueError` - - Make informat work like `redistribute` - - """ - - if outformat is None: - outformat = informat - - if path == output: - print("ERROR: Can't overwrite restart files when expanding") - return False - - def is_pow2(x): - """Returns true if x is a power of 2""" - return (x > 0) and ((x & (x-1)) == 0) - - if not is_pow2(newNz): - print("ERROR: New Z size must be a power of 2") - return False - - file_list = glob.glob(os.path.join(path, "BOUT.restart.*."+informat)) - file_list.sort() - nfiles = len(file_list) - - if nfiles == 0: - print("ERROR: No data found") - return False - - print("Number of files found: " + str(nfiles)) - - for f in file_list: - new_f = os.path.join(output, f.split('/')[-1]) - print("Changing {} => {}".format(f, new_f)) - - # Open the restart file in read mode and create the new file - with DataFile(f) as old,\ - DataFile(new_f, write=True, create=True) as new: - # Loop over the variables in the old file - for var in old.list(): - # Read the data - data = old.read(var) - attributes = old.attributes(var) - - # Find 3D variables - if old.ndims(var) == 3: - print(" Resizing "+var) - - nx, ny, nz = data.shape - - newdata = np.zeros((nx, ny, newNz)) - for x in range(nx): - for y in range(ny): - f_old = np.fft.fft(data[x, y, :]) - - # Number of points in f is power of 2 - f_new = np.zeros(newNz) - - # Copy coefficients across (ignoring Nyquist) - 
f_new[0] = f_old[0] # DC - for m in range(1, int(nz/2)): - # + ve frequencies - f_new[m] = f_old[m] - # - ve frequencies - f_new[newNz-m] = f_old[nz-m] - - # Invert fft - newdata[x, y, :] = np.fft.ifft(f_new).real - newdata[x, y, :] = newdata[x, y, 0] - - # Multiply with the ratio of newNz/nz - # This is not needed in the IDL routine as the - # forward transfrom has the scaling factor 1/N in - # the forward transform, whereas the scaling factor - # 1/N is the inverse transform in np.fft - # Note that ifft(fft(a)) = a for the same number of - # points in both IDL and np.ftt - newdata *= (newNz/nz) - else: - print(" Copying "+var) - newdata = data.copy() - - newdata = BoutArray(newdata, attributes=attributes) - - new.write(var, newdata) - - return True - - -def addnoise(path=".", var=None, scale=1e-5): - """Add random noise to restart files - - .. warning:: Modifies restart files in place! This is in contrast - to most of the functions in this module! - - Parameters - ---------- - path : str, optional - Path to restart files (default: current directory) - var : str, optional - The variable to modify. By default all 3D variables are modified - scale : float - Amplitude of the noise. Gaussian noise is used, with zero mean - and this parameter as the standard deviation - - """ - file_list = glob.glob(os.path.join(path, "BOUT.restart.*")) - nfiles = len(file_list) - - print("Number of restart files: %d" % (nfiles,)) - - for file in file_list: - print(file) - with DataFile(file, write=True) as d: - if var is None: - for v in d.list(): - if d.ndims(v) == 3: - print(" -> "+v) - data = d.read(v, asBoutArray=True) - data += normal(scale=scale, size=data.shape) - d.write(v, data) - else: - # Modify a single variable - print(" -> "+var) - data = d.read(var) - data += normal(scale=scale, size=data.shape) - d.write(var, data) - - -def scalevar(var, factor, path="."): - """Scales a variable by a given factor, modifying restart files in - place - - .. warning:: Modifies restart files in place! This is in contrast - to most of the functions in this module! - - Parameters - ---------- - var : str - Name of the variable - factor : float - Factor to multiply - path : str, optional - Path to the restart files (default: current directory) - - """ - - file_list = glob.glob(os.path.join(path, "BOUT.restart.*")) - nfiles = len(file_list) - - print("Number of restart files: %d" % (nfiles,)) - for file in file_list: - print(file) - with DataFile(file, write=True) as d: - d[var] = d[var] * factor - - -def create(averagelast=1, final=-1, path="data", output="./", informat="nc", outformat=None): - """Create restart files from data (dmp) files. - - Parameters - ---------- - averagelast : int, optional - Number of time points (counting from `final`, inclusive) to - average over (default is 1 i.e. 
just take last time-point) - final : int, optional - The last time point to use (default is last, -1) - path : str, optional - Path to original restart files (default: "data") - output : str, optional - Path to write new restart files (default: current directory) - informat : str, optional - File extension of original files (default: "nc") - outformat : str, optional - File extension of new files (default: use the same as `informat`) - - """ - - if outformat is None: - outformat = informat - - file_list = glob.glob(os.path.join(path, "BOUT.dmp.*."+informat)) - nfiles = len(file_list) - - print(("Number of data files: ", nfiles)) - - for i in range(nfiles): - # Open each data file - infname = os.path.join(path, "BOUT.dmp."+str(i)+"."+informat) - outfname = os.path.join(output, "BOUT.restart."+str(i)+"."+outformat) - - print((infname, " -> ", outfname)) - - infile = DataFile(infname) - outfile = DataFile(outfname, create=True) - - # Get the data always needed in restart files - hist_hi = infile.read("iteration") - print(("hist_hi = ", hist_hi)) - outfile.write("hist_hi", hist_hi) - - t_array = infile.read("t_array") - tt = t_array[final] - print(("tt = ", tt)) - outfile.write("tt", tt) - - tind = final - if tind < 0.0: - tind = len(t_array) + final - - NXPE = infile.read("NXPE") - NYPE = infile.read("NYPE") - print(("NXPE = ", NXPE, " NYPE = ", NYPE)) - outfile.write("NXPE", NXPE) - outfile.write("NYPE", NYPE) - - # Get a list of variables - varnames = infile.list() - - for var in varnames: - if infile.ndims(var) == 4: - # Could be an evolving variable - - print((" -> ", var)) - - data = infile.read(var) - - if averagelast == 1: - slice = data[final, :, :, :] - else: - slice = mean(data[(final - averagelast) - :final, :, :, :], axis=0) - - print(slice.shape) - - outfile.write(var, slice) - - infile.close() - outfile.close() - - -def redistribute(npes, path="data", nxpe=None, output=".", informat=None, outformat=None, mxg=2, myg=2): - """Resize restart files across NPES processors. - - Does not check if new processor arrangement is compatible with the - branch cuts. In this respect :py:func:`restart.split` is - safer. However, BOUT++ checks the topology during initialisation - anyway so this is not too serious. - - Parameters - ---------- - npes : int - Number of processors for the new restart files - path : str, optional - Path to original restart files (default: "data") - nxpe : int, optional - Number of processors to use in the x-direction (determines - split: npes = nxpe * nype). Default is None which uses the - same algorithm as BoutMesh (but without topology information) - to determine a suitable value for nxpe. - output : str, optional - Location to save new restart files (default: current directory) - informat : str, optional - Specify file format of old restart files (must be a suffix - understood by DataFile, e.g. 'nc'). Default uses the format of - the first 'BOUT.restart.*' file listed by glob.glob. - outformat : str, optional - Specify file format of new restart files (must be a suffix - understood by DataFile, e.g. 'nc'). Default is to use the same - as informat. 
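A minimal usage sketch for redistribute as documented above (directory names are illustrative); the old layout is read from the existing restart files in `path` and the new files are written to `output`:

>>> from boutdata import restart
>>> restart.redistribute(32, path="data", output="data_32")   # True on success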
- - Returns - ------- - True on success - - TODO - ---- - - Replace printing errors with raising `ValueError` - - """ - - if npes <= 0: - print("ERROR: Negative or zero number of processors") - return False - - if path == output: - print("ERROR: Can't overwrite restart files") - return False - - if informat is None: - file_list = glob.glob(os.path.join(path, "BOUT.restart.*")) - else: - file_list = glob.glob(os.path.join(path, "BOUT.restart.*."+informat)) - - nfiles = len(file_list) - - # Read old processor layout - f = DataFile(file_list[0]) - - # Get list of variables - var_list = f.list() - if len(var_list) == 0: - print("ERROR: No data found") - return False - - old_processor_layout = get_processor_layout(f, has_t_dimension=False) - print("Grid sizes: ", old_processor_layout.nx, - old_processor_layout.ny, old_processor_layout.mz) - - if nfiles != old_processor_layout.npes: - print("WARNING: Number of restart files inconsistent with NPES") - print("Setting nfiles = " + str(old_processor_layout.npes)) - nfiles = old_processor_layout.npes - - if nfiles == 0: - print("ERROR: No restart files found") - return False - - informat = file_list[0].split(".")[-1] - if outformat is None: - outformat = informat - - try: - new_processor_layout = create_processor_layout( - old_processor_layout, npes, nxpe=nxpe) - except ValueError as e: - print("Could not find valid processor split. " + e.what()) - - nx = old_processor_layout.nx - ny = old_processor_layout.ny - mz = old_processor_layout.mz - mxg = old_processor_layout.mxg - myg = old_processor_layout.myg - old_npes = old_processor_layout.npes - old_nxpe = old_processor_layout.nxpe - old_nype = old_processor_layout.nype - old_mxsub = old_processor_layout.mxsub - old_mysub = old_processor_layout.mysub - - nxpe = new_processor_layout.nxpe - nype = new_processor_layout.nype - mxsub = new_processor_layout.mxsub - mysub = new_processor_layout.mysub - mzsub = new_processor_layout.mz - - outfile_list = [] - for i in range(npes): - outpath = os.path.join(output, "BOUT.restart."+str(i)+"."+outformat) - outfile_list.append(DataFile(outpath, write=True, create=True)) - - DataFileCache = create_cache(path, "BOUT.restart") - - for v in var_list: - dimensions = f.dimensions(v) - ndims = len(dimensions) - - # collect data - data = collect(v, xguards=True, yguards=True, info=False, - datafile_cache=DataFileCache) - - # write data - for i in range(npes): - ix = i % nxpe - iy = int(i/nxpe) - outfile = outfile_list[i] - if v == "NPES": - outfile.write(v, npes) - elif v == "NXPE": - outfile.write(v, nxpe) - elif v == "NYPE": - outfile.write(v, nype) - elif v == "MXSUB": - outfile.write(v, mxsub) - elif v == "MYSUB": - outfile.write(v, mysub) - elif v == "MZSUB": - outfile.write(v, mzsub) - elif dimensions == (): - # scalar - outfile.write(v, data) - elif dimensions == ('x', 'y'): - # Field2D - outfile.write( - v, data[ix*mxsub:(ix+1)*mxsub+2*mxg, iy*mysub:(iy+1)*mysub+2*myg]) - elif dimensions == ('x', 'z'): - # FieldPerp - yindex_global = data.attributes['yindex_global'] - if yindex_global + myg >= iy*mysub and yindex_global + myg < (iy+1)*mysub+2*myg: - outfile.write(v, data[ix*mxsub:(ix+1)*mxsub+2*mxg, :]) - else: - nullarray = BoutArray(np.zeros([mxsub+2*mxg, mysub+2*myg]), attributes={"bout_type":"FieldPerp", "yindex_global":-myg-1}) - outfile.write(v, nullarray) - elif dimensions == ('x', 'y', 'z'): - # Field3D - outfile.write( - v, data[ix*mxsub:(ix+1)*mxsub+2*mxg, iy*mysub:(iy+1)*mysub+2*myg, :]) - else: - print( - "ERROR: variable found with unexpected 
dimensions,", dimensions, v) - - f.close() - for outfile in outfile_list: - outfile.close() - - return True - - -def resizeY(newy, path="data", output=".", informat="nc", outformat=None, myg=2): - """Increase the number of Y points in restart files - - NOTE: - * Can't overwrite - - Parameters - ---------- - newy : int - ny for the new file - path : str, optional - Path to original restart files (default: "data") - output : str, optional - Path to write new restart files (default: current directory) - informat : str, optional - File extension of original files (default: "nc") - outformat : str, optional - File extension of new files (default: use the same as `informat`) - myg : int, optional - Number of ghost points in y (default: 2) - - Returns - ------- - True on success, else False - - TODO - ---- - - Replace printing errors with raising `ValueError` - - Make informat work like `redistribute` - - """ - - if outformat is None: - outformat = informat - - file_list = glob.glob(os.path.join(path, "BOUT.restart.*."+informat)) - - nfiles = len(file_list) - - if nfiles == 0: - print("ERROR: No restart files found") - return False - - for i in range(nfiles): - # Open each data file - infname = os.path.join(path, "BOUT.restart."+str(i)+"."+informat) - outfname = os.path.join(output, "BOUT.restart."+str(i)+"."+outformat) - - print("Processing %s -> %s" % (infname, outfname)) - - infile = DataFile(infname) - outfile = DataFile(outfname, create=True) - - # Copy basic information - for var in ["hist_hi", "NXPE", "NYPE", "tt"]: - data = infile.read(var) - try: - # Convert to scalar if necessary - data = data[0] - except: - pass - outfile.write(var, data) - - # Get a list of variables - varnames = infile.list() - - for var in varnames: - if infile.ndims(var) == 3: - # Could be an evolving variable [x,y,z] - - print(" -> Resizing " + var) - - # Read variable from input - indata = infile.read(var) - - nx, ny, nz = indata.shape - - # y coordinate in input and output data - iny = (arange(ny) - myg + 0.5) / (ny - 2*myg) - outy = (arange(newy) - myg + 0.5) / (newy - 2*myg) - - outdata = zeros([nx, newy, nz]) - - for x in range(nx): - for z in range(nz): - f = interp1d( - iny, indata[x, :, z], bounds_error=False, fill_value=0.0) - outdata[x, :, z] = f(outy) - - outfile.write(var, outdata) - elif infile.ndims(var) == 2: - # Assume evolving variable [x,y] - print(" -> Resizing " + var) - - # Read variable from input - indata = infile.read(var) - - nx, ny = indata.shape - - # y coordinate in input and output data - iny = (arange(ny) - myg + 0.5) / (ny - 2*myg) - outy = (arange(newy) - myg + 0.5) / (newy - 2*myg) - - outdata = zeros([nx, newy]) - - for x in range(nx): - f = interp1d(iny, indata[x, :], - bounds_error=False, fill_value=0.0) - outdata[x, :] = f(outy) - - outfile.write(var, outdata) - else: - # Copy variable - print(" -> Copying " + var) - - # Read variable from input - data = infile.read(var) - try: - # Convert to scalar if necessary - data = data[0] - except: - pass - outfile.write(var, data) - - infile.close() - outfile.close() - - -def addvar(var, value, path="."): - """Adds a variable with constant value to all restart files. - - .. warning:: Modifies restart files in place! This is in contrast - to most of the functions in this module! - - This is useful for restarting simulations whilst turning on new - equations. By default BOUT++ throws an error if an evolving - variable is not in the restart file. By setting an option the - variable can be set to zero. 
This allows it to start with a - non-zero value. - - Parameters - ---------- - var : str - The name of the variable to add - value : float - Constant value for the variable - path : str, optional - Input path to data files (default: current directory) - - """ - - file_list = glob.glob(os.path.join(path, "BOUT.restart.*")) - nfiles = len(file_list) - - print("Number of restart files: %d" % (nfiles,)) - # Loop through all the restart files - for filename in file_list: - print(filename) - # Open the restart file for writing (modification) - with DataFile(filename, write=True) as df: - size = None - # Find a 3D variable and get its size - for varname in df.list(): - size = df.size(varname) - if len(size) == 3: - break - if size is None: - raise Exception("no 3D variables found") - - # Create a new 3D array with input value - data = np.zeros(size) + value - - # Set the variable in the NetCDF file - df.write(var, data) diff --git a/tools/pylib/boutdata/settings.py b/tools/pylib/boutdata/settings.py deleted file mode 100644 index c09b5cbd7f..0000000000 --- a/tools/pylib/boutdata/settings.py +++ /dev/null @@ -1,92 +0,0 @@ -"""Parse BOUT.inp settings file - -""" - - -def get(filename, name, section=None): - """Find and return a single value from a BOUT.inp settings file - - .. deprecated::3.0 - `settings.get` has been replaced with - `boututils.options.BoutOptions` - - Parameters - ---------- - filename : str - Name of the settings file - name : str - The name of the setting - section : str, optional - The section to look in (default: the global section) - - Note that names and sections are case insensitive - - Returns - ------- - str - Value of the setting. If not found, raises a ValueError - - Examples - -------- - - >>> settings.get("BOUT.inp", "nout") - '100' - - >>> settings.get("BOUT.inp", "compress", section="highbeta") - 'true' - - """ - with open(filename, "rt") as f: - if section is not None: - # First find the section - found = False - for line in f: - # Strip spaces from left - line = line.lstrip(' \t\n\r') - if len(line) < 1: - continue # Empty line - - # if line starts with '[' then this is a section - if line[0] == '[': - # Split on ']' - head, _ = line[1:].split(']', 1) - # head is now the section name - if head.lower() == section.lower(): - found = True - break - if not found: - raise ValueError("Section '%s' not found" % (section)) - - # Now in the correct section - - for line in f: - # Strip spaces from left - line = line.lstrip(' \t\n\r') - if len(line) < 1: - continue # Empty line - - # if line starts with '[' then this is a section - if line[0] == '[': - raise ValueError("Name '%s' not found in section '%s'" % (name,section)) - # Check if this line contains an '=' - if '=' in line: - # Check if contains comment - comment = '' - if '#' in line: - line, comment = line.split('#', 1) - # Split on '=' - key, value = line.split('=',1) - # Strip whitespace - key = key.strip(' \t\n\r') - value = value.strip(' \t\n\r') - - # Strip out quotes if present - if value[0] == '"' or value[0] == "'": - value = value[1:] - if value[-1] == '"' or value[-1] == "'": - value = value[:-1] - - #print("'%s' = '%s'" % (key, value)) - if key.lower() == name.lower(): # Case insensitive - return value - diff --git a/tools/pylib/boutdata/shiftz.py b/tools/pylib/boutdata/shiftz.py deleted file mode 100644 index 606b94f176..0000000000 --- a/tools/pylib/boutdata/shiftz.py +++ /dev/null @@ -1,91 +0,0 @@ -from numpy import ndarray, pi, cos, sin -from numpy import fft - - -def shiftz(var, zangle, zperiod=1.0): - 
"""Shift a variable in Z, changing between field-aligned and - orthogonal X-Z coordinates. This mainly used for tokamak - simulations in field-aligned coordinates. - - Parameters - ---------- - var : array_like - Data to be shifted - 4D [t,x,y,z] - 3D [x,y,z] or [t,x,z] - 2D [x,z] - zangle : array_like - The shift angle - 2D [x,y] (if var is 4D or 3D [x,y,z]) - 1D [x] (if var is 3D [t,x,z] or 2D) - zperiod : float, optional - The fraction of 2pi covered by the variable in Z. This - corresponds to the ZPERIOD variable in BOUT.inp and multiplies - the kz wavenumbers by this factor. - - Returns - ------- - ndarray - A numpy array of the same size and shape as var - - Examples - -------- - - >>> from boutdata import collect - >>> from boututils.datafile import DataFile - >>> from boutdata.shiftz import shiftz - >>> n = collect("Ne") # Read 4D variable [t,x,y,z] - >>> d = DataFile("grid.nc") # Read the grid file - >>> nxz = shiftz(n, d["zShift"], zperiod=4) - - nxz is now in orthogonal X-Z coordinates (X is psi). - - Note that in older grid files "qinty" is used rather - than "zShift". - - """ - - if len(var.shape) == 4: - # 4D variable [t,x,y,z] - result = ndarray(var.shape) - for t in range(var.shape[0]): - # Shift each time slice separately - result[t,:,:,:] = shiftz(var[t,:,:,:], zangle, zperiod=zperiod) - return result - elif len(var.shape) == 3: - if len(zangle.shape) == 2: - # 3D variable [x,y,z], array [x,y] - result = ndarray(var.shape) - for y in range(var.shape[1]): - result[:,y,:] = shiftz(var[:,y,:], zangle[:,y], zperiod=zperiod) - return result - elif len(zangle.shape) == 1: - # 3D variable [t,x,z], array [x] - result = ndarray(var.shape) - for t in range(var.shape[0]): - result[t,:,:] = shiftz(var[t,:,:], zangle, zperiod=zperiod) - return result - else: - raise ValueError("Expecting zangle to be 1 or 2D") - elif len(var.shape) == 2: - if len(zangle.shape) != 1: - raise ValueError("Expecting zangle to be 1D") - - ################################ - # Main algorithm here - # var is [x,z] - # zangle is [x] - - # Take FFT in Z direction - f = fft.rfft(var, axis=1) - - zlength = 2.*pi/zperiod - - for z in range(1, f.shape[1]): - kwave=z*2.0*pi/zlength - f[:,z] *= cos(kwave * zangle) - 1j*sin(kwave*zangle) - return fft.irfft(f, var.shape[1], axis=1) - - else: - raise ValueError("Don't know how to handle 1D variable") - diff --git a/tools/pylib/boutdata/squashoutput.py b/tools/pylib/boutdata/squashoutput.py deleted file mode 100644 index c0ed69a81b..0000000000 --- a/tools/pylib/boutdata/squashoutput.py +++ /dev/null @@ -1,154 +0,0 @@ -""" -Collect all data from BOUT.dmp.* files and create a single output file. - -Output file named BOUT.dmp.nc by default - -Useful because this discards ghost cell data (that is only useful for debugging) -and because single files are quicker to download. - -""" - -from boutdata.data import BoutOutputs -from boututils.datafile import DataFile -from boututils.boutarray import BoutArray -import numpy -import os -import gc -import tempfile -import shutil -import glob - - -def squashoutput(datadir=".", outputname="BOUT.dmp.nc", format="NETCDF4", tind=None, - xind=None, yind=None, zind=None, singleprecision=False, compress=False, - least_significant_digit=None, quiet=False, complevel=None, append=False, - delete=False): - """ - Collect all data from BOUT.dmp.* files and create a single output file. - - Parameters - ---------- - datadir : str - Directory where dump files are and where output file will be created. - default "." 
- outputname : str - Name of the output file. File suffix specifies whether to use NetCDF or - HDF5 (see boututils.datafile.DataFile for suffixes). - default "BOUT.dmp.nc" - format : str - format argument passed to DataFile - default "NETCDF4" - tind : slice, int, or [int, int, int] - tind argument passed to collect - default None - xind : slice, int, or [int, int, int] - xind argument passed to collect - default None - yind : slice, int, or [int, int, int] - yind argument passed to collect - default None - zind : slice, int, or [int, int, int] - zind argument passed to collect - default None - singleprecision : bool - If true convert data to single-precision floats - default False - compress : bool - If true enable compression in the output file - least_significant_digit : int or None - How many digits should be retained? Enables lossy - compression. Default is lossless compression. Needs - compression to be enabled. - complevel : int or None - Compression level, 1 should be fastest, and 9 should yield - highest compression. - quiet : bool - Be less verbose. default False - append : bool - Append to existing squashed file - delete : bool - Delete the original files after squashing. - """ - - fullpath = os.path.join(datadir, outputname) - - if append: - datadirnew = tempfile.mkdtemp(dir=datadir) - for f in glob.glob(datadir + "/BOUT.dmp.*.??"): - if not quiet: - print("moving", f) - shutil.move(f, datadirnew) - oldfile = datadirnew + "/" + outputname - datadir = datadirnew - - if os.path.isfile(fullpath) and not append: - raise ValueError( - fullpath + " already exists. Collect may try to read from this file, which is presumably not desired behaviour.") - - # useful object from BOUT pylib to access output data - outputs = BoutOutputs(datadir, info=False, xguards=True, - yguards=True, tind=tind, xind=xind, yind=yind, zind=zind) - outputvars = outputs.keys() - # Read a value to cache the files - outputs[outputvars[0]] - - if append: - # move only after the file list is cached - shutil.move(fullpath, oldfile) - - t_array_index = outputvars.index("t_array") - outputvars.append(outputvars.pop(t_array_index)) - - kwargs = {} - if compress: - kwargs['zlib'] = True - if least_significant_digit is not None: - kwargs['least_significant_digit'] = least_significant_digit - if complevel is not None: - kwargs['complevel'] = complevel - if append: - old = DataFile(oldfile) - # Check if dump on restart was enabled - # If so, we want to drop the duplicated entry - cropnew = 0 - if old['t_array'][-1] == outputs['t_array'][0]: - cropnew = 1 - # Make sure we don't end up with duplicated data: - for ot in old['t_array']: - if ot in outputs['t_array'][cropnew:]: - raise RuntimeError( - "For some reason t_array has some duplicated entries in the new and old file.") - # Create single file for output and write data - with DataFile(fullpath, create=True, write=True, format=format, **kwargs) as f: - for varname in outputvars: - if not quiet: - print(varname) - - var = outputs[varname] - if append: - dims = outputs.dimensions[varname] - if 't' in dims: - var = var[cropnew:, ...] 
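                    # cropnew is 1 when the new run begins with a duplicate of the old
                    # run's final time point (dump on restart); that first slice is
                    # dropped here before old and new data are joined along the time axis.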
- varold = old[varname] - var = BoutArray(numpy.append( - varold, var, axis=0), var.attributes) - - if singleprecision: - if not isinstance(var, int): - var = BoutArray(numpy.float32(var), var.attributes) - - f.write(varname, var) - # Write changes, free memory - f.sync() - var = None - gc.collect() - - if delete: - if append: - os.remove(oldfile) - for f in glob.glob(datadir + "/BOUT.dmp.*.??"): - if not quiet: - print("Deleting", f) - os.remove(f) - if append: - os.rmdir(datadir) diff --git a/tools/pylib/boututils b/tools/pylib/boututils new file mode 120000 index 0000000000..5eaca68d8c --- /dev/null +++ b/tools/pylib/boututils @@ -0,0 +1 @@ +../../externalpackages/boututils/boututils/ \ No newline at end of file diff --git a/tools/pylib/boututils/View3D.py b/tools/pylib/boututils/View3D.py deleted file mode 100644 index 3c509504a2..0000000000 --- a/tools/pylib/boututils/View3D.py +++ /dev/null @@ -1,390 +0,0 @@ -""" -View a 3D rendering of the magnetic field lines and the streamlines of the rational surfaces. -The quality of the later can be used as an indicator of the quality of the grid. The magnetic field -is computed from efit_analyzed.py. The script can be used as a template to show additional properties of the field - -based on enthought's example by Gael Varoquaux -http://docs.enthought.com/mayavi/mayavi/auto/example_magnetic_field.html#example-magnetic-field - -""" -from __future__ import absolute_import -from __future__ import division -from builtins import range -from past.utils import old_div - - -from boutdata.collect import collect -import numpy as np - -import sys - -if sys.version_info[0]>=3: - message = "View3D uses the VTK library through mayavi, which"+\ - " is currently only available in python 2" - raise ImportError(message) -else: - from mayavi import mlab - -from .read_geqdsk import read_geqdsk -from boututils.View2D import View2D -from scipy import interpolate -from .boutgrid import * - - -def View3D(g,path=None, gb=None): - ############################################################################## - # Resolution - - n=51 - - #compute Bxy - [Br,Bz,x,y,q]=View2D(g,option=1) - - - rd=g.r.max()+.5 - zd=g.z.max()+.5 - ############################################################################## - # The grid of points on which we want to evaluate the field - X, Y, Z = np.mgrid[-rd:rd:n*1j, -rd:rd:n*1j, -zd:zd:n*1j] - ## Avoid rounding issues : - #f = 1e4 # this gives the precision we are interested by : - #X = np.round(X * f) / f - #Y = np.round(Y * f) / f - #Z = np.round(Z * f) / f - - r = np.c_[X.ravel(), Y.ravel(), Z.ravel()] - - ############################################################################## - # Calculate field - # First initialize a container matrix for the field vector : - B = np.empty_like(r) - - - #Compute Toroidal field - # fpol is given between simagx (psi on the axis) and sibdry ( - # psi on limiter or separatrix). So the toroidal field (fpol/R) and the q profile are within these boundaries - # For each r,z we have psi thus we get fpol if (r,z) is within the boundary (limiter or separatrix) and fpol=fpol(outer_boundary) for outside - - #The range of psi is g.psi.max(), g.psi.min() but we have f(psi) up to the limit. 
Thus we use a new extended variable padded up to max psi - # set points between psi_limit and psi_max - - add_psi=np.linspace(g.sibdry,g.psi.max(),10) - - # define the x (psi) array - xf=np.arange(np.float(g.qpsi.size))*(g.sibdry-g.simagx)/np.float(g.qpsi.size-1) + g.simagx - - # pad the extra values excluding the 1st value - - xf=np.concatenate((xf, add_psi[1::]), axis=0) - - # pad fpol with corresponding points - - fp=np.lib.pad(g.fpol, (0,9), 'edge') - - # create interpolating function - - f = interpolate.interp1d(xf, fp) - - #calculate Toroidal field - - Btrz = old_div(f(g.psi), g.r) - - - rmin=g.r[:,0].min() - rmax=g.r[:,0].max() - zmin=g.z[0,:].min() - zmax=g.z[0,:].max() - - - B1p,B2p,B3p,B1t,B2t,B3t = magnetic_field(g,X,Y,Z,rmin,rmax,zmin,zmax, Br,Bz,Btrz) - - bpnorm = np.sqrt(B1p**2 + B2p**2 + B3p**2) - btnorm = np.sqrt(B1t**2 + B2t**2 + B3t**2) - - BBx=B1p+B1t - BBy=B2p+B2t - BBz=B3p+B3t - btotal = np.sqrt(BBx**2 + BBy**2 + BBz**2) - - Psi = psi_field(g,X,Y,Z,rmin,rmax,zmin,zmax) - - ############################################################################## - # Visualization - - # We threshold the data ourselves, as the threshold filter produce a - # data structure inefficient with IsoSurface - #bmax = bnorm.max() - # - #B1[B > bmax] = 0 - #B2[B > bmax] = 0 - #B3[B > bmax] = 0 - #bnorm[bnorm > bmax] = bmax - - mlab.figure(1, size=(1080,1080))#, bgcolor=(1, 1, 1), fgcolor=(0.5, 0.5, 0.5)) - - mlab.clf() - - fieldp = mlab.pipeline.vector_field(X, Y, Z, B1p, B2p, B3p, - scalars=bpnorm, name='Bp field') - - fieldt = mlab.pipeline.vector_field(X, Y, Z, B1t, B2t, B3t, - scalars=btnorm, name='Bt field') - - field = mlab.pipeline.vector_field(X, Y, Z, BBx, BBy, BBz, - scalars=btotal, name='B field') - - - - field2 = mlab.pipeline.scalar_field(X, Y, Z, Psi, name='Psi field') - - #vectors = mlab.pipeline.vectors(field, - # scale_factor=1,#(X[1, 0, 0] - X[0, 0, 0]), - # ) - - #vcp1 = mlab.pipeline.vector_cut_plane(fieldp, - # scale_factor=1, - # colormap='jet', - # plane_orientation='y_axes') - ## - #vcp2 = mlab.pipeline.vector_cut_plane(fieldt, - # scale_factor=1, - # colormap='jet', - # plane_orientation='x_axes') - - - # Mask random points, to have a lighter visualization. 
- #vectors.glyph.mask_input_points = True - #vectors.glyph.mask_points.on_ratio = 6 - - #vcp = mlab.pipeline.vector_cut_plane(field1) - #vcp.glyph.glyph.scale_factor=5*(X[1, 0, 0] - X[0, 0, 0]) - # For prettier picture: - #vcp1.implicit_plane.widget.enabled = False - #vcp2.implicit_plane.widget.enabled = False - - iso = mlab.pipeline.iso_surface(field2, - contours=[Psi.min()+.01], - opacity=0.4, - colormap='bone') - - for i in range(q.size): - iso.contour.contours[i+1:i+2]=[q[i]] - - iso.compute_normals = True - # - - #mlab.pipeline.image_plane_widget(field2, - # plane_orientation='x_axes', - # #slice_index=10, - # extent=[-rd, rd, -rd, rd, -zd,zd] - # ) - #mlab.pipeline.image_plane_widget(field2, - # plane_orientation='y_axes', - # # slice_index=10, - # extent=[-rd, rd, -rd,rd, -zd,zd] - # ) - - - - #scp = mlab.pipeline.scalar_cut_plane(field2, - # colormap='jet', - # plane_orientation='x_axes') - # For prettier picture and with 2D streamlines: - #scp.implicit_plane.widget.enabled = False - #scp.enable_contours = True - #scp.contour.number_of_contours = 20 - - # - - # Magnetic Axis - - s=mlab.pipeline.streamline(field) - s.streamline_type = 'line' - s.seed.widget = s.seed.widget_list[3] - s.seed.widget.position=[g.rmagx,0.,g.zmagx] - s.seed.widget.enabled = False - - - # q=i surfaces - - for i in range(np.shape(x)[0]): - - s=mlab.pipeline.streamline(field) - s.streamline_type = 'line' - ##s.seed.widget = s.seed.widget_list[0] - ##s.seed.widget.center = 0.0, 0.0, 0.0 - ##s.seed.widget.radius = 1.725 - ##s.seed.widget.phi_resolution = 16 - ##s.seed.widget.handle_direction =[ 1., 0., 0.] - ##s.seed.widget.enabled = False - ##s.seed.widget.enabled = True - ##s.seed.widget.enabled = False - # - if x[i].size>1 : - s.seed.widget = s.seed.widget_list[3] - s.seed.widget.position=[x[i][0],0.,y[i][0]] - s.seed.widget.enabled = False - - - # A trick to make transparency look better: cull the front face - iso.actor.property.frontface_culling = True - - #mlab.view(39, 74, 0.59, [.008, .0007, -.005]) - out=mlab.outline(extent=[-rd, rd, -rd, rd, -zd, zd], line_width=.5 ) - out.outline_mode = 'cornered' - out.outline_filter.corner_factor = 0.0897222 - - - w = mlab.gcf() - w.scene.camera.position = [13.296429046581462, 13.296429046581462, 12.979811259697154] - w.scene.camera.focal_point = [0.0, 0.0, -0.31661778688430786] - w.scene.camera.view_angle = 30.0 - w.scene.camera.view_up = [0.0, 0.0, 1.0] - w.scene.camera.clipping_range = [13.220595435695394, 35.020427055647517] - w.scene.camera.compute_view_plane_normal() - w.scene.render() - w.scene.show_axes = True - - mlab.show() - - if(path is not None): - #BOUT data - #path='../Aiba/' - # - #gb = file_import(path+'aiba.bout.grd.nc') - #gb = file_import("../cbm18_8_y064_x516_090309.nc") - #gb = file_import("cbm18_dens8.grid_nx68ny64.nc") - #gb = file_import("/home/ben/run4/reduced_y064_x256.nc") - - data = collect('P', path=path) - data = data[50,:,:,:] - #data0=collect("P0", path=path) - #data=data+data0[:,:,None] - - s = np.shape(data) - nz = s[2] - - - sgrid = create_grid(gb, data, 1) - - # OVERPLOT the GRID - #mlab.pipeline.add_dataset(sgrid) - #gr=mlab.pipeline.grid_plane(sgrid) - #gr.grid_plane.axis='x' - - - ## pressure scalar cut plane from bout - scpb = mlab.pipeline.scalar_cut_plane(sgrid, - colormap='jet', - plane_orientation='x_axes') - - scpb.implicit_plane.widget.enabled = False - scpb.enable_contours = True - scpb.contour.filled_contours=True - # - scpb.contour.number_of_contours = 20 - # - # - #loc=sgrid.points - #p=sgrid.point_data.scalars - 
- # compute pressure from scatter points interpolation - #pint=interpolate.griddata(loc, p, (X, Y, Z), method='linear') - #dpint=np.ma.masked_array(pint,np.isnan(pint)).filled(0.) - # - #p2 = mlab.pipeline.scalar_field(X, Y, Z, dpint, name='P field') - # - #scp2 = mlab.pipeline.scalar_cut_plane(p2, - # colormap='jet', - # plane_orientation='y_axes') - # - #scp2.implicit_plane.widget.enabled = False - #scp2.enable_contours = True - #scp2.contour.filled_contours=True - #scp2.contour.number_of_contours = 20 - #scp2.contour.minimum_contour=.001 - - - - # CHECK grid orientation - #fieldr = mlab.pipeline.vector_field(X, Y, Z, -BBx, BBy, BBz, - # scalars=btotal, name='B field') - # - #sg=mlab.pipeline.streamline(fieldr) - #sg.streamline_type = 'tube' - #sg.seed.widget = sg.seed.widget_list[3] - #sg.seed.widget.position=loc[0] - #sg.seed.widget.enabled = False - - - - #OUTPUT grid - - #ww = tvtk.XMLStructuredGridWriter(input=sgrid, file_name='sgrid.vts') - #ww.write() - - return - -def magnetic_field(g,X,Y,Z,rmin,rmax,zmin,zmax,Br,Bz,Btrz): - - rho = np.sqrt(X**2 + Y**2) - phi=np.arctan2(Y,X) - - br=np.zeros(np.shape(X)) - bz=np.zeros(np.shape(X)) - bt=np.zeros(np.shape(X)) - - nx,ny,nz=np.shape(X) - - mask = (rho >= rmin) & (rho <= rmax) & (Z >= zmin) & (Z <= zmax) - k=np.argwhere(mask==True) - - fr=interpolate.interp2d(g.r[:,0], g.z[0,:], Br.T) - fz=interpolate.interp2d(g.r[:,0], g.z[0,:], Bz.T) - ft=interpolate.interp2d(g.r[:,0], g.z[0,:], Btrz.T) - - for i in range(len(k)): - br[k[i,0],k[i,1],k[i,2]]=fr(rho[k[i,0],k[i,1],k[i,2]],Z[k[i,0],k[i,1],k[i,2]]) - bz[k[i,0],k[i,1],k[i,2]]=fz(rho[k[i,0],k[i,1],k[i,2]],Z[k[i,0],k[i,1],k[i,2]]) - bt[k[i,0],k[i,1],k[i,2]]=ft(rho[k[i,0],k[i,1],k[i,2]],Z[k[i,0],k[i,1],k[i,2]]) - - # Toroidal component - B1t=-bt*np.sin(phi) - B2t=bt*np.cos(phi) - B3t=0*bz - - # Poloidal component - B1p=br*np.cos(phi) - B2p=br*np.sin(phi) - B3p=bz - - - # Rotate the field back in the lab's frame - return B1p,B2p,B3p,B1t,B2t,B3t - - -def psi_field(g,X,Y,Z,rmin,rmax,zmin,zmax): - - rho = np.sqrt(X**2 + Y**2) - - psi=np.zeros(np.shape(X)) - - nx,ny,nz=np.shape(X) - - mask = (rho >= rmin) & (rho <= rmax) & (Z >= zmin) & (Z <= zmax) - k=np.argwhere(mask==True) - - f=interpolate.interp2d(g.r[:,0], g.z[0,:], g.psi.T) - - for i in range(len(k)): - psi[k[i,0],k[i,1],k[i,2]]=f(rho[k[i,0],k[i,1],k[i,2]],Z[k[i,0],k[i,1],k[i,2]]) - - # Rotate the field back in the lab's frame - return psi - - -if __name__ == '__main__': - path='../../tokamak_grids/pyGridGen/' - g=read_geqdsk(path+"g118898.03400") - View3D(g) - mlab.show() diff --git a/tools/pylib/boututils/__init__.py b/tools/pylib/boututils/__init__.py deleted file mode 100644 index f815d3e525..0000000000 --- a/tools/pylib/boututils/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -""" Generic routines, useful for all data """ - -import sys - -try: - from builtins import str -except ImportError: - raise ImportError("Please install the future module to use Python 2") - -# Modules to be imported independent of version -for_all_versions = [\ - 'calculus',\ - 'closest_line',\ - 'datafile',\ - # 'efit_analyzer',\ # bunch pkg required - 'fft_deriv',\ - 'fft_integrate',\ - 'file_import',\ - 'int_func',\ - 'linear_regression',\ - 'mode_structure',\ - # 'moment_xyzt',\ # bunch pkg requried - 'run_wrapper',\ - 'shell',\ - 'showdata',\ - # 'surface_average',\ - # 'volume_integral',\ #bunch pkg required - ] - -# Check the current python version -if sys.version_info[0]>=3: - do_import = for_all_versions - __all__ = do_import -else: - do_import = 
for_all_versions - do_import.append('anim') - do_import.append('plotpolslice') - do_import.append('View3D') - __all__ = do_import diff --git a/tools/pylib/boututils/analyse_equil_2.py b/tools/pylib/boututils/analyse_equil_2.py deleted file mode 100644 index 93b98fc3ec..0000000000 --- a/tools/pylib/boututils/analyse_equil_2.py +++ /dev/null @@ -1,272 +0,0 @@ -"""Equilibrium analysis routine - -Takes a RZ psi grid, and finds x-points and o-points -""" - -from __future__ import print_function -from __future__ import division - -from builtins import zip -from builtins import str -from builtins import range -from past.utils import old_div - -import numpy -from bunch import Bunch -from . import local_min_max -from scipy.interpolate import RectBivariateSpline -from matplotlib.pyplot import contour, gradient, annotate, plot, draw -from crosslines import find_inter - - -def analyse_equil(F, R, Z): - """Takes an RZ psi grid, and finds x-points and o-points - - Parameters - ---------- - F : array_like - 2-D array of psi values - R : array_like - 1-D array of major radii, its length should be the same as the - first dimension of F - Z : array_like - 1-D array of heights, its length should be the same as the - second dimension of F - - Returns - ------- - bunch - A structure of critical points containing: - - n_opoint, n_xpoint - Number of O- and X-points - primary_opt - Index of plasma centre O-point - inner_sep - X-point index of inner separatrix - opt_ri, opt_zi - R and Z indices for each O-point - opt_f - Psi value at each O-point - xpt_ri, xpt_zi - R and Z indices for each X-point - xpt_f - Psi value of each X-point - - """ - s = numpy.shape(F) - nx = s[0] - ny = s[1] - - #;;;;;;;;;;;;;;; Find critical points ;;;;;;;;;;;;; - # - # Need to find starting locations for O-points (minima/maxima) - # and X-points (saddle points) - # - Rr=numpy.tile(R,nx).reshape(nx,ny).T - Zz=numpy.tile(Z,ny).reshape(nx,ny) - - contour1=contour(Rr,Zz,gradient(F)[0], levels=[0.0], colors='r') - contour2=contour(Rr,Zz,gradient(F)[1], levels=[0.0], colors='r') - - draw() - - -### 1st method - line crossings --------------------------- - res=find_inter( contour1, contour2) - - #rex1=numpy.interp(res[0], R, numpy.arange(R.size)).astype(int) - #zex1=numpy.interp(res[1], Z, numpy.arange(Z.size)).astype(int) - - rex1=res[0] - zex1=res[1] - - w=numpy.where((rex1 > R[2]) & (rex1 < R[nx-3]) & (zex1 > Z[2]) & (zex1 < Z[nx-3])) - nextrema = numpy.size(w) - rex1=rex1[w].flatten() - zex1=zex1[w].flatten() - - -### 2nd method - local maxima_minima ----------------------- - res1=local_min_max.detect_local_minima(F) - res2=local_min_max.detect_local_maxima(F) - res=numpy.append(res1,res2,1) - - rex2=res[0,:].flatten() - zex2=res[1,:].flatten() - - - w=numpy.where((rex2 > 2) & (rex2 < nx-3) & (zex2 >2) & (zex2 < nx-3)) - nextrema = numpy.size(w) - rex2=rex2[w].flatten() - zex2=zex2[w].flatten() - - - n_opoint=nextrema - n_xpoint=numpy.size(rex1)-n_opoint - - # Needed for interp below - - Rx=numpy.arange(numpy.size(R)) - Zx=numpy.arange(numpy.size(Z)) - - - - print("Number of O-points: "+numpy.str(n_opoint)) - print("Number of X-points: "+numpy.str(n_xpoint)) - - # Deduce the O & X points - - x=R[rex2] - y=Z[zex2] - - dr=old_div((R[numpy.size(R)-1]-R[0]),numpy.size(R)) - dz=old_div((Z[numpy.size(Z)-1]-Z[0]),numpy.size(Z)) - - - repeated=set() - for i in range(numpy.size(rex1)): - for j in range(numpy.size(x)): - if numpy.abs(rex1[i]-x[j]) < 2*dr and numpy.abs(zex1[i]-y[j]) < 2*dz : repeated.add(i) - - # o-points - - 
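    # Contour crossings that coincide (within 2*dr, 2*dz) with a local
    # minimum or maximum found above were collected in 'repeated' and are
    # classified as O-points; the remaining crossings become X-points below.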
o_ri=numpy.take(rex1,numpy.array(list(repeated))) - opt_ri=numpy.interp(o_ri,R,Rx) - o_zi=numpy.take(zex1,numpy.array(list(repeated))) - opt_zi=numpy.interp(o_zi,Z,Zx) - opt_f=numpy.zeros(numpy.size(opt_ri)) - func = RectBivariateSpline(Rx, Zx, F) - for i in range(numpy.size(opt_ri)): opt_f[i]=func(opt_ri[i], opt_zi[i]) - - n_opoint=numpy.size(opt_ri) - - # x-points - - x_ri=numpy.delete(rex1, numpy.array(list(repeated))) - xpt_ri=numpy.interp(x_ri,R,Rx) - x_zi=numpy.delete(zex1, numpy.array(list(repeated))) - xpt_zi=numpy.interp(x_zi,Z,Zx) - xpt_f=numpy.zeros(numpy.size(xpt_ri)) - func = RectBivariateSpline(Rx, Zx, F) - for i in range(numpy.size(xpt_ri)): xpt_f[i]=func(xpt_ri[i], xpt_zi[i]) - - n_xpoint=numpy.size(xpt_ri) - - # plot o-points - - plot(o_ri,o_zi,'o', markersize=10) - - labels = ['{0}'.format(i) for i in range(o_ri.size)] - for label, xp, yp in zip(labels, o_ri, o_zi): - annotate(label, xy = (xp, yp), xytext = (10, 10), textcoords = 'offset points',size='large', color='b') - - draw() - - # plot x-points - - plot(x_ri,x_zi,'x', markersize=10) - - labels = ['{0}'.format(i) for i in range(x_ri.size)] - for label, xp, yp in zip(labels, x_ri, x_zi): - annotate(label, xy = (xp, yp), xytext = (10, 10), textcoords = 'offset points',size='large', color='r') - - draw() - - print("Number of O-points: "+str(n_opoint)) - - if n_opoint == 0 : - print("No O-points! Giving up on this equilibrium") - return Bunch(n_opoint=0, n_xpoint=0, primary_opt=-1) - - - #;;;;;;;;;;;;;; Find plasma centre ;;;;;;;;;;;;;;;;;;; - # Find the O-point closest to the middle of the grid - - mind = (opt_ri[0] - (old_div(numpy.float(nx),2.)))**2 + (opt_zi[0] - (old_div(numpy.float(ny),2.)))**2 - ind = 0 - for i in range (1, n_opoint) : - d = (opt_ri[i] - (old_div(numpy.float(nx),2.)))**2 + (opt_zi[i] - (old_div(numpy.float(ny),2.)))**2 - if d < mind : - ind = i - mind = d - - primary_opt = ind - print("Primary O-point is at "+ numpy.str(numpy.interp(opt_ri[ind],numpy.arange(numpy.size(R)),R)) + ", " + numpy.str(numpy.interp(opt_zi[ind],numpy.arange(numpy.size(Z)),Z))) - print("") - - if n_xpoint > 0 : - - # Find the primary separatrix - - # First remove non-monotonic separatrices - nkeep = 0 - for i in range (n_xpoint) : - # Draw a line between the O-point and X-point - - n = 100 # Number of points - farr = numpy.zeros(n) - dr = old_div((xpt_ri[i] - opt_ri[ind]), numpy.float(n)) - dz = old_div((xpt_zi[i] - opt_zi[ind]), numpy.float(n)) - for j in range (n) : - # interpolate f at this location - func = RectBivariateSpline(Rx, Zx, F) - - farr[j] = func(opt_ri[ind] + dr*numpy.float(j), opt_zi[ind] + dz*numpy.float(j)) - - - # farr should be monotonic, and shouldn't cross any other separatrices - - maxind = numpy.argmax(farr) - minind = numpy.argmin(farr) - if (maxind < minind) : maxind, minind = minind, maxind - - # Allow a little leeway to account for errors - # NOTE: This needs a bit of refining - if (maxind > (n-3)) and (minind < 3) : - # Monotonic, so add this to a list of x-points to keep - if nkeep == 0 : - keep = [i] - else: - keep = numpy.append(keep, i) - - - nkeep = nkeep + 1 - - - if nkeep > 0 : - print("Keeping x-points ", keep) - xpt_ri = xpt_ri[keep] - xpt_zi = xpt_zi[keep] - xpt_f = xpt_f[keep] - else: - "No x-points kept" - - n_xpoint = nkeep - - - # Now find x-point closest to primary O-point - s = numpy.argsort(numpy.abs(opt_f[ind] - xpt_f)) - xpt_ri = xpt_ri[s] - xpt_zi = xpt_zi[s] - xpt_f = xpt_f[s] - inner_sep = 0 - - else: - - # No x-points. 
Pick mid-point in f - - xpt_f = 0.5*(numpy.max(F) + numpy.min(F)) - - print("WARNING: No X-points. Setting separatrix to F = "+str(xpt_f)) - - xpt_ri = 0 - xpt_zi = 0 - inner_sep = 0 - - - - #;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - # Put results into a structure - - result = Bunch(n_opoint=n_opoint, n_xpoint=n_xpoint, # Number of O- and X-points - primary_opt=primary_opt, # Which O-point is the plasma centre - inner_sep=inner_sep, #Innermost X-point separatrix - opt_ri=opt_ri, opt_zi=opt_zi, opt_f=opt_f, # O-point location (indices) and psi values - xpt_ri=xpt_ri, xpt_zi=xpt_zi, xpt_f=xpt_f) # X-point locations and psi values - - return result - diff --git a/tools/pylib/boututils/anim.py b/tools/pylib/boututils/anim.py deleted file mode 100755 index d2f783858c..0000000000 --- a/tools/pylib/boututils/anim.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env python3 -"""Animate graph with mayavi - -""" - -from __future__ import print_function -from builtins import range -from boutdata.collect import collect -import numpy as np -import os -try: - from enthought.mayavi import mlab - from enthought.mayavi.mlab import * -except ImportError: - try: - from mayavi import mlab - from mayavi.mlab import * - except ImportError: - print("No mlab available") - -from tvtk.tools import visual - - -@mlab.show -@mlab.animate(delay=250) -def anim(s, d, *args, **kwargs): - """Animate graph with mayavi - - Parameters - ---------- - s : mayavi axis object - Axis to animate data on - d : array_like - 3-D array to animate - s1 : mayavi axis object, optional - Additional bundled graph (first item in *args) - save : bool, optional - Save png files for creating movie (default: False) - - """ - - if len(args) == 1: - s1 = args[0] - else: - s1=None - - try: - save = kwargs['save'] - except: - save = False - - - nt=d.shape[0] - - print('animating for ',nt,'timesteps') - if save == True : - print('Saving pics in folder Movie') - if not os.path.exists('Movie'): - os.makedirs('Movie') - - - for i in range(nt): - s.mlab_source.scalars = d[i,:,:] - if s1 is not None : s1.mlab_source.scalars = d[i,:,:] - title="t="+np.string0(i) - mlab.title(title,height=1.1, size=0.26) - if save == True : mlab.savefig('Movie/anim%d.png'%i) - yield - -if __name__ == '__main__': - - path='../../../examples/elm-pb/data' - - data = collect("P", path=path) - - nt=data.shape[0] - - ns=data.shape[1] - ne=data.shape[2] - nz=data.shape[3] - - - f = mayavi.mlab.figure(size=(600,600)) - # Tell visual to use this as the viewer. - visual.set_viewer(f) - - #First way - - s1 = contour_surf(data[0,:,:,10]+.1, contours=30, line_width=.5, transparent=True) - s = surf(data[0,:,:,10]+.1, colormap='Spectral')#, warp_scale='.1')#, representation='wireframe') - - - # second way - - #x, y= mgrid[0:ns:1, 0:ne:1] - #s = mesh(x,y,data[0,:,:,10], colormap='Spectral')#, warp_scale='auto')#, representation='wireframe') - s.enable_contours=True - s.contour.filled_contours=True -# - - #x, y, z= mgrid[0:ns:1, 0:ne:1, 0:nz:1] - # - #p=plot3d(x,y,z,data[10,:,:,:], tube_radius=0.025, colormap='Spectral') - #p=points3d(x,y,z,data[10,:,:,:], colormap='Spectral') -# - #s=contour3d(x,y,z,data[10,:,:,:], contours=4, transparent=True) - - #mlab.view(0.,0.) - colorbar() - #axes() - #outline() - - - # Run the animation. 
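    # anim() updates the surface plot (s) and the bundled contour plot (s1)
    # with each time slice of P at z index 10, and writes a PNG per frame
    # to the Movie directory because save=True.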
- anim(s,data[:,:,:,10]+.1,s1, save=True) diff --git a/tools/pylib/boututils/ask.py b/tools/pylib/boututils/ask.py deleted file mode 100644 index 14aa1947d4..0000000000 --- a/tools/pylib/boututils/ask.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Ask a yes/no question and return the answer. - -""" - -from builtins import input -import sys - - -def query_yes_no(question, default="yes"): - """Ask a yes/no question via input() and return their answer. - - Answers are case-insensitive. - - Probably originally from http://code.activestate.com/recipes/577058/ - via https://stackoverflow.com/a/3041990/2043465 - - Parameters - ---------- - question : str - Question to be presented to the user - default : {"yes", "no", None} - The presumed answer if the user just hits . - It must be "yes" (the default), "no" or None (meaning - an answer is required of the user). - - Returns - ------- - bool - True if the answer was "yes" or "y", False if "no" or "n" - """ - - valid = {"yes":True, "y":True, "ye":True, - "no":False, "n":False, "No":False, "N":False } - - if default is None: - prompt = " [y/n] " - elif default == "yes": - prompt = " [Y/n] " - elif default == "no": - prompt = " [y/N] " - else: - raise ValueError("invalid default answer: '%s'" % default) - - while True: - sys.stdout.write(question + prompt) - choice = input().lower() - if default is not None and choice == '': - return valid[default] - elif choice in valid: - return valid[choice] - else: - sys.stdout.write("Please respond with 'yes' or 'no' "\ - "(or 'y' or 'n').\n") diff --git a/tools/pylib/boututils/boutarray.py b/tools/pylib/boututils/boutarray.py deleted file mode 100644 index ce38baec2c..0000000000 --- a/tools/pylib/boututils/boutarray.py +++ /dev/null @@ -1,73 +0,0 @@ -"""Wrapper for ndarray with extra attributes for BOUT++ fields. - -""" - -import numpy - - -class BoutArray(numpy.ndarray): - """Wrapper for ndarray with extra attributes for BOUT++ fields. - - Parameters - ---------- - input_array : array_like - Data to convert to BoutArray - attributes : dict - Dictionary of extra attributes for BOUT++ fields - - Notably, these attributes should contain - ``bout_type``. Possible values are: - - - scalar - - Field2D - - Field3D - - If the variable is an evolving variable (i.e. has a time - dimension), then it is appended with a "_t" - - """ - - # See https://docs.scipy.org/doc/numpy-1.13.0/user/basics.subclassing.html - # for explanation of the structure of this numpy.ndarray wrapper - - def __new__(cls, input_array, attributes={}): - # Input array is an already formed ndarray instance - # We first cast to be our class type - obj = numpy.asarray(input_array).view(cls) - # add the dict of attributes to the created instance - obj.attributes = attributes - # Finally, we must return the newly created object: - return obj - - def __array_finalize__(self, obj): - # ``self`` is a new object resulting from - # ndarray.__new__(BoutArray, ...), therefore it only has - # attributes that the ndarray.__new__ constructor gave it - - # i.e. those of a standard ndarray. - # - # We could have got to the ndarray.__new__ call in 3 ways: - # From an explicit constructor - e.g. 
BoutArray(): - # obj is None - # (we're in the middle of the BoutArray.__new__ - # constructor, and self.attributes will be set when we return to - # BoutArray.__new__) - if obj is None: - return - # From view casting - e.g arr.view(BoutArray): - # obj is arr - # (type(obj) can be BoutArray) - # From new-from-template - e.g boutarray[:3] - # type(obj) is BoutArray - # - # Note that it is here, rather than in the __new__ method, that we set - # the default value for 'attributes', because this method sees all - # creation of default objects - with the BoutArray.__new__ constructor, - # but also with arr.view(BoutArray). - self.attributes = getattr(obj, 'attributes', None) - # We do not need to return anything - - def __format__(self, str): - try: - return super().__format__(str) - except TypeError: - return float(self).__format__(str) diff --git a/tools/pylib/boututils/boutgrid.py b/tools/pylib/boututils/boutgrid.py deleted file mode 100755 index ace67663cd..0000000000 --- a/tools/pylib/boututils/boutgrid.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import print_function -from builtins import range - -import numpy as np -from numpy import cos, sin, pi - -from tvtk.api import tvtk -#from enthought.mayavi.scripts import mayavi2 - -def aligned_points(grid, nz=1, period=1.0, maxshift=0.4): - try: - nx = grid["nx"]#[0] - ny = grid["ny"]#[0] - zshift = grid["zShift"] - Rxy = grid["Rxy"] - Zxy = grid["Zxy"] - except: - print("Missing required data") - return None - - - dz = 2.*pi / (period * (nz-1)) - phi0 = np.linspace(0,2.*pi / period, nz) - - - # Need to insert additional points in Y so mesh looks smooth - #for y in range(1,ny): - # ms = np.max(np.abs(zshift[:,y] - zshift[:,y-1])) - # if( - - # Create array of points, structured - - points = np.zeros([nx*ny*nz, 3]) - - - start = 0 - for y in range(ny): - - - end = start + nx*nz - - phi = zshift[:,y] + phi0[:,None] - r = Rxy[:,y] + (np.zeros([nz]))[:,None] - - xz_points = points[start:end] - - - xz_points[:,0] = (r*cos(phi)).ravel() # X - xz_points[:,1] = (r*sin(phi)).ravel() # Y - xz_points[:,2] = (Zxy[:,y]+(np.zeros([nz]))[:,None]).ravel() # Z - - - start = end - - return points - -def create_grid(grid, data, period=1): - - s = np.shape(data) - - nx = grid["nx"]#[0] - ny = grid["ny"]#[0] - nz = s[2] - - print("data: %d,%d,%d grid: %d,%d\n" % (s[0],s[1],s[2], nx,ny)) - - dims = (nx, nz, ny) - sgrid = tvtk.StructuredGrid(dimensions=dims) - pts = aligned_points(grid, nz, period) - print(np.shape(pts)) - sgrid.points = pts - - scalar = np.zeros([nx*ny*nz]) - start = 0 - for y in range(ny): - end = start + nx*nz - - #scalar[start:end] = (data[:,y,:]).transpose().ravel() - scalar[start:end] = (data[:,y,:]).ravel() - - print(y, " = " , np.max(scalar[start:end])) - start = end - - sgrid.point_data.scalars = np.ravel(scalar.copy()) - sgrid.point_data.scalars.name = "data" - - return sgrid - -#@mayavi2.standalone -def view3d(sgrid): - from mayavi.sources.vtk_data_source import VTKDataSource - from mayavi.modules.api import Outline, GridPlane - from mayavi.api import Engine - from mayavi.core.ui.engine_view import EngineView - e=Engine() - e.start() - s = e.new_scene() - # Do this if you need to see the MayaVi tree view UI. 
- ev = EngineView(engine=e) - ui = ev.edit_traits() - -# mayavi.new_scene() - src = VTKDataSource(data=sgrid) - e.add_source(src) - e.add_module(Outline()) - g = GridPlane() - g.grid_plane.axis = 'x' - e.add_module(g) - -if __name__ == '__main__': - from boutdata.collect import collect - from boututils.file_import import file_import - - #path = "/media/449db594-b2fe-4171-9e79-2d9b76ac69b6/runs/data_33/" - path="../data" - - g = file_import("../bout.grd.nc") - #g = file_import("../cbm18_8_y064_x516_090309.nc") - #g = file_import("/home/ben/run4/reduced_y064_x256.nc") - - data = collect("P", tind=10, path=path) - data = data[0,:,:,:] - s = np.shape(data) - nz = s[2] - - #bkgd = collect("P0", path=path) - #for z in range(nz): - # data[:,:,z] += bkgd - - # Create a structured grid - sgrid = create_grid(g, data, 1) - - - w = tvtk.XMLStructuredGridWriter(input=sgrid, file_name='sgrid.vts') - w.write() - - # View the structured grid - view3d(sgrid) diff --git a/tools/pylib/boututils/boutwarnings.py b/tools/pylib/boututils/boutwarnings.py deleted file mode 100644 index cdb03b0518..0000000000 --- a/tools/pylib/boututils/boutwarnings.py +++ /dev/null @@ -1,19 +0,0 @@ -""" -Wrappers for warnings functions. - -Allows raising warnings that are always printed by default. -""" - -import warnings - -class AlwaysWarning(UserWarning): - def __init__(self, *args, **kwargs): - super(AlwaysWarning, self).__init__(*args, **kwargs) - -warnings.simplefilter("always", AlwaysWarning) - -def alwayswarn(message): - warnings.warn(message, AlwaysWarning, stacklevel=2) - -def defaultwarn(message): - warnings.warn(message, stacklevel=2) diff --git a/tools/pylib/boututils/calculus.py b/tools/pylib/boututils/calculus.py deleted file mode 100644 index 274230906b..0000000000 --- a/tools/pylib/boututils/calculus.py +++ /dev/null @@ -1,252 +0,0 @@ -""" -Derivatives and integrals of periodic and non-periodic functions - - -B.Dudson, University of York, Nov 2009 -""" -from __future__ import print_function -from __future__ import division - -from builtins import range - -try: - from past.utils import old_div -except ImportError: - def old_div(a, b): - return a / b - -from numpy import zeros, pi, array, transpose, sum, where, arange, multiply -from numpy.fft import rfft, irfft - -def deriv(*args, **kwargs): - """Take derivative of 1D array - - result = deriv(y) - result = deriv(x, y) - - keywords - - periodic = False Domain is periodic - """ - - nargs = len(args) - if nargs == 1: - var = args[0] - x = arange(var.size) - elif nargs == 2: - x = args[0] - var = args[1] - else: - raise RuntimeError("deriv must be given 1 or 2 arguments") - - try: - periodic = kwargs['periodic'] - except: - periodic = False - - n = var.size - if periodic: - # Use FFTs to take derivatives - f = rfft(var) - f[0] = 0.0 # Zero constant term - if n % 2 == 0: - # Even n - for i in arange(1,old_div(n,2)): - f[i] *= 2.0j * pi * float(i)/float(n) - f[-1] = 0.0 # Nothing from Nyquist frequency - else: - # Odd n - for i in arange(1,old_div((n-1),2) + 1): - f[i] *= 2.0j * pi * float(i)/float(n) - return irfft(f) - else: - # Non-periodic function - result = zeros(n) # Create empty array - if n > 2: - for i in arange(1, n-1): - # 2nd-order central difference in the middle of the domain - result[i] = old_div((var[i+1] - var[i-1]), (x[i+1] - x[i-1])) - # Use left,right-biased stencils on edges (2nd order) - result[0] = old_div((-1.5*var[0] + 2.*var[1] - 0.5*var[2]), (x[1] - x[0])) - result[n-1] = old_div((1.5*var[n-1] - 2.*var[n-2] + 0.5*var[n-3]), (x[n-1] - x[n-2])) 
- elif n == 2: - # Just 1st-order difference for both points - result[0] = result[1] = old_div((var[1] - var[0]),(x[1] - x[0])) - elif n == 1: - result[0] = 0.0 - return result - -def deriv2D(data,axis=-1,dx=1.0,noise_suppression=True): - """ Takes 1D or 2D Derivative of 2D array using convolution - - result = deriv2D(data) - result = deriv2D(data, dx) - - output is 2D (if only one axis specified) - output is 3D if no axis specified [nx,ny,2] with the third dimension being [dfdx, dfdy] - - keywords: - axis = 0/1 If no axis specified 2D derivative will be returned - dx = 1.0 axis spacing, must be 2D if 2D deriv is taken - default is [1.0,1.0] - noise_suppression = True noise suppressing coefficients used to take derivative - default = True - """ - - from scipy.signal import convolve - - s = data.shape - if axis > len(s)-1: - raise RuntimeError("ERROR: axis out of bounds for derivative") - - if noise_suppression: - if s[axis] < 11: - raise RuntimeError("Data too small to use 11th order method") - tmp = array([old_div(-1.0,512.0),old_div(-8.0,512.0),old_div(-27.0,512.0),old_div(-48.0,512.0),old_div(-42.0,512.0),0.0,old_div(42.0,512.0),old_div(48.0,512.0),old_div(27.0,512.0),old_div(8.0,512.0),old_div(1.0,512.0)]) - else: - if s[axis] < 9: - raise RuntimeError("Data too small to use 9th order method") - tmp = array([old_div(1.0,280.0),old_div(-4.0,105.0),old_div(1.0,5.0),old_div(-4.0,5.0),0.0,old_div(4.0,5.0),old_div(-1.0,5.0),old_div(4.0,105.0),old_div(-1.0,280.0)]) - - N = int((tmp.size-1)/2) - if axis==1: - W = transpose(tmp[:,None]) - data_deriv = convolve(data,W,mode='same')/dx*-1.0 - for i in range(s[0]): - data_deriv[i,0:N-1] = old_div(deriv(data[i,0:N-1]),dx) - data_deriv[i,s[1]-N:] = old_div(deriv(data[i,s[1]-N:]),dx) - - elif axis==0: - W = tmp[:,None] - data_deriv = convolve(data,W,mode='same')/dx*-1.0 - for i in range(s[1]): - data_deriv[0:N-1,i] = old_div(deriv(data[0:N-1,i]),dx) - data_deriv[s[0]-N:,i] = old_div(deriv(data[s[0]-N:,i]),dx) - else: - data_deriv = zeros((s[0],s[1],2)) - if (not hasattr(dx, '__len__')) or len(dx)==1: - dx = array([dx,dx]) - - W = tmp[:,None]#transpose(multiply(tmp,ones((s[1],tmp.size)))) - data_deriv[:,:,0] = convolve(data,W,mode='same')/dx[0]*-1.0 - for i in range(s[1]): - data_deriv[0:N-1,i,0] = old_div(deriv(data[0:N-1,i]),dx[0]) - data_deriv[s[0]-N:s[0]+1,i,0] = old_div(deriv(data[s[0]-N:s[0]+1,i]),dx[0]) - - W = transpose(tmp[:,None])#multiply(tmp,ones((s[0],tmp.size))) - data_deriv[:,:,1] = convolve(data,W,mode='same')/dx[1]*-1.0 - for i in range(s[0]): - data_deriv[i,0:N-1,1] = old_div(deriv(data[i,0:N-1]),dx[1]) - data_deriv[i,s[1]-N:s[1]+1,1] = old_div(deriv(data[i,s[1]-N:s[1]+1]),dx[1]) - - return data_deriv - -def integrate(var, periodic=False): - """Integrate a 1D array - - Return array is the same size as the input - """ - if periodic: - # Use FFT - f = rfft(var) - n = var.size - # Zero frequency term - result = f[0].real*arange(n, dtype=float) - f[0] = 0. 
- if n % 2 == 0: - # Even n - for i in arange(1,old_div(n,2)): - f[i] /= 2.0j * pi * float(i)/float(n) - f[-1] = 0.0 # Nothing from Nyquist frequency - else: - # Odd n - for i in arange(1,old_div((n-1),2) + 1): - f[i] /= 2.0j * pi * float(i)/float(n) - return result + irfft(f) - else: - # Non-periodic function - def int_total(f): - """Integrate over a set of points""" - n = f.size - if n > 7: - # Need to split into several segments - # Use one 5-point, leaving at least 4-points - return int_total(f[0:5]) + int_total(f[4:]) - elif (n == 7) or (n == 6): - # Try to keep 4th-order - # Split into 4+4 or 4+3 - return int_total(f[0:4]) + int_total(f[3:]) - elif n == 5: - # 6th-order Bool's rule - return 4.*(7.*f[0] + 32.*f[1] + 12.*f[2] + 32.*f[3] + 7.*f[4])/90. - elif n == 4: - # 4th-order Simpson's 3/8ths rule - return 3.*(f[0] + 3.*f[1] + 3.*f[2] + f[3])/8. - elif n == 3: - # 4th-order Simpson's rule - return (f[0] + 4.*f[1] + f[2])/3. - elif n == 2: - # 2nd-order Trapezium rule - return 0.5*(f[0] + f[1]) - else: - print("WARNING: Integrating a single point") - return 0.0 - # Integrate using maximum number of grid-points - n = var.size - n2 = int(old_div(n,2)) - result = zeros(n) - for i in arange(n2, n): - result[i] = int_total(var[0:(i+1)]) - for i in arange(1, n2): - result[i] = result[-1] - int_total(var[i:]) - return result - -def simpson_integrate(data,dx,dy,kernel=0.0,weight=1.0): - """ Integrates 2D data to one value using the simpson method and matrix convolution - - result = simpson_integrate(data,dx,dy) - - keywords: - - kernel - can be supplied if the simpson matrix is calculated ahead of time - - if not supplied, is calculated within this function - - if you need to integrate the same shape data over and over, calculated - it ahead of time using: - kernel = simpson_matrix(Nx,Ny,dx,dy) - - weight - can be used to scale data if single number - - can be used to mask data if weight is array (same size as data) - """ - s = data.shape - Nx = s[0] - Ny = s[1] - - if len(kernel)==1: - kernel = simpson_matrix(Nx,Ny,dx,dy) - - return sum(multiply(multiply(weight,kernel),data))/sum(multiply(weight,kernel)) - - -def simpson_matrix(Nx,Ny,dx,dy): - """ - Creates a 2D matrix of coefficients for the simpson_integrate function - - Call ahead of time if you need to perform integration of the same size data with the - same dx and dy - - Otherwise, simpson_integrate will automatically call this - - """ - Wx = arange(Nx) + 2 - Wx[where(arange(Nx) % 2 == 1)] = 4 - Wx[0] = 1 - Wx[Nx-1] = 1 - - Wy = arange(Ny) + 2 - Wy[where(arange(Ny) % 2 == 1)] = 4 - Wy[0] = 1 - Wy[Ny-1] = 1 - - W = Wy[None,:] * Wx[:,None] - - A = dx*dy/9.0 - - return W*A diff --git a/tools/pylib/boututils/check_scaling.py b/tools/pylib/boututils/check_scaling.py deleted file mode 100644 index af59b0b786..0000000000 --- a/tools/pylib/boututils/check_scaling.py +++ /dev/null @@ -1,90 +0,0 @@ -"""Functions for checking the error scaling of MMS or MES results - -""" - -from numpy import array, isclose, log, polyfit - - -def get_order(grid_spacing, errors): - """Get the convergence order of errors over the full range of - grid_spacing, and at small spacings - - Parameters - ---------- - grid_spacing : list of float - The grid spacing or inverse of number of grid points - errors : list of float - The error at each grid spacing - - Returns - ------- - tuple of float - The first value is the error scaling over the full range of - grid spacings; the second value is the scaling over the last - two points - - """ - if len(errors) != 
len(grid_spacing): - raise ValueError("errors (len: {}) and grid_spacing (len: {}) should be the same length" - .format(len(errors), len(grid_spacing))) - - full_range = polyfit(log(grid_spacing), log(errors), 1) - - small_spacing = log(errors[-2] / errors[-1]) / log(grid_spacing[-2] / grid_spacing[-1]) - - return (full_range[0], small_spacing) - - -def check_order(error_list, expected_order, tolerance=2.e-1, spacing=None): - """Check if the actual_order is sufficiently close to the - expected_order within a given tolerance - - """ - - if len(error_list) < 2: - raise RuntimeError("Expected at least 2 data points to calculate error") - - success = True - - for i in range(len(error_list)-1): - grid_spacing = 2 if spacing is None else spacing[i] / spacing[i+1] - actual_order = log(error_list[i] / error_list[i+1]) / log(grid_spacing) - - if not isclose(actual_order, expected_order, atol=tolerance, rtol=0): - success = False - return success - - -def error_rate_table(errors, grid_sizes, label): - """Create a nicely formatted table of the error convergence rate over - the grid_sizes - - The error rate is calculated between adjacent points - - Parameters - ---------- - errors : list of float - The errors at each grid size - grid_sizes : list of int - The number of grid points - label : string - What the error is measuring - - Returns - ------- - string - - """ - if len(errors) != len(grid_sizes): - raise ValueError("errors (len: {}) and grid_sizes (len: {}) should be the same length" - .format(len(errors), len(grid_sizes))) - - dx = 1. / array(grid_sizes) - message = "{}:\nGrid points | Error | Rate\n".format(label) - for i, grid_size in enumerate(grid_sizes): - message += "{:<11} | {:f} | ".format(grid_size, errors[i]) - if i > 0: - message += "{:f} \n".format(log(errors[i] / errors[i-1]) / log(dx[i] / dx[i-1])) - else: - message += "--\n" - return message diff --git a/tools/pylib/boututils/closest_line.py b/tools/pylib/boututils/closest_line.py deleted file mode 100644 index 42dbbe047f..0000000000 --- a/tools/pylib/boututils/closest_line.py +++ /dev/null @@ -1,14 +0,0 @@ -from builtins import range -import numpy -# Find the closest contour line to a given point -def closest_line(n, x, y, ri, zi, mind=None): - - mind = numpy.min( (x[0] - ri)**2 + (y[0] - zi)**2 ) - ind = 0 - - for i in range (1, n) : - d = numpy.min( (x[i] - ri)**2 + (y[i] - zi)**2 ) - if d < mind : - mind = d - ind = i - return ind diff --git a/tools/pylib/boututils/contour.py b/tools/pylib/boututils/contour.py deleted file mode 100644 index 0d648cab83..0000000000 --- a/tools/pylib/boututils/contour.py +++ /dev/null @@ -1,82 +0,0 @@ -""" -Contour calculation routines - -http://members.bellatlantic.net/~vze2vrva/thesis.html - -""" -from __future__ import print_function -from __future__ import division -from past.utils import old_div - -import numpy as np - - -def contour(f, level): - """Return a list of contours matching the given level""" - - if len(f.shape) != 2: - print("Contour only works on 2D data") - return None - nx,ny = f.shape - - # Go through each cell edge and mark which ones contain - # a level crossing. Approximating function as - # f = axy + bx + cy + d - # Hence linear interpolation along edges. 
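    # Worked example: if f = 1.0 at one end of an edge and f = 3.0 at the
    # other, the contour at level = 2.0 crosses the edge at a fraction
    # (level - a) / (b - a) = (2.0 - 1.0) / (3.0 - 1.0) = 0.5 along it.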
- - edgecross = {} # Dictionary: (cell number, edge number) to crossing location - - for i in np.arange(nx-1): - for j in np.arange(ny-1): - # Lower-left corner of cell is (i,j) - if (np.max(f[i:(i+2),j:(j+2)]) < level) or (np.min(f[i:(i+2),j:(j+2)]) > level): - # not in the correct range - skip - continue - - # Check each edge - ncross = 0 - def location(a, b): - if (a > level) ^ (a > level): - # One of the corners is > level, and the other is <= level - ncross += 1 - # Find location - return old_div((level - a), (b - a)) - else: - return None - - loc = [ - location(f[i,j], f[i+1,j]), - location(f[i+1,j], f[i+1,j+1]), - location(f[i+1,j+1], f[i,j+1]), - location(f[i,j+1], f[i,j])] - - if ncross != 0: # Only put into dictionary if has a crossing - cellnr = (ny-1)*i + j # The cell number - edgecross[cellnr] = [loc,ncross] # Tack ncross onto the end - - # Process crossings into contour lines - - while True: - # Start from an arbitrary location and follow until - # it goes out of the domain or closes - try: - startcell, cross = edgecross.popitem() - except KeyError: - # No keys left so finished - break - - def follow(): - return - - # Follow - - return - -def find_opoints(var2d): - """Find O-points in psi i.e. local minima/maxima""" - return - -def find_xpoints(var2d): - """Find X-points in psi i.e. inflection points""" - return - diff --git a/tools/pylib/boututils/crosslines.py b/tools/pylib/boututils/crosslines.py deleted file mode 120000 index fa0acafd36..0000000000 --- a/tools/pylib/boututils/crosslines.py +++ /dev/null @@ -1 +0,0 @@ -../../tokamak_grids/pyGridGen/crosslines.py \ No newline at end of file diff --git a/tools/pylib/boututils/datafile.py b/tools/pylib/boututils/datafile.py deleted file mode 100644 index ef2c62c57d..0000000000 --- a/tools/pylib/boututils/datafile.py +++ /dev/null @@ -1,954 +0,0 @@ -"""File I/O class - -A wrapper around various NetCDF libraries and h5py, used by BOUT++ -routines. Creates a consistent interface across machines - -Supported libraries: - -- ``h5py`` (for HDF5 files) -- ``netCDF4`` (preferred NetCDF library) - -NOTE ----- -NetCDF and HDF5 include unlimited dimensions, but this library is just -for very simple I/O operations. Educated guesses are made for the -dimensions. - -TODO ----- -- Don't raise ``ImportError`` if no NetCDF libraries found, use HDF5 - instead? -- Cleaner handling of different NetCDF libraries -- Support for h5netcdf? - -""" - -from __future__ import print_function -from builtins import map, zip, str, object - -import numpy as np -import time -import getpass -from boututils.boutwarnings import alwayswarn -from boututils.boutarray import BoutArray - -try: - from netCDF4 import Dataset - has_netCDF = True -except ImportError: - raise ImportError( - "DataFile: No supported NetCDF modules available -- requires netCDF4") - -try: - import h5py - has_h5py = True -except ImportError: - has_h5py = False - - -class DataFile(object): - """File I/O class - - A wrapper around various NetCDF libraries and h5py, used by BOUT++ - routines. Creates a consistent interface across machines - - Parameters - ---------- - filename : str, optional - Name of file to open. If no filename supplied, you will need - to call :py:obj:`~DataFile.open` and supply `filename` there - write : bool, optional - If True, open the file in read-write mode (existing files will - be appended to). Default is read-only mode - create : bool, optional - If True, open the file in write mode (existing files will be - truncated). 
Default is read-only mode - format : str, optional - Name of a filetype to use (e.g. ``NETCDF3_CLASSIC``, - ``NETCDF3_64BIT``, ``NETCDF4``, ``HDF5``) - - TODO - ---- - - `filename` should not be optional! - - Take a ``mode`` argument to be more in line with other file types - - `format` should be checked to be a sensible value - - Make sure ``__init__`` methods are first - - Make `impl` and `handle` private - - """ - impl = None - - def __init__(self, filename=None, write=False, create=False, format='NETCDF3_64BIT', **kwargs): - """ - - NetCDF formats are described here: http://unidata.github.io/netcdf4-python/ - - NETCDF3_CLASSIC Limited to 2.1Gb files - - NETCDF3_64BIT_OFFSET or NETCDF3_64BIT is an extension to allow larger file sizes - - NETCDF3_64BIT_DATA adds 64-bit integer data types and 64-bit dimension sizes - - NETCDF4 and NETCDF4_CLASSIC use HDF5 as the disk format - """ - if filename is not None: - if filename.split('.')[-1] in ('hdf5', 'hdf', 'h5'): - self.impl = DataFile_HDF5( - filename=filename, write=write, create=create, format=format) - else: - self.impl = DataFile_netCDF( - filename=filename, write=write, create=create, format=format, **kwargs) - elif format == 'HDF5': - self.impl = DataFile_HDF5( - filename=filename, write=write, create=create, - format=format) - else: - self.impl = DataFile_netCDF( - filename=filename, write=write, create=create, format=format, **kwargs) - - def open(self, filename, write=False, create=False, - format='NETCDF3_CLASSIC'): - """Open the file - - Parameters - ---------- - filename : str, optional - Name of file to open - write : bool, optional - If True, open the file in read-write mode (existing files will - be appended to). Default is read-only mode - create : bool, optional - If True, open the file in write mode (existing files will be - truncated). Default is read-only mode - format : str, optional - Name of a filetype to use (e.g. ``NETCDF3_CLASSIC``, - ``NETCDF4``, ``HDF5``) - - TODO - ---- - - Return the result of calling open to be more like stdlib's - open - - `keys` should be more pythonic (return generator) - - """ - self.impl.open(filename, write=write, create=create, - format=format) - - def close(self): - """Close a file and flush data to disk - - """ - self.impl.close() - - def __del__(self): - if self.impl is not None: - self.impl.__del__() - - def __enter__(self): - self.impl.__enter__() - return self - - def __exit__(self, type, value, traceback): - self.impl.__exit__(type, value, traceback) - - def read(self, name, ranges=None, asBoutArray=True): - """Read a variable from the file - - Parameters - ---------- - name : str - Name of the variable to read - ranges : list of slice objects, optional - Slices of variable to read, can also be converted from lists or - tuples of (start, stop, stride). The number of elements in `ranges` - should be equal to the number of dimensions of the variable you - wish to read. 
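As a rough usage sketch of the wrapper documented above (assuming the boututils package is still importable; the file name "BOUT.dmp.0.nc" and the variable name "n_e" are placeholders):

from boututils.datafile import DataFile

# Open read-only (the default), list the contents, and read one variable.
with DataFile("BOUT.dmp.0.nc") as f:
    print(f.list())                    # names of all variables in the file
    ne = f["n_e"]                      # full read, returned as a BoutArray
    print(f.dimensions("n_e"), f.size("n_e"))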
See :py:obj:`~DataFile.size` for how to get the - dimensions - asBoutArray : bool, optional - If True, return the variable as a - :py:obj:`~boututils.boutarray.BoutArray` (the default) - - Returns - ------- - ndarray or :py:obj:`~boututils.boutarray.BoutArray` - The variable from the file - (:py:obj:`~boututils.boutarray.BoutArray` if `asBoutArray` - is True) - - """ - if ranges is not None: - for x in ranges: - if isinstance(x, (list, tuple)): - x = slice(*x) - return self.impl.read(name, ranges=ranges, asBoutArray=asBoutArray) - - def list(self): - """List all variables in the file - - Returns - ------- - list of str - A list containing all the names of the variables - - """ - return self.impl.list() - - def keys(self): - """A synonym for :py:obj:`~DataFile.list` - - TODO - ---- - - Make a generator to be more like python3 dict keys - - """ - return self.list() - - def dimensions(self, varname): - """Return the names of all the dimensions of a variable - - Parameters - ---------- - varname : str - The name of the variable - - Returns - ------- - tuple of str - The names of the variable's dimensions - - """ - return self.impl.dimensions(varname) - - def ndims(self, varname): - """Return the number of dimensions for a variable - - Parameters - ---------- - varname : str - The name of the variable - - Returns - ------- - int - The number of dimensions - - """ - return self.impl.ndims(varname) - - def sync(self): - """Write pending changes to disk. - - """ - self.impl.sync() - - def size(self, varname): - """Return the size of each dimension of a variable - - Parameters - ---------- - varname : str - The name of the variable - - Returns - ------- - tuple of int - The size of each dimension - - """ - return self.impl.size(varname) - - def bout_type(self, varname): - """Return the name of the BOUT++ type of a variable - - Possible values are: - - - scalar - - Field2D - - Field3D - - If the variable is an evolving variable (i.e. 
has a time - dimension), then it is appended with a "_t" - - Parameters - ---------- - varname : str - The name of the variable - - Returns - ------- - str - The name of the BOUT++ type - - """ - return self.attributes(varname)["bout_type"] - - def write(self, name, data, info=False): - """Write a variable to file - - If the variable is not a :py:obj:`~boututils.boutarray.BoutArray` with - the ``bout_type`` attribute, a guess will be made for the - dimensions - - Parameters - ---------- - name : str - Name of the variable to use in the file - data : :py:obj:`~boututils.boutarray.BoutArray` or ndarray - An array containing the variable data - info : bool, optional - If True, print information about what is being written to - file - - Returns - ------- - None - - """ - return self.impl.write(name, data, info) - - def __getitem__(self, name): - return self.impl.__getitem__(name) - - def __setitem__(self, key, value): - self.impl.__setitem__(key, value) - - def attributes(self, varname): - """Return a dictionary of attributes - - Parameters - ---------- - varname : str - The name of the variable - - Returns - ------- - dict - The attribute names and their values - - """ - return self.impl.attributes(varname) - - -class DataFile_netCDF(DataFile): - handle = None - - def open(self, filename, write=False, create=False, - format='NETCDF3_CLASSIC'): - if (not write) and (not create): - self.handle = Dataset(filename, "r") - elif create: - self.handle = Dataset(filename, "w", format=format) - else: - self.handle = Dataset(filename, "a") - # Record if writing - self.writeable = write or create - - def close(self): - if self.handle is not None: - self.handle.close() - self.handle = None - - def __init__(self, filename=None, write=False, create=False, - format='NETCDF3_CLASSIC', **kwargs): - self._kwargs = kwargs - if not has_netCDF: - message = "DataFile: No supported NetCDF python-modules available" - raise ImportError(message) - if filename is not None: - self.open(filename, write=write, create=create, format=format) - self._attributes_cache = {} - - def __del__(self): - self.close() - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close() - - def read(self, name, ranges=None, asBoutArray=True): - """Read a variable from the file.""" - if self.handle is None: - return None - - try: - var = self.handle.variables[name] - n = name - except KeyError: - # Not found. 
Try to find using case-insensitive search - var = None - for n in list(self.handle.variables.keys()): - if n.lower() == name.lower(): - print( - "WARNING: Reading '" + n + "' instead of '" + name + "'") - var = self.handle.variables[n] - if var is None: - return None - - if asBoutArray: - attributes = self.attributes(n) - - ndims = len(var.dimensions) - if ndims == 0: - data = var.getValue() - if asBoutArray: - data = BoutArray(data, attributes=attributes) - return data # [0] - else: - if ranges: - if len(ranges) == 2 * ndims: - # Reform list of pairs of ints into slices - ranges = [slice(a, b) for a, b in - zip(ranges[::2], ranges[1::2])] - elif len(ranges) != ndims: - raise ValueError("Incorrect number of elements in ranges argument " - "(got {}, expected {} or {})" - .format(len(ranges), ndims, 2 * ndims)) - - data = var[ranges[:ndims]] - if asBoutArray: - data = BoutArray(data, attributes=attributes) - return data - else: - data = var[:] - if asBoutArray: - data = BoutArray(data, attributes=attributes) - return data - - def __getitem__(self, name): - var = self.read(name) - if var is None: - raise KeyError("No variable found: " + name) - return var - - def __setitem__(self, key, value): - self.write(key, value) - - def list(self): - if self.handle is None: - return [] - return list(self.handle.variables.keys()) - - def keys(self): - return self.list() - - def dimensions(self, varname): - if self.handle is None: - return None - try: - var = self.handle.variables[varname] - except KeyError: - raise ValueError("No such variable") - return var.dimensions - - def ndims(self, varname): - if self.handle is None: - raise ValueError("File not open") - try: - var = self.handle.variables[varname] - except KeyError: - raise ValueError("No such variable") - return len(var.dimensions) - - def sync(self): - self.handle.sync() - - def size(self, varname): - if self.handle is None: - return [] - try: - var = self.handle.variables[varname] - except KeyError: - return [] - - def dimlen(d): - dim = self.handle.dimensions[d] - if dim is not None: - t = type(dim).__name__ - if t == 'int': - return dim - return len(dim) - return 0 - return [dimlen(d) for d in var.dimensions] - - def _bout_type_from_dimensions(self, varname): - dims = self.dimensions(varname) - dims_dict = { - ('t', 'x', 'y', 'z'): "Field3D_t", - ('t', 'x', 'y'): "Field2D_t", - ('t', 'x', 'z'): "FieldPerp_t", - ('t',): "scalar_t", - ('x', 'y', 'z'): "Field3D", - ('x', 'y'): "Field2D", - ('x', 'z'): "FieldPerp", - ('x'): "ArrayX", - (): "scalar", - } - - return dims_dict.get(dims, None) - - def _bout_dimensions_from_type(self, bout_type): - dims_dict = { - "Field3D_t": ('t', 'x', 'y', 'z'), - "Field2D_t": ('t', 'x', 'y'), - "FieldPerp_t": ('t', 'x', 'z'), - "scalar_t": ('t',), - "Field3D": ('x', 'y', 'z'), - "Field2D": ('x', 'y'), - "FieldPerp": ('x', 'z'), - "ArrayX": ('x'), - "scalar": (), - } - - return dims_dict.get(bout_type, None) - - def write(self, name, data, info=False): - - if not self.writeable: - raise Exception("File not writeable. Open with write=True keyword") - - s = np.shape(data) - - # Get the variable type - t = type(data).__name__ - - if t == 'NoneType': - print("DataFile: None passed as data to write. Ignoring") - return - - if t == 'ndarray' or t == 'BoutArray': - # Numpy type or BoutArray wrapper for Numpy type. 
Get the data type - t = data.dtype.str - - if t == 'list': - # List -> convert to numpy array - data = np.array(data) - t = data.dtype.str - - if (t == 'int') or (t == ' -# -# * Modified to allow calls with only one argument -# - -def int_func( xin, fin=None, simple=None): - if fin is None : - f = copy.deepcopy(xin) - x = numpy.arange(numpy.size(f)).astype(float) - else: - f = copy.deepcopy(fin) - x = copy.deepcopy(xin) - - n = numpy.size(f) - - g = numpy.zeros(n) - - if simple is not None : - # Just use trapezium rule - - g[0] = 0.0 - for i in range (1, n) : - g[i] = g[i-1] + 0.5*(x[i] - x[i-1])*(f[i] + f[i-1]) - - else: - - n2 = numpy.int(old_div(n,2)) - - g[0] = 0.0 - for i in range (n2, n) : - g[i] = simps( f[0:i+1], x[0:i+1]) - - - - for i in range (1, n2) : - g[i] = g[n-1] - simps( f[i::], x[i::]) - - return g - - diff --git a/tools/pylib/boututils/linear_regression.py b/tools/pylib/boututils/linear_regression.py deleted file mode 100644 index c2f3f2cc39..0000000000 --- a/tools/pylib/boututils/linear_regression.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import division -# -# Perform a linear regression fit -# - -from numpy import mean - -def linear_regression(x, y): - """ Simple linear regression of two variables - - y = a + bx - - a, b = linear_regression(x, y) - - """ - - if x.size != y.size: - raise ValueError("x and y inputs must be the same size") - - mx = mean(x) - my = mean(y) - - b = (mean(x*y) - mx*my) / (mean(x**2) - mx**2) - a = my - b*mx - - return a, b - diff --git a/tools/pylib/boututils/local_min_max.py b/tools/pylib/boututils/local_min_max.py deleted file mode 120000 index 6ac6b0819e..0000000000 --- a/tools/pylib/boututils/local_min_max.py +++ /dev/null @@ -1 +0,0 @@ -../../tokamak_grids/pyGridGen/local_min_max.py \ No newline at end of file diff --git a/tools/pylib/boututils/mode_structure.py b/tools/pylib/boututils/mode_structure.py deleted file mode 100644 index 1196c823c6..0000000000 --- a/tools/pylib/boututils/mode_structure.py +++ /dev/null @@ -1,417 +0,0 @@ -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division -from builtins import range -from past.utils import old_div -import numpy as numpy -import sys -from pylab import plot,xlabel,ylim,savefig,gca, xlim, show, clf, draw, title -from boututils.fft_integrate import fft_integrate -from .ask import query_yes_no - -#; Calculates mode structure from BOUT++ output -#; for comparison to ELITE -#; -#; April 2009 - Added ERGOS flag. 
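The closed-form fit used in linear_regression.py above can be checked in a few lines on synthetic data (numpy only; the numbers are made up):

import numpy as np

x = np.linspace(0.0, 1.0, 50)
y = 1.5 + 2.0 * x                      # synthetic data with a = 1.5, b = 2.0

# b = (E[xy] - E[x]E[y]) / (E[x^2] - E[x]^2),  a = E[y] - b*E[x]
mx, my = x.mean(), y.mean()
b = (np.mean(x * y) - mx * my) / (np.mean(x ** 2) - mx ** 2)
a = my - b * mx
print(a, b)                            # recovers 1.5 and 2.0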
This is intended -#; for producing plots similar to the ERGOS -#; vacuum RMP code - -# interpolates a 1D periodic function -def zinterp( v, zind): - - v = numpy.ravel(v) - - nz = numpy.size(v) - z0 = numpy.round(zind) - - p = zind - float(z0) # between -0.5 and 0.5 - - if p < 0.0 : - z0 = z0 - 1 - p = p + 1.0 - - - z0 = ((z0 % (nz-1)) + (nz-1)) % (nz-1) - - # for now 3-point interpolation - - zp = (z0 + 1) % (nz - 1) - zm = (z0 - 1 + (nz-1)) % (nz - 1) - - - result = 0.5*p*(p-1.0)*v[zm.astype(int)] \ - + (1.0 - p*p)*v[z0.astype(int)] \ - + 0.5*p*(p+1.0)*v[zp.astype(int)] - - return result - - -def mode_structure( var_in, grid_in, period=1, - zangle=0.0, n=None, addq=None, output=None, - xq=None, xpsi=None, slow=None, subset=None, - filter=None, famp=None, quiet=None, - ergos=None, ftitle=None, - xrange=None, yrange=None, rational=None, pmodes=None, - _extra=None): - - - #ON_ERROR, 2 - # - # period = 1 ; default = full torus - - if n is None : - if filter is not None : - n = filter*period - else: n = period - - - # if (grid_in.JYSEPS1_1 GE 0) OR (grid_in.JYSEPS1_2 NE grid_in.JYSEPS2_1) OR (grid_in.JYSEPS2_2 NE grid_in.ny-1) THEN BEGIN - # PRINT, "Mesh contains branch-cuts. Keeping only core" - # - # grid = core_mesh(grid_in) - # var = core_mesh(var_in, grid_in) - #ENDIF ELSE BEGIN - grid = grid_in - vr = var_in - #ENDELSE - - - #IF KEYWORD_SET(filter) THEN BEGIN - # var = zfilter(var, filter) - #ENDIF - - nx = grid.get('nx') - ny = grid.get('ny') - - s = numpy.shape(vr) - if numpy.size(s) != 3 : - print("Error: Variable must be 3 dimensional") - return - - if (s[0] != nx) or (s[1] != ny) : - print("Error: Size of variable doesn't match grid") - - return - - nz = s[2] - - dz = 2.0*numpy.pi / numpy.float(period*(nz-1)) - - # GET THE TOROIDAL SHIFT - tn = list(grid.keys()) - tn = numpy.char.upper(tn) - count = numpy.where(tn == "QINTY") - if numpy.size(count) > 0 : - print("Using qinty as toroidal shift angle") - zShift = grid.get('qinty') - else: - count = numpy.where(tn == "ZSHIFT") - if numpy.size(count) > 0 : - print("Using zShift as toroidal shift angle") - zShift = grid.get('zShift') - else: - print("ERROR: Can't find qinty or zShift variable") - return - - zshift=grid.get('zShift') - - rxy=grid.get('Rxy') - zxy=grid.get('Zxy') - Btxy=grid.get('Btxy') - Bpxy=grid.get('Bpxy') - shiftangle=grid.get('ShiftAngle') - psixy=grid.get('psixy') - psi_axis=grid.get('psi_axis') - psi_bndry=grid.get('psi_bndry') - - np = 4*ny - - nf = old_div((np - 2), 2) - famp = numpy.zeros((nx, nf)) - - for x in range (nx): - #;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - # transform data into fixed poloidal angle - - # get number of poloidal points - nskip = numpy.zeros(ny-1) - for y in range (ny-1): - yp = y + 1 - nskip[y] = old_div(numpy.abs(zshift[x,yp] - zshift[x,y]), dz) - 1 - - - nskip =numpy.int_(numpy.round(nskip)) - nskip=numpy.where(nskip > 0, nskip, 0) - - - ny2 = numpy.int_(ny + numpy.sum(nskip)) # number of poloidal points - - # IF NOT KEYWORD_SET(quiet) THEN PRINT, x, ny2 - - f = numpy.zeros(ny2) # array for values - R = numpy.zeros(ny2) # Rxy - Z = numpy.zeros(ny2) # Zxy - BtBp = numpy.zeros(ny2) # Bt / Bp - - # interpolate values onto points - - ypos = 0 - for y in range(ny-1): - # original points - zind = old_div((zangle - zshift[x,y]),dz) - - - if numpy.size(zind) != 1 : sys.exit() - f[ypos] = zinterp(vr[x,y,:], zind) - R[ypos] = rxy[x,y] - Z[ypos] = zxy[x,y] - BtBp[ypos] = old_div(Btxy[x,y], Bpxy[x,y]) - - ypos = ypos + 1 - - # add the extra points - - zi0 = old_div((zangle - zshift[x,y]),dz) - 
zip1 = old_div((zangle - zshift[x,y+1]),dz) - - dzi = old_div((zip1 - zi0), (nskip[y] + 1)) - - for i in range (nskip[y]): - zi = zi0 + numpy.float(i+1)*dzi # zindex - w = old_div(numpy.float(i+1),numpy.float(nskip[y]+1)) # weighting - - f[ypos+i] = w*zinterp(vr[x,y+1,:], zi) + (1.0-w)*zinterp(vr[x,y,:], zi) - - R[ypos+i] = w*rxy[x,y+1] + (1.0-w)*rxy[x,y] - Z[ypos+i] = w*zxy[x,y+1] + (1.0-w)*zxy[x,y] - BtBp[ypos+i] = old_div((w*Btxy[x,y+1] + (1.0-w)*Btxy[x,y]), (w*Bpxy[x,y+1] + (1.0-w)*Bpxy[x,y])) - - ypos = ypos + nskip[y] - - # final point - - zind = old_div((zangle - zShift[x,ny-1]),dz) - - f[ypos] = zinterp(vr[x,ny-1,:], zind) - R[ypos] = rxy[x,ny-1] - Z[ypos] = zxy[x,ny-1] - BtBp[ypos] = old_div(Btxy[x,ny-1], Bpxy[x,ny-1]) - - - #STOP - - #;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - #; calculate poloidal angle - - - drxy = numpy.gradient(R) - dzxy = numpy.gradient(Z) - dl = numpy.sqrt(drxy*drxy + dzxy*dzxy) - - nu = dl * BtBp / R # field-line pitch - theta = old_div(numpy.real(fft_integrate(nu)), shiftangle[x]) - - if numpy.max(theta) > 1.0 : - # mis-match between q and nu (integration error?) - if quiet is None : print("Mismatch ", x, numpy.max(theta)) - theta = old_div(theta, (numpy.max(theta) + numpy.abs(theta[1] - theta[0]))) - - - theta = 2.0*numpy.pi * theta - - #;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - #; take Fourier transform in theta angle - - tarr = 2.0*numpy.pi*numpy.arange(np) / numpy.float(np) # regular array in theta - - farr = numpy.interp(tarr, theta, f) - - #STOP - - ff = old_div(numpy.fft.fft(farr),numpy.size(farr)) - - for i in range (nf): - famp[x, i] = 2.0*numpy.abs(ff[i+1]) - - - - - # sort modes by maximum size - - fmax = numpy.zeros(nf) - for i in range(nf): - fmax[i] = numpy.max(famp[:,i]) - - - inds = numpy.argsort(fmax)[::-1] - - - if pmodes is None : pmodes = 10 - - qprof = old_div(numpy.abs(shiftangle), (2.0*numpy.pi)) - - xarr = numpy.arange(nx) - xtitle="Radial index" - if xq is not None : - # show as a function of q*n - xarr = qprof*numpy.float(n) - - xtitle="q * n" - elif xpsi is not None : - # show as a function of psi. 
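The Fourier-amplitude step above (FFT of the signal on a regular poloidal angle grid, keeping 2|c_m| as the amplitude of mode m) can be sketched on a toy signal as follows (numpy only; the signal is illustrative):

import numpy as np

npts = 256
tarr = 2.0 * np.pi * np.arange(npts) / npts               # regular theta grid
farr = 0.3 * np.cos(3 * tarr) + 0.1 * np.sin(7 * tarr)    # toy poloidal signal

ff = np.fft.fft(farr) / npts
famp = 2.0 * np.abs(ff[1:npts // 2])                      # amplitude of m = 1, 2, ...
print(np.argmax(famp) + 1, famp.max())                    # dominant mode m = 3, amplitude ~0.3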
Should be normalised psi - xarr = psixy[:,0] - - # Check if the grid includes psi axis and boundary - count1 = numpy.where(tn == "PSI_AXIS") - count2 = numpy.where(tn == "PSI_BNDRY") - - if (numpy.size(count1) > 0) and (numpy.size(count2) > 0) : - xarr = old_div((xarr - psi_axis), (psi_bndry - psi_axis)) - - else: - # Use hard-wired values - print("WARNING: Using hard-wired psi normalisation") - # for circular case - #xarr = (xarr + 0.1937) / (0.25044 + 0.1937) - # for ellipse case - #xarr = xarr / 0.74156 - - # cbm18_dens8 - xarr = old_div((xarr + 0.854856), (0.854856 + 0.0760856)) - - - xtitle="Psi normalised" - - - - if slow is not None : - # plot modes slowly for examination - #safe_colors, /first -# ax = fig.add_subplot(111) - # go through and plot each mode - for i in range(nf): - if numpy.max(famp[:,i]) > 0.05*numpy.max(famp): - print("Mode m = ", i+1, " of ", nf) - plot(xarr, famp[:,i], 'k') - ylim(0,numpy.max(famp)) - xlim(xrange) - xlabel(xtitle) - show(block=False) - - q = old_div(numpy.float(i+1), numpy.float(n)) - - pos = numpy.interp(q, qprof, xarr) - - plot( [pos, pos],[0, 2.*numpy.max(fmax)], 'k--') - draw() - - ans=query_yes_no('next mode') - if ans: - clf() - - - - elif ergos is not None : - # ERGOS - style output - - if output is not None and slow is None : - savefig('output.png') - - -# -# contour2, famp, xarr, indgen(nf)+1, $ -# xlabel=xtitle, xrange=xrange, yrange=yrange, _extra=_extra -# -# ; overplot the q profile -# -# oplot, xarr, qprof * n, color=1, thick=2 -# -# IF KEYWORD_SET(rational) THEN BEGIN -# maxm = FIX(MAX(qprof)) * n -# -# qreson = (FINDGEN(maxm)+1) / FLOAT(n) -# -# ; get x location for each of these resonances -# qloc = INTERPOL(xarr, qprof, qreson) -# -# oplot, qloc, findgen(maxm)+1., psym=4, color=1 -# ENDIF -# -# IF KEYWORD_SET(output) THEN BEGIN -# ; output data to save file -# SAVE, xarr, qprof, famp, file=output+".idl" -# -# DEVICE, /close -# SET_PLOT, 'X' -# ENDIF - - else: - if output is not None and slow is None : - savefig('output.png') - # savefig('output.ps') - - # - # - if subset is not None : - - # get number of modes larger than 5% of the maximum - count = numpy.size(numpy.where(fmax > 0.10*numpy.max(fmax))) - - minind = numpy.min(inds[0:count]) - maxind = numpy.max(inds[0:count]) - - print("Mode number range: ", minind, maxind) - - plot( xarr, famp[:,0], 'k', visible=False) - ylim(0,numpy.max(famp)) - xlabel(xtitle) - xlim(xrange) - title(ftitle) - - gca().set_color_cycle(['red', 'red', 'black', 'black']) - - for i in range(minind, maxind+1, subset): - plot( xarr, famp[:,i]) - - q = old_div(numpy.float(i+1), numpy.float(n)) - pos = numpy.interp(q, qprof, xarr) - - plot( [pos, pos], [0, 2.*numpy.max(fmax)], '--') - - - # - else: - # default - just plot everything - gca().set_color_cycle(['black', 'red']) - - plot(xarr, famp[:,0]) - ylim(0,numpy.max(famp)) #, color=1, - xlabel(xtitle) #, chars=1.5, xrange=xrange,title=title, _extra=_extra - xlim(xrange) - for i in range (nf): - plot( xarr, famp[:,i]) - - - # - # IF KEYWORD_SET(addq) THEN BEGIN - # - # FOR i=0, pmodes-1 DO BEGIN - # PRINT, "m = "+STRTRIM(STRING(inds[i]+1), 2)+" amp = "+STRTRIM(STRING(fmax[inds[i]]),2) - # q = FLOAT(inds[i]+1) / FLOAT(n) - # - # pos = INTERPOL(xarr, qprof, q) - # - # oplot, [pos, pos], [0, 2.*MAX(fmax)], lines=2, color=1 - # ENDFOR - # ENDIF - # - # ENDELSE - # IF KEYWORD_SET(output) THEN BEGIN - # ; output data to save file - # SAVE, xarr, qprof, famp, file=output+".idl" - # - # DEVICE, /close - # SET_PLOT, 'X' - # ENDIF - # ENDELSE - # diff --git 
a/tools/pylib/boututils/moment_xyzt.py b/tools/pylib/boututils/moment_xyzt.py deleted file mode 100644 index 8d7dc4c012..0000000000 --- a/tools/pylib/boututils/moment_xyzt.py +++ /dev/null @@ -1,80 +0,0 @@ -from __future__ import print_function -from __future__ import division -from builtins import range -from past.utils import old_div -import numpy as np -from bunch import Bunch - - -def RMSvalue( vec1d): -#; -#; -get rms of a 1D signal -#;------------------------ - - nel=np.size(vec1d) - valav=old_div(np.sum(vec1d),nel) - valrms=np.sqrt(old_div(np.sum((vec1d-valav)**2),nel)) - acvec=vec1d-valav - - - return Bunch(valrms=valrms, valav=valav, acvec=acvec) - - - - -def moment_xyzt( sig_xyzt, *args):#rms=None, dc=None, ac=None): -#; -#; Calculate moments of a 4d signal of (x,y,z,t), i.e, -#; -RMS, i.e., a function of (x,y,t) -#; -DC (average in z), i.e., a function of (x,y,t) -#; -AC (DC subtracted out), i.e., a function of (x,y,z,t) -#;------------------------------------------------------------------- - - try: # return to caller - - d = np.shape(sig_xyzt) - if np.size(d) != 4 : - print("Error: Variable must be 4D (x,y,z,t)") - return - - - siz=np.shape(sig_xyzt) - rms=np.zeros((siz[0],siz[1],siz[2])) - dc=np.zeros((siz[0],siz[1],siz[2])) - if 'AC' in args : ac=np.zeros((siz[0],siz[1],siz[2],siz[3])) - - - data = sig_xyzt - if np.modf(np.log2(siz[3]))[0] != 0.0 : - print("WARNING: Expecting a power of 2 in Z direction") - - if np.modf(np.log2(siz[3]-1))[0] and (siz[3] > 1) : - print(" -> Truncating last point to get power of 2") - data = data[:,:,0:(siz[3]-2),:] - siz[3] = siz[3] - 1 - - - for ix in range (siz[1]): - for iy in range (siz[2]): - for it in range (siz[0]): - val=RMSvalue(sig_xyzt[it,ix,iy,:]) - - rms[it,ix,iy]=val.valrms - dc[it,ix,iy]=val.valav - if 'AC' in args : ac[it,ix,iy,:]=[val.acvec,val.acvec[0]] - - res=Bunch() - - if 'RMS' in args: - res.rms = rms - if 'DC' in args: - res.dc = dc - if 'AC' in args: - res.ac = ac - - if 'RMS' not in args and 'DC' not in args and 'AC' not in args : - print('Wrong argument') - return res - except: - print('moment_xyz failed') - return diff --git a/tools/pylib/boututils/options.py b/tools/pylib/boututils/options.py deleted file mode 100644 index 2de3ebc3e0..0000000000 --- a/tools/pylib/boututils/options.py +++ /dev/null @@ -1,165 +0,0 @@ -"""Module to allow BOUT.inp files to be read into python and -manipulated with ease. 
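The per-point RMS/DC/AC split computed by moment_xyzt above is equivalent to the following vectorised numpy sketch for an array with dimensions (t, x, y, z) (toy data, illustration only):

import numpy as np

sig = np.random.default_rng(0).normal(size=(4, 8, 16, 32))   # toy (t, x, y, z) data

dc = sig.mean(axis=-1)                        # z-average, shape (t, x, y)
ac = sig - dc[..., np.newaxis]                # fluctuation about the z-average
rms = np.sqrt(np.mean(ac ** 2, axis=-1))      # RMS of the fluctuation, shape (t, x, y)
print(dc.shape, rms.shape, ac.shape)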
- - -Nick Walkden, June 2015 -nick.walkden@ccfe.ac.uk - -""" - -from copy import copy -import os - - -class BOUTOptions(object): - """Class to store and interact with options from BOUT++ - - Parameters - ---------- - inp_path : str, optional - Path to BOUT++ options file - - Examples - -------- - - Instantiate with - - >>> myOpts = BOUTOptions() - >>> myOpts.read_inp('path/to/input/file') - - or - - >>> myOpts = BOUTOptions('path/to/input/file') - - To get a list of sections use - - >>> section_list = myOpts.list_sections - >>> # Also print to screen: - >>> section_list = myOpts.list_sections(verbose=True) - - Each section of the input is stored as a dictionary attribute so - that, if you want all the settings in the section [ddx]: - - >> ddx_opt_dict = myOpts.ddx - - and access individual settings by - - >>> ddx_setting = myOpts.ddx['first'] - - Any settings in BOUT.inp without a section are stored in - - >>> root_dict = myOpts.root - - TODO - ---- - - Merge this and BoutOptionsFile or replace with better class - - """ - - def __init__(self, inp_path=None): - - self._sections = ['root'] - - for section in self._sections: - super(BOUTOptions,self).__setattr__(section,{}) - - if inp_path is not None: - self.read_inp(inp_path) - - def read_inp(self, inp_path=''): - """Read a BOUT++ input file - - Parameters - ---------- - inp_path : str, optional - Path to the input file (default: current directory) - - """ - - try: - inpfile = open(os.path.join(inp_path, 'BOUT.inp'),'r') - except: - raise TypeError("ERROR: Could not read file "+\ - os.path.join(inp_path, "BOUT.inp")) - - current_section = 'root' - inplines = inpfile.read().splitlines() - # Close the file after use - inpfile.close() - for line in inplines: - #remove white space - line = line.replace(" ","") - - - if len(line) > 0 and line[0] is not '#': - #Only read lines that are not comments or blank - if '[' in line: - #Section header - section = line.split('[')[1].split(']')[0] - current_section = copy(section) - if current_section not in self._sections: - self.add_section(current_section) - - elif '=' in line: - #option setting - attribute = line.split('=')[0] - value = line.split('=')[1].split('#')[0] - value = value.replace("\n","") - value = value.replace("\t","") - value = value.replace("\r","") - value = value.replace("\"","") - self.__dict__[copy(current_section)][copy(attribute)] = copy(value) - else: - pass - - def add_section(self, section): - """Add a section to the options - - Parameters - ---------- - section : str - The name of a new section - - TODO - ---- - - Guard against wrong type - """ - self._sections.append(section) - super(BOUTOptions,self).__setattr__(section,{}) - - def remove_section(self, section): - """Remove a section from the options - - Parameters - ---------- - section : str - The name of a section to remove - - TODO - ---- - - Fix undefined variable - """ - if section in self._sections: - self._sections.pop(self._sections.index(sections)) - super(BOUTOptions,self).__delattr__(section) - else: - print("WARNING: Section "+section+" not found.\n") - - def list_sections(self, verbose=False): - """Return all the sections in the options - - Parameters - ---------- - verbose : bool, optional - If True, print sections to screen - - TODO - ---- - - Better pretty-print - """ - if verbose: - print("Sections Contained: \n") - for section in self._sections: - print("\t"+section+"\n") - - return self._sections diff --git a/tools/pylib/boututils/plotdata.py b/tools/pylib/boututils/plotdata.py deleted file mode 100644 
index 72627f4033..0000000000 --- a/tools/pylib/boututils/plotdata.py +++ /dev/null @@ -1,90 +0,0 @@ -from __future__ import print_function -# Plot a data set - -import numpy as np -import matplotlib -import matplotlib.cm as cm -import matplotlib.mlab as mlab -import matplotlib.pyplot as plt - -matplotlib.rcParams['xtick.direction'] = 'out' -matplotlib.rcParams['ytick.direction'] = 'out' - -def plotdata(data, x=None, y=None, - title=None, xtitle=None, ytitle=None, - output=None, range=None, - fill=True, mono=False, colorbar=True, - xerr=None, yerr=None): - """Plot 1D or 2D data, with a variety of options.""" - - size = data.shape - ndims = len(size) - - if ndims == 1: - if (xerr is not None) or (yerr is not None): - # Points with error bars - if x is None: - x = np.arange(size) - errorbar(x, data, xerr, yerr) - # Line plot - if x is None: - plt.plot(data) - else: - plt.plot(x, data) - - elif ndims == 2: - # A contour plot - - if x is None: - x = np.arange(size[1]) - if y is None: - y = np.arange(size[0]) - - if fill: - #plt.contourf(data, colors=colors) - cmap=None - if mono: cmap = cm.gray - plt.imshow(data, interpolation='bilinear', cmap=cmap) - else: - colors = None - if mono: colors = 'k' - - plt.contour(x, y, data, colors=colors) - - # Add a color bar - if colorbar: - CB = plt.colorbar(shrink=0.8, extend='both') - - else: - print("Sorry, can't handle %d-D variables" % ndims) - return - - if title is not None: - plt.title(title) - if xtitle is not None: - plt.xlabel(xtitle) - if ytitle is not None: - plt.ylabel(ytitle) - - if output is not None: - # Write to a file - plt.savefig(output) - else: - # Plot to screen - plt.show() - -def test(): - """Test the plotdata routine.""" - # Generate and plot test data - - delta = 0.025 - x = np.arange(-3.0, 3.0, delta) - y = np.arange(-2.0, 2.0, delta) - X, Y = np.meshgrid(x, y) - Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0) - Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1) - # difference of Gaussians - Z = 10.0 * (Z2 - Z1) - - plotdata(Z, title="test data", fill=False, mono=False) - plotdata(Z, title="Fill in mono", fill=True, mono=True) diff --git a/tools/pylib/boututils/plotpolslice.py b/tools/pylib/boututils/plotpolslice.py deleted file mode 100644 index 924272fb85..0000000000 --- a/tools/pylib/boututils/plotpolslice.py +++ /dev/null @@ -1,143 +0,0 @@ -from __future__ import print_function -from __future__ import division -from builtins import str -from builtins import range -from past.utils import old_div -import numpy as np -from boututils.file_import import file_import -import sys - -if sys.version_info[0]>=3: - message = "polplotslice uses the VTK library through mayavi, which"+\ - " is currently only available in python 2" - raise ImportError(message) -else: - from mayavi import mlab - from tvtk.tools import visual - - - -def zinterp( v, zind): - #v = REFORM(v) - nz = np.size(v) - z0 = np.round(zind) - - p = zind - float(z0) # between -0.5 and 0.5 - - if p < 0.0 : - z0 = z0 - 1 - p = p + 1.0 - - - z0 = ((z0 % (nz-1)) + (nz-1)) % (nz-1) - - # for now 3-point interpolation - - zp = (z0 + 1) % (nz - 1) - zm = (z0 - 1 + (nz-1)) % (nz - 1) - - result = 0.5*p*(p-1.0)*v[zm] \ - + (1.0 - p*p)*v[z0] \ - + 0.5*p*(p+1.0)*v[zp] - - return result - - -def plotpolslice(var3d,gridfile,period=1,zangle=0.0, rz=1, fig=0): - """ data2d = plotpolslice(data3d, 'gridfile' , period=1, zangle=0.0, rz:return (r,z) grid also=1, fig: to do the graph, set to 1 ) """ - - g=file_import(gridfile) - - nx=var3d.shape[0] - ny=var3d.shape[1] - nz=var3d.shape[2] - - 
- zShift=g.get('zShift') - rxy=g.get('Rxy') - zxy=g.get('Zxy') - - dz = 2.0*np.pi / float(period*nz) - - ny2=ny - nskip=np.zeros(ny-1) - for i in range(ny-1): - ip=(i+1)%ny - nskip[i]=0 - for x in range(nx): - ns=old_div(np.max(np.abs(zShift[x,ip]-zShift[x,i])),dz)-1 - if ns > nskip[i] : nskip[i] = ns - - nskip = np.int_(np.round(nskip)) - ny2 = np.int_(ny2 + np.sum(nskip)) - - print("Number of poloidal points in output:" + str(ny2)) - - var2d = np.zeros((nx, ny2)) - r = np.zeros((nx, ny2)) - z = np.zeros((nx, ny2)) - - ypos = 0 - for y in range (ny-1) : - # put in the original points - for x in range (nx): - zind = old_div((zangle - zShift[x,y]),dz) - var2d[x,ypos] = zinterp(var3d[x,y,:], zind) - # IF KEYWORD_SET(profile) THEN var2d[x,ypos] = var2d[x,ypos] + profile[x,y] - r[x,ypos] = rxy[x,y] - z[x,ypos] = zxy[x,y] - - ypos = ypos + 1 - - print((y, ypos)) - - # and the extra points - - for x in range (nx): - zi0 = old_div((zangle - zShift[x,y]),dz) - zip1 = old_div((zangle - zShift[x,y+1]),dz) - - dzi = old_div((zip1 - zi0), (nskip[y] + 1)) - - for i in range (nskip[y]): - zi = zi0 + float(i+1)*dzi # zindex - w = old_div(float(i+1),float(nskip[y]+1)) # weighting - - var2d[x,ypos+i] = w*zinterp(var3d[x,y+1,:], zi) + (1.0-w)*zinterp(var3d[x,y,:], zi) - # IF KEYWORD_SET(profile) THEN var2d[x,ypos+i] = var2d[x,ypos+i] + w*profile[x,y+1] + (1.0-w)*profile[x,y] - r[x,ypos+i] = w*rxy[x,y+1] + (1.0-w)*rxy[x,y] - z[x,ypos+i] = w*zxy[x,y+1] + (1.0-w)*zxy[x,y] - - - - ypos = ypos + nskip[y] - - - # FINAL POINT - - for x in range(nx): - zind = old_div((zangle - zShift[x,ny-1]),dz) - var2d[x,ypos] = zinterp(var3d[x,ny-1,:], zind) - # IF KEYWORD_SET(profile) THEN var2d[x,ypos] = var2d[x,ypos] + profile[x,ny-1] - r[x,ypos] = rxy[x,ny-1] - z[x,ypos] = zxy[x,ny-1] - - - if(fig==1): - - f = mlab.figure(size=(600,600)) - # Tell visual to use this as the viewer. - visual.set_viewer(f) - - - s = mlab.mesh(r,z,var2d, colormap='PuOr')#, wrap_scale='true')#, representation='wireframe') - s.enable_contours=True - s.contour.filled_contours=True - mlab.view(0,0) - - else: - # return according to opt - if rz==1 : - return r,z,var2d - else: - return var2d diff --git a/tools/pylib/boututils/radial_grid.py b/tools/pylib/boututils/radial_grid.py deleted file mode 100644 index e0cf9c446c..0000000000 --- a/tools/pylib/boututils/radial_grid.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import division -from past.utils import old_div -import numpy -#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -# -# radial grid -# -# n - number of grid points -# pin, pout - range of psi -# seps - locations of separatrices -# sep_factor - separatrix peaking -# in_dp=in_dp - Fix the dx on the lower side -# out_dp=out_dp - Fix the dx on the upper side - -def radial_grid( n, pin, pout, include_in, include_out, seps, sep_factor, - in_dp=None, out_dp=None): - - if n == 1 : - return [0.5*(pin+pout)] - - - x = numpy.arange(0.,n) - m = numpy.float(n-1) - if include_in is None : - x = x + 0.5 - m = m + 0.5 - - - if include_out is None: - m = m + 0.5 - - x = old_div(x, m) - - - if in_dp is None and out_dp is None : - # Neither inner or outer gradients set. Just return equal spacing - return pin + (pout - pin)*x - - - norm = (x[1] - x[0])*(pout - pin) - - if in_dp is not None and out_dp is not None : - # Fit to dist = a*i^3 + b*i^2 + c*i - c = old_div(in_dp,norm) - b = 3.*(1. - c) - old_div(out_dp,norm) + c - a = 1. - c - b - elif in_dp is not None : - # Only inner set - c = old_div(in_dp,norm) - a = 0.5*(c-1.) - b = 1. 
- c - a - - #a = 0 - #c = in_dp/norm - #b = 1. - c - else: - # Only outer set. Used in PF region - # Fit to (1-b)*x^a + bx for fixed b - df = old_div(out_dp, norm) - b = 0.25 < df # Make sure a > 0 - a = old_div((df - b), (1. - b)) - vals = pin + (pout - pin)*( (1.-b)*x^a + b*x ) - return vals - - - vals = pin + (pout - pin)*(c*x + b*x^2 + a*x^3) - #STOP - return vals diff --git a/tools/pylib/boututils/read_geqdsk.py b/tools/pylib/boututils/read_geqdsk.py deleted file mode 100644 index 755b3f1cff..0000000000 --- a/tools/pylib/boututils/read_geqdsk.py +++ /dev/null @@ -1,92 +0,0 @@ -from __future__ import print_function -from builtins import range -import numpy -from bunch import Bunch -from geqdsk import Geqdsk - -def read_geqdsk (file): - - data=Geqdsk() - - data.openFile(file) - - nxefit =data.get('nw') - nyefit =data.get('nh') - xdim =data.get('rdim') - zdim =data.get('zdim') - rcentr =data.get('rcentr') - rgrid1 =data.get('rleft') - zmid =data.get('zmid') - - rmagx =data.get('rmaxis') - zmagx =data.get('zmaxis') - simagx =data.get('simag') - sibdry =data.get('sibry') - bcentr =data.get('bcentr') - - cpasma =data.get('current') - #simagx =data.get('simag') - #xdum =data.get() - #rmagx =data.get('rmaxis') - #xdum =data.get() - - #zmagx =data.get('zmaxis') - #xdum =data.get() - #sibdry =data.get('sibry') - #xdum =data.get() - #xdum =data.get() - -# Read arrays - - fpol=data.get('fpol') - pres=data.get('pres') - - f=data.get('psirz') - qpsi=data.get('qpsi') - - nbdry=data.get('nbbbs') - nlim=data.get('limitr') - - if(nlim != 0) : - xlim=data.get('rlim') - ylim=data.get('zlim') - else: - xlim=[0] - ylim=[0] - - rbdry=data.get('rbbbs') - zbdry=data.get('zbbbs') - - - # Reconstruct the (R,Z) mesh - r=numpy.zeros((nxefit, nyefit), numpy.float64) - z=numpy.zeros((nxefit, nyefit), numpy.float64) - - - for i in range(0,nxefit): - for j in range(0,nyefit): - r[i,j] = rgrid1 + xdim*i/(nxefit-1) - z[i,j] = (zmid-0.5*zdim) + zdim*j/(nyefit-1) - - - - f=f.T - - print('nxefit = ', nxefit, ' nyefit= ', nyefit) - - return Bunch(nx=nxefit, ny=nyefit, # Number of horizontal and vertical points - r=r, z=z, # Location of the grid-points - xdim=xdim, zdim=zdim, # Size of the domain in meters - rcentr=rcentr, bcentr=bcentr, # Reference vacuum toroidal field (m, T) - rgrid1=rgrid1, # R of left side of domain - zmid=zmid, # Z at the middle of the domain - rmagx=rmagx, zmagx=zmagx, # Location of magnetic axis - simagx=simagx, # Poloidal flux at the axis (Weber / rad) - sibdry=sibdry, # Poloidal flux at plasma boundary (Weber / rad) - cpasma=cpasma, # - psi=f, # Poloidal flux in Weber/rad on grid points - fpol=fpol, # Poloidal current function on uniform flux grid - pres=pres, # Plasma pressure in nt/m^2 on uniform flux grid - qpsi=qpsi, # q values on uniform flux grid - nbdry=nbdry, rbdry=rbdry, zbdry=zbdry, # Plasma boundary - nlim=nlim, xlim=xlim, ylim=ylim) # Wall boundary \ No newline at end of file diff --git a/tools/pylib/boututils/run_wrapper.py b/tools/pylib/boututils/run_wrapper.py deleted file mode 100644 index 5e05052758..0000000000 --- a/tools/pylib/boututils/run_wrapper.py +++ /dev/null @@ -1,279 +0,0 @@ -"""Collection of functions which can be used to make a BOUT++ run""" - -from builtins import str -import os -import re -import subprocess - -try: - # Python 2.4 onwards - from subprocess import call, Popen, STDOUT, PIPE - lib = "call" -except ImportError: - # FIXME: drop support for python < 2.4! 
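The (R, Z) mesh reconstruction in read_geqdsk above can also be written with numpy broadcasting instead of the explicit double loop; the header values below are placeholders, not taken from any particular equilibrium file:

import numpy as np

nxefit, nyefit = 65, 65               # nw, nh
rgrid1, xdim = 0.84, 1.6              # rleft, rdim [m]
zmid, zdim = 0.0, 3.2                 # zmid, zdim [m]

# r varies with the first index only, z with the second, as in the loop version
r = rgrid1 + xdim * np.arange(nxefit)[:, None] / (nxefit - 1)
z = (zmid - 0.5 * zdim) + zdim * np.arange(nyefit)[None, :] / (nyefit - 1)
r, z = np.broadcast_arrays(r, z)      # both shaped (nxefit, nyefit)
print(r.shape, z.shape)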
- # Use os.system (depreciated) - from os import popen4, system - lib = "system" - - -def getmpirun(default="mpirun -np"): - """Return environment variable named MPIRUN, if it exists else return - a default mpirun command - - Parameters - ---------- - default : str, optional - An mpirun command to return if ``MPIRUN`` is not set in the environment - - """ - MPIRUN = os.getenv("MPIRUN") - - if MPIRUN is None or MPIRUN == "": - MPIRUN = default - print("getmpirun: using the default " + str(default)) - - return MPIRUN - - -def shell(command, pipe=False): - """Run a shell command - - Parameters - ---------- - command : list of str - The command to run, split into (shell) words - pipe : bool, optional - Grab the output as text, else just run the command in the - background - - Returns - ------- - tuple : (int, str) - The return code, and either command output if pipe=True else None - """ - output = None - status = 0 - if lib == "system": - if pipe: - handle = popen4(command) - output = handle[1].read() - else: - status = system(command) - else: - if pipe: - child = Popen(command, stderr=STDOUT, stdout=PIPE, shell=True) - # This returns a b'string' which is casted to string in - # python 2. However, as we want to use f.write() in our - # runtest, we cast this to utf-8 here - output = child.stdout.read().decode("utf-8", "ignore") - # Wait for the process to finish. Note that child.wait() - # would have deadlocked the system as stdout is PIPEd, we - # therefore use communicate, which in the end also waits for - # the process to finish - child.communicate() - status = child.returncode - else: - status = call(command, shell=True) - - return status, output - - -def determineNumberOfCPUs(): - """Number of virtual or physical CPUs on this system - - i.e. user/real as output by time(1) when called with an optimally - scaling userspace-only program - - Taken from a post on stackoverflow: - http://stackoverflow.com/questions/1006289/how-to-find-out-the-number-of-cpus-in-python - - Returns - ------- - int - The number of CPUs - """ - - # Python 2.6+ - try: - import multiprocessing - return multiprocessing.cpu_count() - except (ImportError,NotImplementedError): - pass - - # POSIX - try: - res = int(os.sysconf('SC_NPROCESSORS_ONLN')) - - if res > 0: - return res - except (AttributeError,ValueError): - pass - - # Windows - try: - res = int(os.environ['NUMBER_OF_PROCESSORS']) - - if res > 0: - return res - except (KeyError, ValueError): - pass - - # jython - try: - from java.lang import Runtime - runtime = Runtime.getRuntime() - res = runtime.availableProcessors() - if res > 0: - return res - except ImportError: - pass - - # BSD - try: - sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'], - stdout=subprocess.PIPE) - scStdout = sysctl.communicate()[0] - res = int(scStdout) - - if res > 0: - return res - except (OSError, ValueError): - pass - - # Linux - try: - res = open('/proc/cpuinfo').read().count('processor\t:') - - if res > 0: - return res - except IOError: - pass - - # Solaris - try: - pseudoDevices = os.listdir('/devices/pseudo/') - expr = re.compile('^cpuid@[0-9]+$') - - res = 0 - for pd in pseudoDevices: - if expr.match(pd) is not None: - res += 1 - - if res > 0: - return res - except OSError: - pass - - # Other UNIXes (heuristic) - try: - try: - dmesg = open('/var/run/dmesg.boot').read() - except IOError: - dmesgProcess = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE) - dmesg = dmesgProcess.communicate()[0] - - res = 0 - while '\ncpu' + str(res) + ':' in dmesg: - res += 1 - - if res > 0: - 
return res - except OSError: - pass - - raise Exception('Can not determine number of CPUs on this system') - - -def launch(command, runcmd=None, nproc=None, mthread=None, - output=None, pipe=False, verbose=False): - """Launch parallel MPI jobs - - >>> status = launch(command, nproc, output=None) - - Parameters - ---------- - command : str - The command to run - runcmd : str, optional - Command for running parallel job; defaults to what getmpirun() returns" - nproc : int, optional - Number of processors (default: all available processors) - mthread : int, optional - Number of omp threads (default: the value of the - ``OMP_NUM_THREADS`` environment variable - output : str, optional - Name of file to save output to - pipe : bool, optional - If True, return the output of the command - verbose : bool, optional - Print the full command to be run before running it - - Returns - ------- - tuple : (int, str) - The return code, and either command output if pipe=True else None - - """ - - if runcmd is None: - runcmd = getmpirun() - - if nproc is None: - # Determine number of CPUs on this machine - nproc = determineNumberOfCPUs() - - cmd = runcmd + " " + str(nproc) + " " + command - - if output is not None: - cmd = cmd + " > "+output - - if mthread is not None: - cmd = "OMP_NUM_THREADS={j} ".format(j=mthread)+cmd - - if verbose == True: - print(cmd) - - return shell(cmd, pipe=pipe) - - -def shell_safe(command, *args, **kwargs): - """'Safe' version of shell. - - Raises a `RuntimeError` exception if the command is not - successful - - Parameters - ---------- - command : str - The command to run - *args, **kwargs - Optional arguments passed to `shell` - - """ - s, out = shell(command,*args,**kwargs) - if s: - raise RuntimeError("Run failed with %d.\nCommand was:\n%s\n\n" - "Output was\n\n%s"% - (s,command,out)) - return s, out - - -def launch_safe(command, *args, **kwargs): - """'Safe' version of launch. 
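Typical use of the run wrapper helpers above looks roughly like the following; "./mymodel" and the data directory are placeholders for a real BOUT++ executable and case, and a working MPI launcher is assumed:

from boututils.run_wrapper import shell_safe, launch_safe

shell_safe("make > make.log")                          # build; raises on failure
status, out = launch_safe("./mymodel -d data", nproc=4, pipe=True)
print(status)                                          # 0 on success
print(out.splitlines()[-1])                            # last line of captured output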
- - Raises an RuntimeError exception if the command is not successful - - Parameters - ---------- - command : str - The command to run - *args, **kwargs - Optional arguments passed to `shell` - - """ - s, out = launch(command,*args,**kwargs) - if s: - raise RuntimeError("Run failed with %d.\nCommand was:\n%s\n\n" - "Output was\n\n%s"% - (s,command,out)) - return s, out diff --git a/tools/pylib/boututils/showdata.py b/tools/pylib/boututils/showdata.py deleted file mode 100644 index 9959402c14..0000000000 --- a/tools/pylib/boututils/showdata.py +++ /dev/null @@ -1,702 +0,0 @@ -""" -Visualisation and animation routines - -Written by Luke Easy -le590@york.ac.uk -Last Updated 19/3/2015 -Additional functionality by George Breyiannis 26/12/2014 - -""" -from __future__ import print_function -from __future__ import division -from builtins import str, chr, range - -from matplotlib import pyplot as plt -from matplotlib import animation -from numpy import linspace, meshgrid, array, min, max, abs, floor, pi, isclose -from boutdata.collect import collect -from boututils.boutwarnings import alwayswarn - -#################################################################### -# Specify manually ffmpeg path -#plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg' - -FFwriter = animation.FFMpegWriter() -#################################################################### - - -################### -#http://stackoverflow.com/questions/16732379/stop-start-pause-in-python-matplotlib-animation -# -j=-2 -pause = False -################### - - -def showdata(vars, titles=[], legendlabels=[], surf=[], polar=[], tslice=0, t_array=None, - movie=0, fps=28, dpi=200, intv=1, Ncolors=25, x=[], y=[], - global_colors=False, symmetric_colors=False, hold_aspect=False, - cmap=None, clear_between_frames=None, return_animation=False, window_title=""): - """A Function to animate time dependent data from BOUT++ - - To animate multiple variables on different axes: - - >>> showdata([var1, var2, var3]) - - To animate more than one line on a single axes: - - >>> showdata([[var1, var2, var3]]) - - The default graph types are: - 2D (time + 1 spatial dimension) arrays = animated line plot - 3D (time + 2 spatial dimensions) arrays = animated contour plot. - - To use surface or polar plots: - - >>> showdata(var, surf=1) - >>> showdata(var, polar=1) - - Can plot different graph types on different axes. Default graph types will - be used depending on the dimensions of the input arrays. To specify - polar/surface plots on different axes: - - >>> showdata([var1, var2], surf=[1, 0], polar=[0, 1]) - - Movies require FFmpeg (for .mp4) and/or ImageMagick (for .gif) to be - installed. The 'movie' option can be set to 1 (which will produce an mp4 - called 'animation.mp4'), to a name with no extension (which will produce an - mp4 called '.mp4') - - The `tslice` variable is used to control the time value that is printed on - each frame of the animation. If the input data matches the time values - found within BOUT++'s dmp data files, then these time values will be used. - Otherwise, an integer counter is used. - - The `cmap` variable (if specified) will set the colormap used in the plot - cmap must be a matplotlib colormap instance, or the name of a registered - matplotlib colormap - - During animation click once to stop in the current frame. Click again to - continue. 
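A minimal way to exercise showdata without any dump files is to pass it a synthetic (time, x, y) array; an interactive matplotlib backend is assumed (and FFmpeg only if a movie is requested):

import numpy as np
from boututils.showdata import showdata

t = np.linspace(0.0, 2.0 * np.pi, 40)
x = np.linspace(0.0, 1.0, 64)
y = np.linspace(0.0, 1.0, 48)

# 3D (time + 2 spatial dimensions) -> animated contour plot
var = np.sin(6.0 * x[None, :, None] + t[:, None, None]) * np.cos(4.0 * y[None, None, :])
showdata(var, titles=["toy field"])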
- - Parameters - ---------- - vars : array_like or list of array_like - Variable or list of variables to plot - titles : str or list of str, optional - Title or list of titles for each axis - legendlabels : str or list of str, optional - Legend or list of legends for each variable - surf : list of int - Which axes to plot as a surface plot - polar : list of int - Which axes to plot as a polar plot - tslice : list of int - Use these time values from a dump file (see above) - t_array : array - Pass in t_array using this argument to use the simulation time in plot - titles. Otherwise, just use the t-index. - movie : int - If 1, save the animation to file - fps : int - Number of frames per second to use when saving animation - dpi : int - Dots per inch to use when saving animation - intv : int - ??? - Ncolors : int - Number of levels in contour plots - x, y : array_like, list of array_like - X, Y coordinates - global_colors : bool - If "vars" is a list the colorlevels are determined from the - maximum of the maxima and and the minimum of the minima in all - fields in vars - symmetric_colors : bool - Colour levels are symmetric - hold_aspect : bool - Use equal aspect ratio in plots - cmap : colormap - A matplotlib colormap instance to use - clear_between_frames : bool, optional - - Default (None) - all plots except line plots will clear between frames - - True - all plots will clear between frames - - False - no plots will clear between frames - return_animation : bool - Return the matplotlib animation instance - window_title : str - Give a title for the animation window - - TODO - ---- - - Replace empty lists in signature with None - - Use bools in sensible places - - Put massive list of arguments in kwargs - - Speed up animations ???? - - Look at theta in polar plots - periodic?!? - - Log axes, colorbars - - Figureplot - - """ - plt.ioff() - - # Check to see whether vars is a list or not. - if isinstance(vars, list): - Nvar = len(vars) - else: - vars = [vars] - Nvar = len(vars) - - if Nvar < 1: - raise ValueError("No data supplied") - - # Check to see whether each variable is a list - used for line plots only - Nlines = [] - for i in range(0, Nvar): - if isinstance(vars[i], list): - Nlines.append(len(vars[i])) - else: - Nlines.append(1) - vars[i] = [vars[i]] - - # Sort out titles - if len(titles) == 0: - for i in range(0,Nvar): - titles.append(('Var' + str(i+1))) - elif len(titles) != Nvar: - raise ValueError('The length of the titles input list must match the length of the vars list.') - - # Sort out legend labels - if len(legendlabels) == 0: - for i in range(0,Nvar): - legendlabels.append([]) - for j in range(0,Nlines[i]): - legendlabels[i].append(chr(97+j)) - elif (isinstance(legendlabels[0], list) != 1): - if Nvar != 1: - check = 0 - for i in range(0,Nvar): - if len(legendlabels) != Nlines[i]: - check = check+1 - if check == 0: - alwayswarn("The legendlabels list does not contain a sublist for each variable, but its length matches the number of lines on each plot. Will apply labels to each plot") - legendlabelsdummy = [] - for i in range(0, Nvar): - legendlabelsdummy.append([]) - for j in range(0,Nlines[i]): - legendlabelsdummy[i].append(legendlabels[j]) - legendlabels = legendlabelsdummy - else: - alwayswarn("The legendlabels list does not contain a sublist for each variable, and it's length does not match the number of lines on each plot. 
Will default apply labels to each plot") - legendlabels = [] - for i in range(0,Nvar): - legendlabels.append([]) - for j in range(0,Nlines[i]): - legendlabels[i].append(chr(97+j)) - else: - if (Nlines[0] == len(legendlabels)): - legendlabels = [legendlabels] - elif len(legendlabels) != Nvar: - alwayswarn("The length of the legendlabels list does not match the length of the vars list, will continue with default values") - legendlabels = [] - for i in range(0,Nvar): - legendlabels.append([]) - for j in range(0,Nlines[i]): - legendlabels[i].append(chr(97+j)) - else: - for i in range(0,Nvar): - if isinstance(legendlabels[i], list): - if len(legendlabels[i]) != Nlines[i]: - alwayswarn('The length of the legendlabel (sub)list for each plot does not match the number of datasets for each plot. Will continue with default values') - legendlabels[i] = [] - for j in range(0,Nlines[i]): - legendlabels[i].append(chr(97+j)) - else: - legendlabels[i] = [legendlabels[i]] - if len(legendlabels[i]) != Nlines[i]: - alwayswarn('The length of the legendlabel (sub)list for each plot does not match the number of datasets for each plot. Will continue with default values') - legendlabels[i] = [] - for j in range(0,Nlines[i]): - legendlabels[i].append(chr(97+j)) - - - # Sort out surf list - if isinstance(surf, list): - if (len(surf) == Nvar): - for i in range(0, Nvar): - if surf[i] >= 1: - surf[i] = 1 - else: - surf[i] = 0 - elif (len(surf) == 1): - if surf[0] >= 1: - surf[0] = 1 - else: - surf[0] = 0 - if (Nvar > 1): - for i in range(1,Nvar): - surf.append(surf[0]) - elif (len(surf) == 0): - for i in range(0,Nvar): - surf.append(0) - else: - alwayswarn('Length of surf list does not match number of variables. Will default to no polar plots') - for i in range(0,Nvar): - surf.append(0) - - else: - surf = [surf] - if surf[0] >= 1: - surf[0] = 1 - else: - surf[0] = 0 - if (Nvar > 1): - for i in range(1,Nvar): - surf.append(surf[0]) - - # Sort out polar list - if isinstance(polar, list): - if (len(polar) == Nvar): - for i in range(0, Nvar): - if polar[i] >= 1: - polar[i] = 1 - else: - polar[i] = 0 - elif (len(polar) == 1): - if polar[0] >= 1: - polar[0] = 1 - else: - polar[0] = 0 - if (Nvar > 1): - for i in range(1,Nvar): - polar.append(polar[0]) - elif (len(polar) == 0): - for i in range(0,Nvar): - polar.append(0) - else: - alwayswarn('Length of polar list does not match number of variables. Will default to no polar plots') - for i in range(0,Nvar): - polar.append(0) - else: - polar = [polar] - if polar[0] >= 1: - polar[0] = 1 - else: - polar[0] = 0 - if (Nvar > 1): - for i in range(1,Nvar): - polar.append(polar[0]) - - # Determine shapes of arrays - dims = [] - Ndims = [] - lineplot = [] - contour = [] - for i in range(0,Nvar): - dims.append([]) - Ndims.append([]) - for j in range(0, Nlines[i]): - dims[i].append(array((vars[i][j].shape))) - Ndims[i].append(dims[i][j].shape[0]) - # Perform check to make sure that data is either 2D or 3D - if (Ndims[i][j] < 2): - raise ValueError('data must be either 2 or 3 dimensional. Exiting') - - if (Ndims[i][j] > 3): - raise ValueError('data must be either 2 or 3 dimensional. Exiting') - - if ((Ndims[i][j] == 2) & (polar[i] != 0)): - alwayswarn('Data must be 3 dimensional (time, r, theta) for polar plots. Will plot lineplot instead') - - if ((Ndims[i][j] == 2) & (surf[i] != 0)): - alwayswarn('Data must be 3 dimensional (time, x, y) for surface plots. 
Will plot lineplot instead') - - if ((Ndims[i][j] == 3) & (Nlines[i] != 1)): - raise ValueError('cannot have multiple sets of 3D (time + 2 spatial dimensions) on each subplot') - - - if ((Ndims[i][j] != Ndims[i][0])): - raise ValueError('Error, Number of dimensions must be the same for all variables on each plot.') - - if (Ndims[i][0] == 2): # Set polar and surf list entries to 0 - polar[i] = 0 - surf[i] = 0 - lineplot.append(1) - contour.append(0) - else: - if ((polar[i] == 1) & (surf[i] == 1)): - alwayswarn('Cannot do polar and surface plots at the same time. Default to contour plot') - contour.append(1) - lineplot.append(0) - polar[i] = 0 - surf[i] = 0 - elif (polar[i] == 1) | (surf[i] == 1): - contour.append(0) - lineplot.append(0) - else: - contour.append(1) - lineplot.append(0) - - # Obtain size of data arrays - Nt = [] - Nx = [] - Ny = [] - for i in range(0, Nvar): - Nt.append([]) - Nx.append([]) - Ny.append([]) - for j in range(0, Nlines[i]): - Nt[i].append(vars[i][j].shape[0]) - Nx[i].append(vars[i][j].shape[1]) - if (Nt[i][j] != Nt[0][0]): - raise ValueError('time dimensions must be the same for all variables.') - - #if (Nx[i][j] != Nx[i][0]): - # raise ValueError('Dimensions must be the same for all variables on each plot.') - - if (Ndims[i][j] == 3): - Ny[i].append(vars[i][j].shape[2]) - #if (Ny[i][j] != Ny[i][0]): - # raise ValueError('Dimensions must be the same for all variables.') - - # Obtain number of frames - Nframes = int(Nt[0][0]/intv) - - # Generate grids for plotting - # Try to use provided grids where possible - # If x and/or y are not lists, apply to all variables - if not isinstance(x, (list,tuple)): - x = [x]*Nvar # Make list of x with length Nvar - if not isinstance(y, (list,tuple)): - y = [y]*Nvar # Make list of x with length Nvar - xnew = [] - ynew = [] - for i in range(0,Nvar): - xnew.append([]) - try: - xnew[i].append(x[i]) - if not (x[i].shape==(Nx[i][0],) or x[i].shape==(Nx[i][0],Ny[i][0]) or x[i].shape==(Nt[i][0],Nx[i][0],Ny[i],[0])): - raise ValueError("For variable number "+str(i)+", "+titles[i]+", the shape of x is not compatible with the shape of the variable. Shape of x should be (Nx), (Nx,Ny) or (Nt,Nx,Ny).") - except: - for j in range(0, Nlines[i]): - xnew[i].append(linspace(0,Nx[i][j]-1, Nx[i][j])) - - #x.append(linspace(0,Nx[i][0]-1, Nx[i][0])) - - if (Ndims[i][0] == 3): - try: - ynew.append(y[i]) - if not (y[i].shape==(Ny[i][0],) or y[i].shape==(Nx[i][0],Ny[i][0]) or y[i].shape==(Nt[i][0],Nx[i][0],Ny[i],[0])): - raise ValueError("For variable number "+str(i)+", "+titles[i]+", the shape of y is not compatible with the shape of the variable. Shape of y should be (Ny), (Nx,Ny) or (Nt,Nx,Ny).") - except: - ynew.append(linspace(0, Ny[i][0]-1, Ny[i][0])) - else: - ynew.append(0) - x = xnew - y = ynew - # Determine range of data. Used to ensure constant colour map and - # to set y scale of line plot. 
- fmax = [] - fmin = [] - xmax = [] - dummymax = [] - dummymin = [] - clevels = [] - - for i in range(0,Nvar): - - dummymax.append([]) - dummymin.append([]) - for j in range(0,Nlines[i]): - dummymax[i].append(max(vars[i][j])) - dummymin[i].append(min(vars[i][j])) - - fmax.append(max(dummymax[i])) - fmin.append(min(dummymin[i])) - - if(symmetric_colors): - absmax =max(abs(array(fmax[i], fmin[i]))) - fmax[i] = absmax - fmin[i] = -absmax - - for j in range(0,Nlines[i]): - dummymax[i][j] = max(x[i][j]) - xmax.append(max(dummymax[i])) - - - if not (global_colors): - if isclose(fmin[i], fmax[i]): - # add/subtract very small constant in case fmin=fmax=0 - thiscontourmin = fmin[i] - 3.e-15*abs(fmin[i]) - 1.e-36 - thiscontourmax = fmax[i] + 3.e-15*abs(fmax[i]) + 1.e-36 - alwayswarn("Contour levels too close, adding padding to colorbar range") - clevels.append(linspace(thiscontourmin, thiscontourmax, Ncolors)) - else: - clevels.append(linspace(fmin[i], fmax[i], Ncolors)) - - if(global_colors): - fmaxglobal = max(fmax) - fminglobal = min(fmin) - if isclose(fminglobal, fmaxglobal): - fminglobal = fminglobal - 3.e-15*abs(fminglobal) - 1.e-36 - fmaxglobal = fmaxglobal + 3.e-15*abs(fmaxglobal) + 1.e-36 - for i in range(0,Nvar): - clevels.append(linspace(fminglobal, fmaxglobal, Ncolors)) - - # Create figures for animation plotting - if (Nvar < 2): - row = 1 - col = 1 - h = 6.0 - w = 8.0 - elif (Nvar <3): - row = 1 - col = 2 - h = 6.0 - w = 12.0 - elif (Nvar < 5): - row = 2 - col = 2 - h = 8.0 - w = 12.0 - - elif (Nvar < 7): - row = 2 - col = 3 - h = 8.0 - w = 14.0 - - elif (Nvar < 10) : - row = 3 - col = 3 - h = 12.0 - w = 14.0 - else: - raise ValueError('too many variables...') - - - fig = plt.figure(window_title, figsize=(w,h)) - title = fig.suptitle(r' ', fontsize=14 ) - - # Initiate all list variables required for plotting here - ax = [] - lines = [] - plots = [] - cbars = [] - xstride = [] - ystride = [] - r = [] - theta = [] - - - # Initiate figure frame - for i in range(0,Nvar): - lines.append([]) - if (lineplot[i] == 1): - ax.append(fig.add_subplot(row,col,i+1)) - ax[i].set_xlim((0,xmax[i])) - ax[i].set_ylim((fmin[i], fmax[i])) - for j in range(0,Nlines[i]): - lines[i].append(ax[i].plot([],[],lw=2, label = legendlabels[i][j])[0]) - #Need the [0] to 'unpack' the line object from tuple. 
Alternatively: - #lines[i], = lines[i] - ax[i].set_xlabel(r'x') - ax[i].set_ylabel(titles[i]) - if (Nlines[i] != 1): - legendneeded = 1 - for k in range(0,i): - if (Nlines[i] == Nlines[k]): - legendneeded = 0 - if (legendneeded == 1): - plt.axes(ax[i]) - plt.legend(loc = 0) - # Pad out unused list variables with zeros - plots.append(0) - cbars.append(0) - xstride.append(0) - ystride.append(0) - r.append(0) - theta.append(0) - - elif (contour[i] == 1): - ax.append(fig.add_subplot(row,col,i+1)) - #ax[i].set_xlim((0,Nx[i][0]-1)) - #ax[i].set_ylim((0,Ny[i][0]-1)) - ax[i].set_xlim(min(x[i]),max(x[i])) - ax[i].set_ylim(min(y[i]),max(y[i])) - ax[i].set_xlabel(r'x') - ax[i].set_ylabel(r'y') - ax[i].set_title(titles[i]) - if hold_aspect: - ax[i].set_aspect('equal') - thisx = x[i][0] - if len(thisx.shape) == 3: - thisx = thisx[0] - thisy = y[i] - if len(thisy.shape) == 3: - thisy = thisy[0] - plots.append(ax[i].contourf(thisx.T,thisy.T,vars[i][0][0,:,:].T, Ncolors, cmap=cmap, lw=0, levels=clevels[i] )) - plt.axes(ax[i]) - cbars.append(fig.colorbar(plots[i], format='%1.1e')) - # Pad out unused list variables with zeros - lines[i].append(0) - xstride.append(0) - ystride.append(0) - r.append(0) - theta.append(0) - - elif (surf[i] == 1): - if (len(x[i][0].shape)==1 and len(y[i].shape)==1): - # plot_wireframe() requires 2d arrays for x and y coordinates - x[i][0],y[i] = meshgrid(x[i][0],y[i]) - thisx = x[i][0] - if len(thisx.shape) == 3: - thisx = thisx[0] - thisy = y[i] - if len(thisy.shape) == 3: - thisy = thisy[0] - if (Nx[i][0]<= 20): - xstride.append(1) - else: - xstride.append(int(floor(Nx[i][0]/20))) - if (Ny[i][0]<=20): - ystride.append(1) - else: - ystride.append(int(floor(Ny[i][0]/20))) - ax.append(fig.add_subplot(row,col,i+1, projection='3d')) - plots.append(ax[i].plot_wireframe(thisx, thisy, vars[i][0][0,:,:].T, rstride=ystride[i], cstride=xstride[i])) - title = fig.suptitle(r'', fontsize=14 ) - ax[i].set_xlabel(r'x') - ax[i].set_ylabel(r'y') - ax[i].set_zlabel(titles[i]) - # Pad out unused list variables with zeros - lines[i].append(0) - cbars.append(0) - r.append(0) - theta.append(0) - - elif (polar[i] == 1): - r.append(linspace(1,Nx[i][0], Nx[i][0])) - theta.append(linspace(0,2*pi, Ny[i][0])) - r[i],theta[i] = meshgrid(r[i], theta[i]) - ax.append(fig.add_subplot(row,col,i+1, projection='polar')) - plots.append(ax[i].contourf(theta[i], r[i], vars[i][0][0,:,:].T, cmap=cmap, levels=clevels[i])) - plt.axes(ax[i]) - cbars.append(fig.colorbar(plots[i], format='%1.1e')) - ax[i].set_rmax(Nx[i][0]-1) - ax[i].set_title(titles[i]) - # Pad out unused list variables with zeros - lines[i].append(0) - xstride.append(0) - ystride.append(0) - - - - def onClick(event): - global pause - pause ^= True - - - def control(): - global j, pause - if j == Nframes-1 : j = -1 - if not pause: - j=j+1 - - return j - - - # Animation function - def animate(i): - j=control() - - index = j*intv - - for j in range(0,Nvar): - #Default to clearing axis between frames on all plots except line plots - if (clear_between_frames is None and lineplot[j] != 1 ) or clear_between_frames is True: - ax[j].cla() #Clear axis between frames so that masked arrays can be plotted - if (lineplot[j] == 1): - for k in range(0,Nlines[j]): - lines[j][k].set_data(x[j][k], vars[j][k][index,:]) - elif (contour[j] == 1): - thisx = x[j][0] - if len(thisx.shape) == 3: - thisx = thisx[index] - thisy = y[j] - if len(thisy.shape) == 3: - thisy = thisy[index] - plots[j] = ax[j].contourf(x[j][0].T,y[j].T,vars[j][0][index,:,:].T, Ncolors, cmap=cmap, lw=0, 
levels=clevels[j]) - ax[j].set_xlabel(r'x') - ax[j].set_ylabel(r'y') - ax[j].set_title(titles[j]) - elif (surf[j] == 1): - thisx = x[j][0] - if len(thisx.shape) == 3: - thisx = thisx[index] - thisy = y[j][0] - if len(thisy.shape) == 3: - thisy = thisy[index] - ax[j] = fig.add_subplot(row,col,j+1, projection='3d') - plots[j] = ax[j].plot_wireframe(thisx, thisy, vars[j][0][index,:,:].T, rstride=ystride[j], cstride=xstride[j]) - ax[j].set_zlim(fmin[j],fmax[j]) - ax[j].set_xlabel(r'x') - ax[j].set_ylabel(r'y') - ax[j].set_title(titles[j]) - elif (polar[j] == 1): - plots[j] = ax[j].contourf(theta[j], r[j], vars[j][0][index,:,:].T,cmap=cmap, levels=clevels[j]) - ax[j].set_rmax(Nx[j][0]-1) - ax[j].set_title(titles[j]) - - if t_array is not None: - title.set_text('t = %1.2e' % t_array[index]) - else: - title.set_text('t = %i' % index) - return plots - - def init(): - global j, pause - j=-2 - pause = False - return animate(0) - - - - - - - # Call Animation function - - fig.canvas.mpl_connect('button_press_event', onClick) - anim = animation.FuncAnimation(fig, animate, init_func=init, frames=Nframes) - - #If movie is not passed as a string assign the default filename - if (movie==1): - movie='animation.mp4' - - # Save movie with given or default name - if ((isinstance(movie,str)==1)): - movietype = movie.split('.')[-1] - if movietype == 'mp4': - try: - anim.save(movie,writer = FFwriter, fps=fps, dpi=dpi, extra_args=['-vcodec', 'libx264']) - except Exception: - #Try specifying writer by string if ffmpeg not found - try: - anim.save(movie,writer = 'ffmpeg', fps=fps, dpi=dpi, extra_args=['-vcodec', 'libx264']) - except Exception: - print('Save failed: Check ffmpeg path') - raise - elif movietype == 'gif': - anim.save(movie+'.gif',writer = 'imagemagick', fps=fps, dpi=dpi) - else: - raise ValueError("Unrecognized file type for movie. Supported types are .mp4 and .gif") - - # Show animation if not saved or returned, otherwise close the plot - if (movie==0 and return_animation == 0): - plt.show() - else: - plt.close() - # Return animation object - if(return_animation == 1): - return(anim) diff --git a/tools/pylib/boututils/spectrogram.py b/tools/pylib/boututils/spectrogram.py deleted file mode 100644 index d1c2a3617b..0000000000 --- a/tools/pylib/boututils/spectrogram.py +++ /dev/null @@ -1,163 +0,0 @@ -"""Creates spectrograms using the Gabor transform to maintain time and -frequency resolution - -written by: Jarrod Leddy -updated: 23/06/2016 - -""" -from __future__ import print_function -from __future__ import division -from builtins import range - -from numpy import arange, zeros, exp, power, transpose, sin, cos, linspace, min, max -from scipy import fftpack, pi - - -def spectrogram(data, dx, sigma, clip=1.0, optimise_clipping=True, nskip=1.0): - """Creates spectrograms using the Gabor transform to maintain time - and frequency resolution - - .. note:: Very early and very late times will have some issues due - to the method - truncate them after taking the spectrogram - if they are below your required standards - - .. 
note:: If you are seeing issues at the top or bottom of the - frequency range, you need a longer time series - - written by: Jarrod Leddy - updated: 23/06/2016 - - Parameters - ---------- - data : array_like - The time series you want spectrogrammed - dt : float - Time resolution - sigma : float - Used in the Gabor transform, will balance time and frequency - resolution suggested value is 1.0, but may need to be adjusted - manually until result is as desired: - - - If bands are too tall raise sigma - - If bands are too wide, lower sigma - clip : float, optional - Makes the spectrogram run faster, but decreases frequency - resolution. clip is by what factor the time spectrum should be - clipped by --> N_new = N / clip - optimise_clip : bool - If true (default) will change the data length to be 2^N - (rounded down from your inputed clip value) to make FFT's fast - nskip : float - Scales final time axis, skipping points over which to centre - the gaussian window for the FFTs - - Returns - ------- - tuple : (array_like, array_like, array_like) - A tuple containing the spectrogram, frequency and time - - """ - n = data.size - nnew = int(n/nskip) - xx = arange(n)*dx - xxnew = arange(nnew)*dx*nskip - sigma = sigma * dx - - n_clipped = int(n/clip) - - # check to see if n_clipped is near a 2^n factor for speed - if(optimise_clipping): - nn = n_clipped - two_count = 1 - while(1): - nn = nn/2.0 - if(nn <= 2.0): - n_clipped = 2**two_count - print('clipping window length from ',n,' to ',n_clipped,' points') - break - else: - two_count += 1 - else: - print('using full window length: ',n_clipped,' points') - - halfclip = int(n_clipped/2) - spectra = zeros((nnew,halfclip)) - - omega = fftpack.fftfreq(n_clipped, dx) - omega = omega[0:halfclip] - - for i in range(nnew): - beg = i*nskip-halfclip - end = i*nskip+halfclip-1 - - if beg < 0: - end = end-beg - beg = 0 - elif end >= n: - end = n-1 - beg = end - n_clipped + 1 - - gaussian = 1.0 / (sigma * 2.0 * pi) * exp(-0.5 * power(( xx[beg:end] - xx[i*nskip] ),2.0) / (2.0 * sigma) ) - fftt = abs(fftpack.fft(data[beg:end] * gaussian)) - fftt = fftt[:halfclip] - spectra[i,:] = fftt - - return (transpose(spectra), omega, xxnew) - - -def test_spectrogram(n, d, s): - """Function used to test the performance of spectrogram with various - values of sigma - - Parameters - ---------- - n : int - Number of points - d : float - Grid spacing - s : float - Initial sigma - - """ - - import matplotlib.pyplot as plt - - nskip = 10 - xx = arange(n)/d - test_data = sin(2.0*pi*512.0*xx * ( 1.0 + 0.005*cos(xx*50.0))) + 0.5*exp(xx)*cos(2.0*pi*100.0*power(xx,2)) - test_sigma = s - dx = 1.0/d - - s1 = test_sigma*0.1 - s2 = test_sigma - s3 = test_sigma*10.0 - - (spec2,omega2,xx) = spectrogram(test_data, dx, s2, clip=5.0, nskip=nskip) - (spec3,omega3,xx) = spectrogram(test_data, dx, s3, clip=5.0, nskip=nskip) - (spec1,omega1,xx) = spectrogram(test_data, dx, s1, clip=5.0, nskip=nskip) - - levels = linspace(min(spec1),max(spec1),100) - plt.subplot(311) - plt.contourf(xx,omega1,spec1,levels=levels) - plt.ylabel("frequency") - plt.xlabel(r"$t$") - plt.title(r"Spectrogram of $sin(t + cos(t) )$ with $\sigma=$%3.1f"%s1) - - levels = linspace(min(spec2),max(spec2),100) - plt.subplot(312) - plt.contourf(xx,omega2,spec2,levels=levels) - plt.ylabel("frequency") - plt.xlabel(r"$t$") - plt.title(r"Spectrogram of $sin(t + cos(t) )$ with $\sigma=$%3.1f"%s2) - - levels = linspace(min(spec3),max(spec3),100) - plt.subplot(313) - plt.contourf(xx,omega3,spec3,levels=levels) - plt.ylabel("frequency") - 
plt.xlabel(r"$t$") - plt.title(r"Spectrogram of $sin(t + cos(t) )$ with $\sigma=$%3.1f"%s3) - plt.tight_layout() - plt.show() - -if __name__ == "__main__": - test_spectrogram(2048, 2048.0, 0.01) # array size, divisions per unit, sigma of gaussian diff --git a/tools/pylib/boututils/surface_average.py b/tools/pylib/boututils/surface_average.py deleted file mode 100644 index d58e9a3dc5..0000000000 --- a/tools/pylib/boututils/surface_average.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Average over a surface - -""" - -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division -from builtins import range -from past.utils import old_div -import numpy as np -from boututils.calculus import deriv -from boututils.int_func import int_func -from .idl_tabulate import idl_tabulate -from bunch import bunchify - - -def surface_average(var, g, area=None): - """Average a variable over a surface - - Parameters - ---------- - var : array_like - 3-D or 4D variable to integrate (either [x,y,z] or [t,x,y,z]) - g : dict - A dictionary of various grid quantities - area : bool - Average by flux-surface area = (B/Bp)*dl * R*dz - - Returns - ------- - float - Surface average of variable - - """ - - s = np.ndim(var) - - - - if s == 4 : - nx = np.shape(var)[1] - ny = np.shape(var)[2] - nt = np.shape(var)[0] - - result = np.zeros((nx,nt)) - for t in range (nt): - - result[:,t] = surface_average(var[t,:,:,:], g, area=area) - - return result - elif s != 3 : - print("ERROR: surface_average var must be 3 or 4D") - return 0 - - - # 3D [x,y,z] - nx = np.shape(var)[0] - ny = np.shape(var)[1] -# nz = np.shape(var)[2] - -# Use bunch to create grid structure - grid=bunchify(g) - - - # Calculate poloidal angle from grid - theta = np.zeros((nx,ny)) - - #status = gen_surface(mesh=grid) ; Start generator - xi = -1 - yi = np.arange(0,ny,dtype=int) - last = 0 - while True: - #yi = gen_surface(last=last, xi=xi, period=periodic) - xi = xi + 1 - if xi == nx-1 : - last = 1 - - dtheta = 2.*np.pi / np.float(ny) - r = grid.Rxy[xi,yi] - z = grid.Zxy[xi,yi] - n = np.size(r) - - dl = old_div(np.sqrt( deriv(r)**2 + deriv(z)**2 ), dtheta) - if area: - dA = (old_div(grid.Bxy[xi,yi],grid.Bpxy[xi,yi]))*r*dl - A = int_func(np.arange(n),dA) - theta[xi,yi] = 2.*np.pi*A/A[n-1] - else: - nu = dl * (grid.Btxy[xi,yi]) / ((grid.Bpxy[xi,yi]) * r ) - theta[xi,yi] = int_func(np.arange(n)*dtheta,nu) - theta[xi,yi] = 2.*np.pi*theta[xi,yi] / theta[xi,yi[n-1]] - - if last==1 : break - - vy = np.zeros(ny) - result = np.zeros(nx) - for x in range(nx) : - for y in range(ny) : - vy[y] = np.mean(var[x,y,:]) - - result[x] = old_div(idl_tabulate(theta[x,:], vy), (2.*np.pi)) - - return result diff --git a/tools/pylib/boututils/volume_integral.py b/tools/pylib/boututils/volume_integral.py deleted file mode 100644 index 9fdb0833c4..0000000000 --- a/tools/pylib/boututils/volume_integral.py +++ /dev/null @@ -1,107 +0,0 @@ -"""Integrate over a volume - -""" - -from __future__ import print_function -from __future__ import division -from builtins import range -from past.utils import old_div -import numpy as np -from boututils.calculus import deriv -from bunch import bunchify - - -def volume_integral(var, g, xr=False): - """Integrate a variable over a volume - - Parameters - ---------- - var : array_like - Variable to integrate - g : dict - A dictionary of various grid quantities - xr : (int, int), optional - Range of x indices (default: all of x) - - Returns - ------- - float - Volumne integral of variable - - """ - - s = np.ndim(var) 
- - grid=bunchify(g) - - - if s == 4 : - # 4D [t,x,y,z] - integrate for each t - nx = np.shape(var)[1] - ny = np.shape(var)[2] - nt = np.shape(var)[0] - - result = np.zeros(nt) - for t in range(nt) : - result[t] = volume_integral(var[t,:,:,:],g,xr=xr) - return result - - elif s == 3 : - # 3D [x,y,z] - average in Z - nx = np.shape(var)[0] - ny = np.shape(var)[1] - # nz = np.shape(var)[2] - - zi = np.zeros((nx, ny)) - for x in range(nx): - for y in range(ny): - zi[x,y] = np.mean(var[x,y,:]) - - return volume_integral(zi, g, xr=xr) - - - elif s != 2 : - print("ERROR: volume_integral var must be 2, 3 or 4D") - - - # 2D [x,y] - nx = np.shape(var)[0] - ny = np.shape(var)[1] - - if xr == False : xr=[0,nx-1] - - result = 0.0 - - #status = gen_surface(mesh=grid) ; Start generator - xi = -1 - yi = np.arange(0,ny,dtype=int) - last = 0 - # iy = np.zeros(nx) - while True: - #yi = gen_surface(last=last, xi=xi, period=periodic) - xi = xi + 1 - if xi == nx-1 : last = 1 - - if (xi >= np.min(xr)) & (xi <= np.max(xr)) : - dtheta = 2.*np.pi / np.float(ny) - r = grid.Rxy[xi,yi] - z = grid.Zxy[xi,yi] - n = np.size(r) - dl = old_div(np.sqrt( deriv(r)**2 + deriv(z)**2 ), dtheta) - - # Area of flux-surface - dA = (grid.Bxy[xi,yi]/grid.Bpxy[xi,yi]*dl) * (r*2.*np.pi) - # Volume - if xi == nx-1 : - dpsi = (grid.psixy[xi,yi] - grid.psixy[xi-1,yi]) - else: - dpsi = (grid.psixy[xi+1,yi] - grid.psixy[xi,yi]) - - dV = dA * dpsi / (r*(grid.Bpxy[xi,yi])) # May need factor of 2pi - dV = np.abs(dV) - - result = result + np.sum(var[xi,yi] * dV) - - if last==1 : break - - return result diff --git a/tools/pylib/boututils/watch.py b/tools/pylib/boututils/watch.py deleted file mode 100644 index e7d038c4e1..0000000000 --- a/tools/pylib/boututils/watch.py +++ /dev/null @@ -1,84 +0,0 @@ -""" -Routines for watching files for changes - -""" -from __future__ import print_function -from builtins import zip - -import time -import os - - -def watch(files, timeout=None, poll=2): - """Watch a given file or collection of files until one changes. Uses - polling. - - Parameters - ---------- - files : str or list of str - Name of one or more files to watch - timeout : int, optional - Timeout in seconds (default is no timeout) - poll : int, optional - Polling interval in seconds (default: 2) - - Returns - ------- - str - The name of the first changed file, - or None if timed out before any changes - - Examples - -------- - - To watch one file, timing out after 60 seconds: - - >>> watch('file1', timeout=60) - - To watch 2 files, never timing out: - - >>> watch(['file1', 'file2']) - - Author: Ben Dudson - - """ - - # Get modification time of file(s) - try: - if hasattr(files, '__iter__'): - # Iterable - lastmod = [ os.stat(f).st_mtime for f in files ] - iterable = True - else: - # Not iterable -> just one file - lastmod = os.stat(files).st_mtime - iterable = False - except: - print("Can't test modified time. 
Wrong file name?") - raise - - start_time = time.time() - running = True - while running: - sleepfor = poll - if timeout: - # Check if timeout will be reached before next poll - if time.time() - start_time + sleepfor > timeout: - # Adjust time so that finish at timeout - sleepfor = timeout - (time.time() - start_time) - running = False # Stop after next test - - time.sleep(sleepfor) - - if iterable: - for last_t, f in zip(lastmod, files): - # Get the new modification time - t = os.stat(f).st_mtime - if t > last_t + 1.0: # +1 to reduce risk of false alarms - # File has been modified - return f - else: - t = os.stat(files).st_mtime - if t > lastmod + 1.0: - return files - return None From 6ec4984dbac86c35d98e9e7b61683fc4f3ac596c Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 4 Jan 2021 20:50:50 +0100 Subject: [PATCH 106/428] Install setuptools_scm in CI --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 5df1145ea3..3fb347f49d 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -135,7 +135,7 @@ jobs: - name: Install pip packages run: | - ./.pip_install_for_travis.sh 'cython~=0.29' 'netcdf4~=1.5' 'sympy~=1.5' 'gcovr' 'cmake' 'h5py' + ./.pip_install_for_travis.sh 'cython~=0.29' 'netcdf4~=1.5' 'sympy~=1.5' 'gcovr' 'cmake' 'h5py' 'setuptools_scm' # Add the pip install location to the runner's PATH echo ~/.local/bin >> $GITHUB_PATH From d275b4ca466f31a9004f330bacabda0bbe434878 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 4 Jan 2021 21:14:16 +0100 Subject: [PATCH 107/428] Update Python setup instructions --- manual/sphinx/user_docs/installing.rst | 44 ++++++++++++++++++-------- 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/manual/sphinx/user_docs/installing.rst b/manual/sphinx/user_docs/installing.rst index 6ad73d3149..1beaf90291 100644 --- a/manual/sphinx/user_docs/installing.rst +++ b/manual/sphinx/user_docs/installing.rst @@ -394,22 +394,34 @@ make a note of what configure printed out. Python configuration ~~~~~~~~~~~~~~~~~~~~ -To use Python, you will need the NumPy and SciPy libraries. On Debian or -Ubuntu these can be installed with:: +To use Python, you will need the dependencies of the `boututils +`__ and `boutdata +`__ libraries. The simplest way to get these is +to install the packages, plus additional developer dependencies, with pip:: - $ sudo apt-get install python-scipy + $ pip install --user boutdata setuptools_scm -which should then add all the other dependencies like NumPy. To test if -everything is installed, run:: +or conda:: - $ python -c "import scipy" + $ conda install boutdata setuptools_scm -If not, see the SciPy website https://www.scipy.org for instructions on -installing. +You can also install all the packages directly (see the documentation in the `boututils +`__ and `boutdata +`__ repos for the most up to date list) +using pip:: -To do this, the path to ``tools/pylib`` should be added to the -``PYTHONPATH`` environment variable. Instructions for doing this are -printed at the end of the configure script, for example:: + $ pip install --user numpy scipy matplotlib sympy netCDF4 h5py future importlib-metadata setuptools_scm + +or conda:: + + $ conda install numpy scipy matplotlib sympy netcdf4 h5py future importlib-metadata setuptools_scm + +They may also be available from your Linux system's package manager. 
+ +To use the versions of ``boututils`` and ``boutdata`` provided by BOUT++, the path to +``tools/pylib`` should be added to the ``PYTHONPATH`` environment variable. This is not +necessary if you have installed the ``boututils`` and ``boutdata`` packages. Instructions +for doing this are printed at the end of the configure script, for example:: Make sure that the tools/pylib directory is in your PYTHONPATH e.g. by adding to your ~/.bashrc file @@ -420,8 +432,14 @@ To test if this command has worked, try running:: $ python -c "import boutdata" -If this doesn’t produce any error messages then Python is configured -correctly. +If this doesn’t produce any error messages then Python is configured correctly. + +Note that ``boututils`` and ``boutdata`` are provided by BOUT++ as submodules, so versions +compatible with the checked out version of BOUT++ are downloaded into the +``externalpackages`` directory. These are the versions used by the tests run by ``make +check`` even if you have installed ``boututils`` and ``boutdata`` on your system, so you +do need the 'developer' dependencies of the packages (e.g. ``setuptools_scm``). + .. _sec-config-idl: From 4ffdcacc3fa830a6e85a724cbc95c3035d7b3a90 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 5 Jan 2021 22:32:24 +0100 Subject: [PATCH 108/428] Update boututils and boutdata to remove setuptools_scm hard dependency --- externalpackages/boutdata | 2 +- externalpackages/boututils | 2 +- manual/sphinx/user_docs/installing.rst | 13 ++++++------- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/externalpackages/boutdata b/externalpackages/boutdata index 0b849bd326..211434161d 160000 --- a/externalpackages/boutdata +++ b/externalpackages/boutdata @@ -1 +1 @@ -Subproject commit 0b849bd3263574afd1d468e56a116d2956896fac +Subproject commit 211434161df05a85af4d152df44ed9a8225f170a diff --git a/externalpackages/boututils b/externalpackages/boututils index 1db58c0701..08b572d20a 160000 --- a/externalpackages/boututils +++ b/externalpackages/boututils @@ -1 +1 @@ -Subproject commit 1db58c0701823ca5ddb67c9b29be1643b3c604b6 +Subproject commit 08b572d20a6c693b051f6504c599c539f5a68e82 diff --git a/manual/sphinx/user_docs/installing.rst b/manual/sphinx/user_docs/installing.rst index 1beaf90291..8f28bba0c8 100644 --- a/manual/sphinx/user_docs/installing.rst +++ b/manual/sphinx/user_docs/installing.rst @@ -397,24 +397,24 @@ Python configuration To use Python, you will need the dependencies of the `boututils `__ and `boutdata `__ libraries. The simplest way to get these is -to install the packages, plus additional developer dependencies, with pip:: +to install the packages with pip:: - $ pip install --user boutdata setuptools_scm + $ pip install --user boutdata or conda:: - $ conda install boutdata setuptools_scm + $ conda install boutdata You can also install all the packages directly (see the documentation in the `boututils `__ and `boutdata `__ repos for the most up to date list) using pip:: - $ pip install --user numpy scipy matplotlib sympy netCDF4 h5py future importlib-metadata setuptools_scm + $ pip install --user numpy scipy matplotlib sympy netCDF4 h5py future importlib-metadata or conda:: - $ conda install numpy scipy matplotlib sympy netcdf4 h5py future importlib-metadata setuptools_scm + $ conda install numpy scipy matplotlib sympy netcdf4 h5py future importlib-metadata They may also be available from your Linux system's package manager. 
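As an optional extra check, which relies only on the standard ``__file__``
attribute, you can print where the package is actually imported from when both
an installed copy and ``tools/pylib`` are available::

    $ python -c "import boutdata; print(boutdata.__file__)"

The path points into ``site-packages`` for a pip/conda install, or into the
BOUT++ source tree when only the ``PYTHONPATH`` route is used.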
@@ -437,8 +437,7 @@ If this doesn’t produce any error messages then Python is configured correctly Note that ``boututils`` and ``boutdata`` are provided by BOUT++ as submodules, so versions compatible with the checked out version of BOUT++ are downloaded into the ``externalpackages`` directory. These are the versions used by the tests run by ``make -check`` even if you have installed ``boututils`` and ``boutdata`` on your system, so you -do need the 'developer' dependencies of the packages (e.g. ``setuptools_scm``). +check`` even if you have installed ``boututils`` and ``boutdata`` on your system. .. _sec-config-idl: From 05cbcbe06b3942866110dbfe862369a1d47889fd Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 5 Jan 2021 22:35:40 +0100 Subject: [PATCH 109/428] Don't install setuptools_scm for CI It is not required any more. --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3fb347f49d..5df1145ea3 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -135,7 +135,7 @@ jobs: - name: Install pip packages run: | - ./.pip_install_for_travis.sh 'cython~=0.29' 'netcdf4~=1.5' 'sympy~=1.5' 'gcovr' 'cmake' 'h5py' 'setuptools_scm' + ./.pip_install_for_travis.sh 'cython~=0.29' 'netcdf4~=1.5' 'sympy~=1.5' 'gcovr' 'cmake' 'h5py' # Add the pip install location to the runner's PATH echo ~/.local/bin >> $GITHUB_PATH From 810a69e01df24f6b7d9bd611d6837e8bbeba8af5 Mon Sep 17 00:00:00 2001 From: David Dickinson Date: Tue, 22 Oct 2019 16:57:36 +0100 Subject: [PATCH 110/428] Add a method for getting the Timer dictionary and one from printing this. Just convenience methods that can be useful for rough profiling etc. --- include/bout/sys/timer.hxx | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/include/bout/sys/timer.hxx b/include/bout/sys/timer.hxx index dc527939c4..f8acbd810e 100644 --- a/include/bout/sys/timer.hxx +++ b/include/bout/sys/timer.hxx @@ -6,6 +6,8 @@ #include #include +#include "output.hxx" + /*! 
* Timing class for performance benchmarking and diagnosis * @@ -104,6 +106,29 @@ private: /// Get the elapsed time, reset timing info to zero static double resetTime(timer_info& info); + +public: + static std::map getAllInfo() { return info; } + + static void listAllInfo() { + const auto info = Timer::getAllInfo(); + const std::string headerOne = "Timer name"; + const std::string separator = " | "; + auto max_width = static_cast(headerOne.length()); + + for (const auto &kv: info) { + max_width = std::max(max_width, static_cast(kv.first.length())); + } + + output << "Timer report \n\n"; + output << std::setw(max_width) << headerOne << separator << "Time (s)" << "\n"; + output << std::setw(max_width) << std::string(max_width,'-') << separator << std::string(max_width,'-') << "\n"; + for (const auto &kv: info) { + output << std::setw(max_width) << kv.first << " | " << kv.second.time.count() << "\n"; + } + output << "\n"; + }; + }; #endif // __TIMER_H__ From 9977750f0c0af12ef9c91233fa5b848070f58edd Mon Sep 17 00:00:00 2001 From: David Dickinson Date: Tue, 22 Oct 2019 19:40:13 +0100 Subject: [PATCH 111/428] Add missing include --- include/bout/sys/timer.hxx | 1 + 1 file changed, 1 insertion(+) diff --git a/include/bout/sys/timer.hxx b/include/bout/sys/timer.hxx index f8acbd810e..49584a27b2 100644 --- a/include/bout/sys/timer.hxx +++ b/include/bout/sys/timer.hxx @@ -5,6 +5,7 @@ #include #include #include +#include #include "output.hxx" From 7f001e85dff3efa4fc7061f2c9758c8a94044492 Mon Sep 17 00:00:00 2001 From: David Dickinson Date: Wed, 23 Oct 2019 13:54:01 +0100 Subject: [PATCH 112/428] Left align timer names for easier viewing --- include/bout/sys/timer.hxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/bout/sys/timer.hxx b/include/bout/sys/timer.hxx index 49584a27b2..526070ebe5 100644 --- a/include/bout/sys/timer.hxx +++ b/include/bout/sys/timer.hxx @@ -112,7 +112,6 @@ public: static std::map getAllInfo() { return info; } static void listAllInfo() { - const auto info = Timer::getAllInfo(); const std::string headerOne = "Timer name"; const std::string separator = " | "; auto max_width = static_cast(headerOne.length()); @@ -125,7 +124,8 @@ public: output << std::setw(max_width) << headerOne << separator << "Time (s)" << "\n"; output << std::setw(max_width) << std::string(max_width,'-') << separator << std::string(max_width,'-') << "\n"; for (const auto &kv: info) { - output << std::setw(max_width) << kv.first << " | " << kv.second.time.count() << "\n"; + output << std::left << std::setw(max_width) << kv.first << " | " << kv.second.time.count() + << " ("< Date: Wed, 23 Oct 2019 13:54:40 +0100 Subject: [PATCH 113/428] Add an AUTO_TIME macro --- include/bout/sys/timer.hxx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/bout/sys/timer.hxx b/include/bout/sys/timer.hxx index 526070ebe5..519ab61469 100644 --- a/include/bout/sys/timer.hxx +++ b/include/bout/sys/timer.hxx @@ -8,6 +8,7 @@ #include #include "output.hxx" +#include "msg_stack.hxx" /*! 
* Timing class for performance benchmarking and diagnosis @@ -132,4 +133,5 @@ public: }; +#define AUTO_TIME() Timer CONCATENATE(time_,__LINE__)(__thefunc__) #endif // __TIMER_H__ From 40ba9565f932ade700751d6b068eb2b4c7f7dc3f Mon Sep 17 00:00:00 2001 From: David Dickinson Date: Wed, 23 Oct 2019 13:54:58 +0100 Subject: [PATCH 114/428] Keep track of how many times each timer is created --- include/bout/sys/timer.hxx | 1 + src/sys/timer.cxx | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/include/bout/sys/timer.hxx b/include/bout/sys/timer.hxx index 519ab61469..a588019695 100644 --- a/include/bout/sys/timer.hxx +++ b/include/bout/sys/timer.hxx @@ -92,6 +92,7 @@ private: clock_type::time_point started; ///< Start time unsigned int counter; ///< Number of Timer objects associated with this /// timer_info + unsigned int ntimes; }; /// Store of existing timing info objects diff --git a/src/sys/timer.cxx b/src/sys/timer.cxx index a98f45c7ad..3d2cd0bf7c 100644 --- a/src/sys/timer.cxx +++ b/src/sys/timer.cxx @@ -4,6 +4,7 @@ Timer::Timer() : timing(getInfo("")) { if (timing.counter == 0) { timing.started = clock_type::now(); timing.running = true; + timing.ntimes++; } timing.counter += 1; } @@ -12,6 +13,7 @@ Timer::Timer(const std::string& label) : timing(getInfo(label)) { if (timing.counter == 0) { timing.started = clock_type::now(); timing.running = true; + timing.ntimes++; } timing.counter += 1; } @@ -33,7 +35,7 @@ Timer::timer_info& Timer::getInfo(const std::string& label) { auto it = info.find(label); if (it == info.end()) { auto timer = info.emplace( - label, timer_info{seconds{0}, false, clock_type::now(), 0}); + label, timer_info{seconds{0}, false, clock_type::now(), 0, 0}); return timer.first->second; } return it->second; From 0bc3ea405f4af80f8cb42ee21c2083031d584a34 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 8 Jan 2020 15:06:06 +0000 Subject: [PATCH 115/428] Move Timer::listAllInfo definition to implementation file --- include/bout/sys/timer.hxx | 22 +++------------------- src/sys/timer.cxx | 21 +++++++++++++++++++++ 2 files changed, 24 insertions(+), 19 deletions(-) diff --git a/include/bout/sys/timer.hxx b/include/bout/sys/timer.hxx index a588019695..693f143d29 100644 --- a/include/bout/sys/timer.hxx +++ b/include/bout/sys/timer.hxx @@ -111,27 +111,11 @@ private: static double resetTime(timer_info& info); public: + /// Return the map of all the individual timers static std::map getAllInfo() { return info; } - static void listAllInfo() { - const std::string headerOne = "Timer name"; - const std::string separator = " | "; - auto max_width = static_cast(headerOne.length()); - - for (const auto &kv: info) { - max_width = std::max(max_width, static_cast(kv.first.length())); - } - - output << "Timer report \n\n"; - output << std::setw(max_width) << headerOne << separator << "Time (s)" << "\n"; - output << std::setw(max_width) << std::string(max_width,'-') << separator << std::string(max_width,'-') << "\n"; - for (const auto &kv: info) { - output << std::left << std::setw(max_width) << kv.first << " | " << kv.second.time.count() - << " ("<(headerOne.length()); + + for (const auto& kv : info) { + max_width = std::max(max_width, static_cast(kv.first.length())); + } + + output << "Timer report \n\n"; + output << std::setw(max_width) << headerOne << separator << "Time (s)" + << "\n"; + output << std::setw(max_width) << std::string(max_width, '-') << separator + << std::string(max_width, '-') << "\n"; + for (const auto& kv : info) { + output << std::left << 
std::setw(max_width) << kv.first << " | " + << kv.second.time.count() << " (" << kv.second.ntimes << ")\n"; + } + output << "\n"; +} From d0fe554dced8458923d7ad48601b15f16dc2c97c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 8 Jan 2020 17:17:53 +0000 Subject: [PATCH 116/428] Add option to write all timers at end of simulation --- src/bout++.cxx | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/bout++.cxx b/src/bout++.cxx index 9fe0eaa272..20890236c2 100644 --- a/src/bout++.cxx +++ b/src/bout++.cxx @@ -594,6 +594,10 @@ int BoutFinalise(bool write_settings) { } } + if (Options::root()["write_final_timings"].withDefault(false)) { + Timer::listAllInfo(); + } + // Delete the mesh delete bout::globals::mesh; From ad9b3a6cb2f6356b87cdf4c14df998f660d2eeea Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 9 Jan 2020 16:14:06 +0000 Subject: [PATCH 117/428] Keep track of total time in Timers --- include/bout/sys/timer.hxx | 14 ++++++-- src/bout++.cxx | 2 ++ src/sys/timer.cxx | 51 ++++++++++++++++---------- tests/unit/sys/test_timer.cxx | 67 +++++++++++++++++++++++++++++++++++ 4 files changed, 113 insertions(+), 21 deletions(-) diff --git a/include/bout/sys/timer.hxx b/include/bout/sys/timer.hxx index 693f143d29..542ab7d988 100644 --- a/include/bout/sys/timer.hxx +++ b/include/bout/sys/timer.hxx @@ -64,6 +64,9 @@ public: */ double getTime() { return getTime(timing); } + /// Get the total time in seconds since the very first initialisation + double getTotalTime() { return getTotalTime(timing); } + /*! * Get the time in seconds, reset timer to zero */ @@ -74,6 +77,9 @@ public: */ static double getTime(const std::string& label) { return getTime(getInfo(label)); } + /// Total time elapsed since the very first initialisation + static double getTotalTime(const std::string& label) { return getTotalTime(getInfo(label)); } + /*! * The total time in seconds, resets the timer to zero */ @@ -87,12 +93,13 @@ public: private: /// Structure to contain timing information struct timer_info { - seconds time; ///< Total time + seconds time; ///< Time of last duration/since last reset + seconds total_time; ///< Total time since initial creation bool running; ///< Is the timer currently running? 
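+    // Illustrative note, based on the accessors defined in timer.cxx: `time` is
+    // what resetTime() clears back to zero, while `total_time` is never reset,
+    // so getTotalTime() reports all time accumulated by Timers created with
+    // this label since the first one was constructed.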
clock_type::time_point started; ///< Start time unsigned int counter; ///< Number of Timer objects associated with this /// timer_info - unsigned int ntimes; + unsigned int hits; ///< Number of times this Timer was hit }; /// Store of existing timing info objects @@ -107,6 +114,9 @@ private: /// Get the elapsed time in seconds for timing info static double getTime(const timer_info& info); + /// Get the total elapsed time in seconds since the first initialisation + static double getTotalTime(const timer_info& info); + /// Get the elapsed time, reset timing info to zero static double resetTime(timer_info& info); diff --git a/src/bout++.cxx b/src/bout++.cxx index 20890236c2..8d63e6efe2 100644 --- a/src/bout++.cxx +++ b/src/bout++.cxx @@ -595,7 +595,9 @@ int BoutFinalise(bool write_settings) { } if (Options::root()["write_final_timings"].withDefault(false)) { + output.write("\nTimer report \n\n"); Timer::listAllInfo(); + output.write("\n"); } // Delete the mesh diff --git a/src/sys/timer.cxx b/src/sys/timer.cxx index 9164fcbf27..cc9f83e8ec 100644 --- a/src/sys/timer.cxx +++ b/src/sys/timer.cxx @@ -4,7 +4,7 @@ Timer::Timer() : timing(getInfo("")) { if (timing.counter == 0) { timing.started = clock_type::now(); timing.running = true; - timing.ntimes++; + ++timing.hits; } timing.counter += 1; } @@ -13,7 +13,7 @@ Timer::Timer(const std::string& label) : timing(getInfo(label)) { if (timing.counter == 0) { timing.started = clock_type::now(); timing.running = true; - timing.ntimes++; + ++timing.hits; } timing.counter += 1; } @@ -21,9 +21,10 @@ Timer::Timer(const std::string& label) : timing(getInfo(label)) { Timer::~Timer() { timing.counter -= 1; if (timing.counter == 0) { - auto finished = clock_type::now(); + const auto elapsed = clock_type::now() - timing.started; timing.running = false; - timing.time += finished - timing.started; + timing.time += elapsed; + timing.total_time += elapsed; } } @@ -35,7 +36,7 @@ Timer::timer_info& Timer::getInfo(const std::string& label) { auto it = info.find(label); if (it == info.end()) { auto timer = info.emplace( - label, timer_info{seconds{0}, false, clock_type::now(), 0, 0}); + label, timer_info{seconds{0}, seconds{0}, false, clock_type::now(), 0, 0}); return timer.first->second; } return it->second; @@ -48,34 +49,46 @@ double Timer::getTime(const Timer::timer_info& info) { return seconds{info.time}.count(); } +double Timer::getTotalTime(const Timer::timer_info& info) { + if (info.running) { + return seconds{info.total_time + (clock_type::now() - info.started)}.count(); + } + return seconds{info.total_time}.count(); +} + double Timer::resetTime(Timer::timer_info& info) { - auto val = info.time; + auto current_duration = info.time; info.time = clock_type::duration{0}; if (info.running) { - auto cur_time = clock_type::now(); - val += cur_time - info.started; - info.started = cur_time; + const auto current_time = clock_type::now(); + const auto elapsed = current_time - info.started; + current_duration += elapsed; + info.started = current_time; + info.total_time += elapsed; } - return seconds{val}.count(); + return seconds{current_duration}.count(); } void Timer::listAllInfo() { const std::string headerOne = "Timer name"; + const std::string headerTime = "Current time (s)"; + const std::string headerHits = "Hits"; + const std::string headerTotal = "Total time (s)"; const std::string separator = " | "; - auto max_width = static_cast(headerOne.length()); - for (const auto& kv : info) { - max_width = std::max(max_width, static_cast(kv.first.length())); - } + auto 
max_width = static_cast(headerTime.length()); output << "Timer report \n\n"; - output << std::setw(max_width) << headerOne << separator << "Time (s)" - << "\n"; + output << std::setw(max_width) << headerOne << separator << headerTime + << separator << headerHits << separator << headerTotal << "\n"; output << std::setw(max_width) << std::string(max_width, '-') << separator - << std::string(max_width, '-') << "\n"; + << std::string(max_width, '-') << separator + << std::string(max_width, '-') << separator + << std::string(max_width, '-') << "\n"; for (const auto& kv : info) { - output << std::left << std::setw(max_width) << kv.first << " | " - << kv.second.time.count() << " (" << kv.second.ntimes << ")\n"; + output << std::left << std::setw(max_width) << kv.first << separator + << kv.second.time.count() << separator << << kv.second.hits + << separator << kv.second.total_time.count() << ")\n"; } output << "\n"; } diff --git a/tests/unit/sys/test_timer.cxx b/tests/unit/sys/test_timer.cxx index 9d118c8d35..5fd5a9685d 100644 --- a/tests/unit/sys/test_timer.cxx +++ b/tests/unit/sys/test_timer.cxx @@ -1,4 +1,5 @@ #include "gtest/gtest.h" +#include "gmock/gmock.h" #include "bout/sys/timer.hxx" @@ -167,6 +168,39 @@ TEST(TimerTest, ResetTimeLabelOutOfScope) { bout::testing::TimerTolerance); } +TEST(TimerTest, GetTotalTime) { + const auto start = Timer::clock_type::now(); + Timer timer{"GetTotalTime test"}; + + std::this_thread::sleep_for(bout::testing::sleep_length); + + timer.resetTime(); + + std::this_thread::sleep_for(bout::testing::sleep_length); + + const auto end = Timer::clock_type::now(); + const Timer::seconds elapsed = end - start; + + EXPECT_NEAR(timer.getTotalTime(), elapsed.count(), bout::testing::TimerTolerance); +} + +TEST(TimerTest, GetTotalTimeFromLabel) { + const auto start = Timer::clock_type::now(); + Timer timer{"GetTotalTimeFromLabel test"}; + + std::this_thread::sleep_for(bout::testing::sleep_length); + + timer.resetTime(); + + std::this_thread::sleep_for(bout::testing::sleep_length); + + const auto end = Timer::clock_type::now(); + const Timer::seconds elapsed = end - start; + + EXPECT_NEAR(Timer::getTotalTime("GetTotalTimeFromLabel test"), elapsed.count(), + bout::testing::TimerTolerance); +} + TEST(TimerTest, Cleanup) { { Timer timer{"Cleanup test"}; @@ -190,3 +224,36 @@ TEST(TimerTest, Cleanup) { EXPECT_NEAR(Timer::getTime("Cleanup test"), elapsed.count(), bout::testing::TimerTolerance); } + +TEST(TimerTest, ListAllInfo) { + Timer::cleanup(); + + { + Timer time1{"one"}; + std::this_thread::sleep_for(bout::testing::sleep_length); + { + Timer time2{"two"}; + std::this_thread::sleep_for(bout::testing::sleep_length); + } + { + Timer time2{"two"}; + std::this_thread::sleep_for(bout::testing::sleep_length); + } + Timer time_long{"18 characters long"}; + } + + Timer::resetTime("two"); + + std::streambuf* old_cout_rdbuf(std::cout.rdbuf()); + std::stringstream cout_capture; + std::cout.rdbuf(cout_capture.rdbuf()); + + Timer::listAllInfo(); + + std::cout.rdbuf(old_cout_rdbuf); + + using namespace ::testing; + EXPECT_THAT(cout_capture.str(), HasSubstr("Timer name |")); + EXPECT_THAT(cout_capture.str(), ContainsRegex("one *| 0\\.\\d+ | 0\\.\\d+ | 1")); + EXPECT_THAT(cout_capture.str(), ContainsRegex("two *| 0 *| 0\\.\\d+ | 2")); +} From fc32a05432c8e0e828e1fb94c673d9c3f8e039be Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 9 Jan 2020 17:38:06 +0000 Subject: [PATCH 118/428] Change the timer report columns to include mean time per hit Also: - remove the "current time" column - add 
comments --- src/sys/timer.cxx | 17 +++++++++-------- tests/unit/sys/test_timer.cxx | 4 ++-- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/sys/timer.cxx b/src/sys/timer.cxx index cc9f83e8ec..76c3607554 100644 --- a/src/sys/timer.cxx +++ b/src/sys/timer.cxx @@ -70,25 +70,26 @@ double Timer::resetTime(Timer::timer_info& info) { } void Timer::listAllInfo() { - const std::string headerOne = "Timer name"; - const std::string headerTime = "Current time (s)"; + const std::string headerName = "Timer name"; + const std::string headerTime = "Total time (s)"; const std::string headerHits = "Hits"; - const std::string headerTotal = "Total time (s)"; + const std::string headerMean = "Mean time/hit (s)"; const std::string separator = " | "; - auto max_width = static_cast(headerTime.length()); + auto max_width = static_cast(headerMean.length()); output << "Timer report \n\n"; - output << std::setw(max_width) << headerOne << separator << headerTime - << separator << headerHits << separator << headerTotal << "\n"; + output << std::setw(max_width) << headerName << separator << headerTime + << separator << headerHits << separator << headerMean << "\n"; output << std::setw(max_width) << std::string(max_width, '-') << separator << std::string(max_width, '-') << separator << std::string(max_width, '-') << separator << std::string(max_width, '-') << "\n"; for (const auto& kv : info) { output << std::left << std::setw(max_width) << kv.first << separator - << kv.second.time.count() << separator << << kv.second.hits - << separator << kv.second.total_time.count() << ")\n"; + << kv.second.total.time.count() << separator << << kv.second.hits + << separator << kv.second.total_time.count() / kv.second.hits + << "\n"; } output << "\n"; } diff --git a/tests/unit/sys/test_timer.cxx b/tests/unit/sys/test_timer.cxx index 5fd5a9685d..9e11e2aa4f 100644 --- a/tests/unit/sys/test_timer.cxx +++ b/tests/unit/sys/test_timer.cxx @@ -254,6 +254,6 @@ TEST(TimerTest, ListAllInfo) { using namespace ::testing; EXPECT_THAT(cout_capture.str(), HasSubstr("Timer name |")); - EXPECT_THAT(cout_capture.str(), ContainsRegex("one *| 0\\.\\d+ | 0\\.\\d+ | 1")); - EXPECT_THAT(cout_capture.str(), ContainsRegex("two *| 0 *| 0\\.\\d+ | 2")); + EXPECT_THAT(cout_capture.str(), ContainsRegex("one *| 0\\.\\d+ | 1 | 0\\.\\d+")); + EXPECT_THAT(cout_capture.str(), ContainsRegex("two *| 0 * | 2 | 0\\.\\d+")); } From 576959b478afc5ce27b6c100da97fe8a5775682a Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 17 Jan 2020 12:05:37 +0000 Subject: [PATCH 119/428] Sort the time-report table by largest total time --- src/sys/timer.cxx | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/sys/timer.cxx b/src/sys/timer.cxx index 76c3607554..02789a0e95 100644 --- a/src/sys/timer.cxx +++ b/src/sys/timer.cxx @@ -78,6 +78,17 @@ void Timer::listAllInfo() { auto max_width = static_cast(headerMean.length()); + // To sort the map by the total time, we need to copy the key-value + // pairs into another container that we can sort ourselves + std::vector> sorted_info; + sorted_info.reserve(info.size()); + std::transform(begin(info), end(info), std::back_inserter(sorted_info), + [](const auto& it) { return it; }); + // Sort so that the largest total time is first + std::sort(begin(sorted_info), end(sorted_info), [](const auto& a, const auto& b) { + return a.second.total_time > b.second.total_time; + }); + output << "Timer report \n\n"; output << std::setw(max_width) << headerName << separator << headerTime << separator << 
headerHits << separator << headerMean << "\n"; @@ -85,7 +96,7 @@ void Timer::listAllInfo() { << std::string(max_width, '-') << separator << std::string(max_width, '-') << separator << std::string(max_width, '-') << "\n"; - for (const auto& kv : info) { + for (const auto& kv : sorted_info) { output << std::left << std::setw(max_width) << kv.first << separator << kv.second.total.time.count() << separator << << kv.second.hits << separator << kv.second.total_time.count() / kv.second.hits From 9bf893f53b815dee84ec37b9c3e71749ba15eb75 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 17 Jan 2020 13:16:03 +0000 Subject: [PATCH 120/428] Change time report option name Use a subsection to avoid cutting off future optional features --- src/bout++.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bout++.cxx b/src/bout++.cxx index 8d63e6efe2..826afa9042 100644 --- a/src/bout++.cxx +++ b/src/bout++.cxx @@ -594,7 +594,7 @@ int BoutFinalise(bool write_settings) { } } - if (Options::root()["write_final_timings"].withDefault(false)) { + if (Options::root()["time_report:show"].withDefault(false)) { output.write("\nTimer report \n\n"); Timer::listAllInfo(); output.write("\n"); From f4c68cf156583266688bb432c7ad35493941f28d Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 17 Jan 2020 13:22:45 +0000 Subject: [PATCH 121/428] Change name of Timer method to print timer report table --- include/bout/sys/timer.hxx | 8 ++++++-- src/bout++.cxx | 2 +- src/sys/timer.cxx | 2 +- tests/unit/sys/test_timer.cxx | 2 +- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/include/bout/sys/timer.hxx b/include/bout/sys/timer.hxx index 542ab7d988..9875af74fa 100644 --- a/include/bout/sys/timer.hxx +++ b/include/bout/sys/timer.hxx @@ -124,8 +124,12 @@ public: /// Return the map of all the individual timers static std::map getAllInfo() { return info; } - /// Print all the timers - static void listAllInfo(); + /// Print a table listing all known timers to `output` + /// + /// Table is sorted by descending largest total time and has columns + /// for total time, percentage of largest total time, total number + /// of hits, and mean time per hit + static void printTimeReport(); }; #define AUTO_TIME() Timer CONCATENATE(time_,__LINE__)(__thefunc__) diff --git a/src/bout++.cxx b/src/bout++.cxx index 826afa9042..ff117907fd 100644 --- a/src/bout++.cxx +++ b/src/bout++.cxx @@ -596,7 +596,7 @@ int BoutFinalise(bool write_settings) { if (Options::root()["time_report:show"].withDefault(false)) { output.write("\nTimer report \n\n"); - Timer::listAllInfo(); + Timer::printTimeReport(); output.write("\n"); } diff --git a/src/sys/timer.cxx b/src/sys/timer.cxx index 02789a0e95..b98a73a6e7 100644 --- a/src/sys/timer.cxx +++ b/src/sys/timer.cxx @@ -69,7 +69,7 @@ double Timer::resetTime(Timer::timer_info& info) { return seconds{current_duration}.count(); } -void Timer::listAllInfo() { +void Timer::printTimeReport() { const std::string headerName = "Timer name"; const std::string headerTime = "Total time (s)"; const std::string headerHits = "Hits"; diff --git a/tests/unit/sys/test_timer.cxx b/tests/unit/sys/test_timer.cxx index 9e11e2aa4f..44ed102f9f 100644 --- a/tests/unit/sys/test_timer.cxx +++ b/tests/unit/sys/test_timer.cxx @@ -248,7 +248,7 @@ TEST(TimerTest, ListAllInfo) { std::stringstream cout_capture; std::cout.rdbuf(cout_capture.rdbuf()); - Timer::listAllInfo(); + Timer::printTimeReport(); std::cout.rdbuf(old_cout_rdbuf); From 570fed00c322c069a7e156b816b5af1449264473 Mon Sep 17 00:00:00 2001 From: Peter 
Hill Date: Thu, 21 Jan 2021 15:40:46 +0000 Subject: [PATCH 122/428] Fix typo in Timer --- src/sys/timer.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sys/timer.cxx b/src/sys/timer.cxx index b98a73a6e7..346bea5421 100644 --- a/src/sys/timer.cxx +++ b/src/sys/timer.cxx @@ -98,7 +98,7 @@ void Timer::printTimeReport() { << std::string(max_width, '-') << "\n"; for (const auto& kv : sorted_info) { output << std::left << std::setw(max_width) << kv.first << separator - << kv.second.total.time.count() << separator << << kv.second.hits + << kv.second.total_time.count() << separator << << kv.second.hits << separator << kv.second.total_time.count() / kv.second.hits << "\n"; } From 7fb72e68415e0afdc57e64300cfc5ede8467951b Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 21 Jan 2021 15:51:43 +0000 Subject: [PATCH 123/428] Fix typo in Timer --- src/sys/timer.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sys/timer.cxx b/src/sys/timer.cxx index 346bea5421..1685b3a124 100644 --- a/src/sys/timer.cxx +++ b/src/sys/timer.cxx @@ -98,7 +98,7 @@ void Timer::printTimeReport() { << std::string(max_width, '-') << "\n"; for (const auto& kv : sorted_info) { output << std::left << std::setw(max_width) << kv.first << separator - << kv.second.total_time.count() << separator << << kv.second.hits + << kv.second.total_time.count() << separator << kv.second.hits << separator << kv.second.total_time.count() / kv.second.hits << "\n"; } From fbf268a63d6ee8ebef644a0afc2c7daa7fb5ceb1 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 21 Jan 2021 16:35:53 +0000 Subject: [PATCH 124/428] Include gmock in tests/unit/makefile --- tests/unit/makefile | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tests/unit/makefile b/tests/unit/makefile index 6bc5bd1fe5..0615702306 100644 --- a/tests/unit/makefile +++ b/tests/unit/makefile @@ -16,6 +16,7 @@ GTEST_SOURCES = $(GTEST_DIR)/src/gtest-all.cc # Set Google Test's header directory as a system directory, such that # the compiler doesn't generate warnings in Google Test headers. CXXFLAGS += -isystem ${GTEST_DIR}/include -I${GTEST_DIR} -I${BOUT_TEST_DIR} +CXXFLAGS += -isystem ${GMOCK_DIR}/include -I${GMOCK_DIR} # Flags passed to the C++ compiler. 
CXXFLAGS += -g -Wall -Wextra -pthread @@ -60,6 +61,11 @@ gtest-all.o : $(GTEST_SENTINEL) @$(CXX) $(CPPFLAGS) -I$(GTEST_DIR) $(BOUT_FLAGS) -c \ $(GTEST_DIR)/src/gtest-all.cc +gmock-all.o : $(GTEST_SENTINEL) + @echo " Compiling" $@ + @$(CXX) $(CPPFLAGS) -I$(GTEST_DIR) $(BOUT_FLAGS) -c \ + $(GMOCK_DIR)/src/gmock-all.cc + bout_test_main.o : $(GTEST_SENTINEL) $(BOUT_TEST_DIR)/bout_test_main.cxx @echo " Compiling" $@ @$(CXX) $(CPPFLAGS) -I$(GTEST_DIR) $(BOUT_INCLUDE) $(BOUT_FLAGS) -c $(@:.o=.cxx) @@ -68,7 +74,11 @@ gtest.a : gtest-all.o @echo " Linking" $@ @$(AR) $(ARFLAGS) $@ $^ -bout_test_main.a : gtest-all.o bout_test_main.o +gmock.a : gmock-all.o + @echo " Linking" $@ + @$(AR) $(ARFLAGS) $@ $^ + +bout_test_main.a : gtest-all.o gmock-all.o bout_test_main.o @echo " Linking" $@ @$(AR) $(ARFLAGS) $@ $^ From 5ce4738d3f5d2c1039b94deaa022a4eb84804f71 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 21 Jan 2021 16:38:19 +0000 Subject: [PATCH 125/428] Fix expected value in TimerTest::ListAllInfo --- tests/unit/sys/test_timer.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/sys/test_timer.cxx b/tests/unit/sys/test_timer.cxx index 44ed102f9f..c9b46e0eba 100644 --- a/tests/unit/sys/test_timer.cxx +++ b/tests/unit/sys/test_timer.cxx @@ -253,7 +253,7 @@ TEST(TimerTest, ListAllInfo) { std::cout.rdbuf(old_cout_rdbuf); using namespace ::testing; - EXPECT_THAT(cout_capture.str(), HasSubstr("Timer name |")); + EXPECT_THAT(cout_capture.str(), HasSubstr("Timer name |")); EXPECT_THAT(cout_capture.str(), ContainsRegex("one *| 0\\.\\d+ | 1 | 0\\.\\d+")); EXPECT_THAT(cout_capture.str(), ContainsRegex("two *| 0 * | 2 | 0\\.\\d+")); } From 3ff05d0ded4cc90e24058a809a29acf988055ef2 Mon Sep 17 00:00:00 2001 From: Ben Dudson Date: Fri, 20 Nov 2020 17:27:58 +0000 Subject: [PATCH 126/428] Generate random run ID, track restarts Improve data provenance tracking, so that sets of runs can be linked to each other, and the run used to generate data identified. Each time the solver is run, generate a random number which is broadcast to all processors. This is stored as "run_id" in: - log files - BOUT.settings, though at the moment only if the run finishes - restart files - dump files When a simulation is restarted, the ID of the run it started from is also recorded as "run_restart_from". If restart files are used which don't have a run_id, then this will either cause the run to fail, or set run_id to 0. The `run_restart_from` ID therefore has two special values: - 1 means no restart, the run was started from scratch - 0 means restart from unknown run_id (missing -> run_id set to 0) There may well be a better way to handle missing run_id values in restart files. --- include/bout/solver.hxx | 6 ++++++ src/solver/solver.cxx | 24 ++++++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index 6f1396f969..e093921ebe 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -409,6 +409,12 @@ protected: /// Current iteration (output time-step) number int iteration{0}; + /// Randomly generated run ID + int run_id {1}; // 0 = unknown restart, 1 = not restarted + /// The run from which this was restarted. 
+ /// Set to zero if no restart + int run_restart_from {1}; + /// Run the user's RHS function int run_rhs(BoutReal t); /// Calculate only the convective parts diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index 0d1bd1053c..da74f3a2d7 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -453,8 +453,28 @@ int Solver::solve(int NOUT, BoutReal TIMESTEP) { throw BoutException(_("Failed to initialise solver-> Aborting\n")); } + // Set the run ID to a random number + run_restart_from = run_id; // Restarting from the previous run ID + + if (MYPE == 0) { + srand(static_cast(time(nullptr))); + do { + run_id = rand(); // Different each time the simulation is run + } while((run_id == 0) || (run_id == 1)); // Ensure that run_id != 0 or 1 + } + MPI_Bcast(&run_id, 1, MPI_INT, 0, BoutComm::get()); // All ranks have same run_id + + // Put the run ID into the options tree + // Forcing in case the value has been previously set + Options::root()["run"]["run_id"].force(run_id, "Solver"); + Options::root()["run"]["run_restart_from"].force(run_restart_from, "Solver"); + /// Run the solver output_info.write(_("Running simulation\n\n")); + output_info.write("Run ID: {:d}\n", run_id); + if (run_restart_from != 0) { + output_info.write("Restarting from ID: {:d}\n", run_restart_from); + } time_t start_time = time(nullptr); output_progress.write(_("\nRun started at : %s\n"), toString(start_time).c_str()); @@ -543,6 +563,10 @@ void Solver::outputVars(Datafile &outputfile, bool save_repeat) { outputfile.addOnce(simtime, "tt"); outputfile.addOnce(iteration, "hist_hi"); + // Add run information + outputfile.addOnce(run_id, "run_id"); + outputfile.addOnce(run_restart_from, "run_restart_from"); + // Add 2D and 3D evolving fields to output file for(const auto& f : f2d) { // Add to dump file (appending) From 43ab7e36bd2c75f9dac60246ad88f6f732866725 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 2 Jan 2021 12:14:19 +0000 Subject: [PATCH 127/428] UUID library, based on github.com/mariusbancila/stduuid https://github.com/mariusbancila/stduuid is a header-only library for creating UUIDs, but requires C++20 features. This commit copies the header, but hacked to work with C++11. When BOUT++ requires C++20, we should replace this header with mariusbancila/stduuid in a submodule. --- include/bout/sys/uuid.h | 969 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 969 insertions(+) create mode 100644 include/bout/sys/uuid.h diff --git a/include/bout/sys/uuid.h b/include/bout/sys/uuid.h new file mode 100644 index 0000000000..d45fa4801c --- /dev/null +++ b/include/bout/sys/uuid.h @@ -0,0 +1,969 @@ +// Based on stduuid by Marius Bancila: https://github.com/mariusbancila/stduuid +// Originially copied from version at commit 5890c94bfac2f00f22a1c1481e5839c51d6a6f3f +// This version hacked by J. Omotani to make it C++14 compatible: +// - remove uses of span +// - brace-initialise std::atomic_short clock_sequence to avoid deleted +// move-constructor error +// - remove use of std::optional. Throw exceptions instead. +// - replace std::bytes with char +// - replace 'if constexpr' with plain 'if' +// - replace 'is_same_v<>' with 'is_same<>::value' +// - replace std::copy with strncpy in uuid_name_generator::reset() +// +// Copyright (c) 2017 +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _WIN32 +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#ifndef NOMINMAX +#define NOMINMAX +#endif + +#ifdef UUID_SYSTEM_GENERATOR +#include +#endif + +#include +#include +#include +#pragma comment(lib, "IPHLPAPI.lib") + +#elif defined(__linux__) || defined(__unix__) + +#ifdef UUID_SYSTEM_GENERATOR +#include +#endif + +#elif defined(__APPLE__) + +#ifdef UUID_SYSTEM_GENERATOR +#include +#endif + +#endif + +namespace uuids +{ + + namespace detail + { + template + constexpr inline unsigned char hex2char(TChar const ch) + { + if (ch >= static_cast('0') && ch <= static_cast('9')) + return ch - static_cast('0'); + if (ch >= static_cast('a') && ch <= static_cast('f')) + return 10 + ch - static_cast('a'); + if (ch >= static_cast('A') && ch <= static_cast('F')) + return 10 + ch - static_cast('A'); + return 0; + } + + template + constexpr inline bool is_hex(TChar const ch) + { + return + (ch >= static_cast('0') && ch <= static_cast('9')) || + (ch >= static_cast('a') && ch <= static_cast('f')) || + (ch >= static_cast('A') && ch <= static_cast('F')); + } + + template + constexpr inline unsigned char hexpair2char(TChar const a, TChar const b) + { + return (hex2char(a) << 4) | hex2char(b); + } + + class sha1 + { + public: + using digest32_t = uint32_t[5]; + using digest8_t = uint8_t[20]; + + static constexpr unsigned int block_bytes = 64; + + inline static uint32_t left_rotate(uint32_t value, size_t const count) + { + return (value << count) ^ (value >> (32 - count)); + } + + sha1() { reset(); } + + void reset() + { + m_digest[0] = 0x67452301; + m_digest[1] = 0xEFCDAB89; + m_digest[2] = 0x98BADCFE; + m_digest[3] = 0x10325476; + m_digest[4] = 0xC3D2E1F0; + m_blockByteIndex = 0; + m_byteCount = 0; + } + + void process_byte(uint8_t octet) + { + this->m_block[this->m_blockByteIndex++] = octet; + ++this->m_byteCount; + if (m_blockByteIndex == block_bytes) + { + this->m_blockByteIndex = 0; + process_block(); + } + } + + void process_block(void const * const start, void const * const end) + { + const uint8_t* begin = static_cast(start); + const uint8_t* finish = static_cast(end); + while (begin != finish) + { + process_byte(*begin); + begin++; + } + } + + void process_bytes(void const * const data, size_t const len) + { + const uint8_t* block = static_cast(data); + process_block(block, block + len); + } + + uint32_t const * get_digest(digest32_t digest) + { + size_t const bitCount = this->m_byteCount * 8; + process_byte(0x80); + if (this->m_blockByteIndex > 56) { + while (m_blockByteIndex != 0) { + process_byte(0); + } + while (m_blockByteIndex < 56) { + process_byte(0); + } + } + else { + while (m_blockByteIndex < 56) { + process_byte(0); + } + } + process_byte(0); + process_byte(0); + process_byte(0); + process_byte(0); + process_byte(static_cast((bitCount >> 24) & 0xFF)); + process_byte(static_cast((bitCount >> 16) & 0xFF)); + process_byte(static_cast((bitCount >> 8) & 0xFF)); + process_byte(static_cast((bitCount) & 0xFF)); + + memcpy(digest, m_digest, 5 * sizeof(uint32_t)); + return digest; + } + + uint8_t const * get_digest_bytes(digest8_t 
digest) + { + digest32_t d32; + get_digest(d32); + size_t di = 0; + digest[di++] = (uint8_t)(d32[0] >> 24); + digest[di++] = (uint8_t)(d32[0] >> 16); + digest[di++] = (uint8_t)(d32[0] >> 8); + digest[di++] = (uint8_t)(d32[0] >> 0); + + digest[di++] = (uint8_t)(d32[1] >> 24); + digest[di++] = (uint8_t)(d32[1] >> 16); + digest[di++] = (uint8_t)(d32[1] >> 8); + digest[di++] = (uint8_t)(d32[1] >> 0); + + digest[di++] = (uint8_t)(d32[2] >> 24); + digest[di++] = (uint8_t)(d32[2] >> 16); + digest[di++] = (uint8_t)(d32[2] >> 8); + digest[di++] = (uint8_t)(d32[2] >> 0); + + digest[di++] = (uint8_t)(d32[3] >> 24); + digest[di++] = (uint8_t)(d32[3] >> 16); + digest[di++] = (uint8_t)(d32[3] >> 8); + digest[di++] = (uint8_t)(d32[3] >> 0); + + digest[di++] = (uint8_t)(d32[4] >> 24); + digest[di++] = (uint8_t)(d32[4] >> 16); + digest[di++] = (uint8_t)(d32[4] >> 8); + digest[di++] = (uint8_t)(d32[4] >> 0); + + return digest; + } + + private: + void process_block() + { + uint32_t w[80]; + for (size_t i = 0; i < 16; i++) { + w[i] = (m_block[i * 4 + 0] << 24); + w[i] |= (m_block[i * 4 + 1] << 16); + w[i] |= (m_block[i * 4 + 2] << 8); + w[i] |= (m_block[i * 4 + 3]); + } + for (size_t i = 16; i < 80; i++) { + w[i] = left_rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1); + } + + uint32_t a = m_digest[0]; + uint32_t b = m_digest[1]; + uint32_t c = m_digest[2]; + uint32_t d = m_digest[3]; + uint32_t e = m_digest[4]; + + for (std::size_t i = 0; i < 80; ++i) + { + uint32_t f = 0; + uint32_t k = 0; + + if (i < 20) { + f = (b & c) | (~b & d); + k = 0x5A827999; + } + else if (i < 40) { + f = b ^ c ^ d; + k = 0x6ED9EBA1; + } + else if (i < 60) { + f = (b & c) | (b & d) | (c & d); + k = 0x8F1BBCDC; + } + else { + f = b ^ c ^ d; + k = 0xCA62C1D6; + } + uint32_t temp = left_rotate(a, 5) + f + e + k + w[i]; + e = d; + d = c; + c = left_rotate(b, 30); + b = a; + a = temp; + } + + m_digest[0] += a; + m_digest[1] += b; + m_digest[2] += c; + m_digest[3] += d; + m_digest[4] += e; + } + + private: + digest32_t m_digest; + uint8_t m_block[64]; + size_t m_blockByteIndex; + size_t m_byteCount; + }; + + static std::mt19937 clock_gen(std::random_device{}()); + static std::uniform_int_distribution clock_dis{ -32768, 32767 }; + static std::atomic_short clock_sequence{clock_dis(clock_gen)}; + } + + // -------------------------------------------------------------------------------------------------------------------------- + // UUID format https://tools.ietf.org/html/rfc4122 + // -------------------------------------------------------------------------------------------------------------------------- + + // -------------------------------------------------------------------------------------------------------------------------- + // Field NDR Data Type Octet # Note + // -------------------------------------------------------------------------------------------------------------------------- + // time_low unsigned long 0 - 3 The low field of the timestamp. + // time_mid unsigned short 4 - 5 The middle field of the timestamp. + // time_hi_and_version unsigned short 6 - 7 The high field of the timestamp multiplexed with the version number. + // clock_seq_hi_and_reserved unsigned small 8 The high field of the clock sequence multiplexed with the variant. + // clock_seq_low unsigned small 9 The low field of the clock sequence. + // node character 10 - 15 The spatially unique node identifier. 
+ // -------------------------------------------------------------------------------------------------------------------------- + // 0 1 2 3 + // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + // | time_low | + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + // | time_mid | time_hi_and_version | + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + // |clk_seq_hi_res | clk_seq_low | node (0-1) | + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + // | node (2-5) | + // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + // -------------------------------------------------------------------------------------------------------------------------- + // enumerations + // -------------------------------------------------------------------------------------------------------------------------- + + // indicated by a bit pattern in octet 8, marked with N in xxxxxxxx-xxxx-xxxx-Nxxx-xxxxxxxxxxxx + enum class uuid_variant + { + // NCS backward compatibility (with the obsolete Apollo Network Computing System 1.5 UUID format) + // N bit pattern: 0xxx + // > the first 6 octets of the UUID are a 48-bit timestamp (the number of 4 microsecond units of time since 1 Jan 1980 UTC); + // > the next 2 octets are reserved; + // > the next octet is the "address family"; + // > the final 7 octets are a 56-bit host ID in the form specified by the address family + ncs, + + // RFC 4122/DCE 1.1 + // N bit pattern: 10xx + // > big-endian byte order + rfc, + + // Microsoft Corporation backward compatibility + // N bit pattern: 110x + // > little endian byte order + // > formely used in the Component Object Model (COM) library + microsoft, + + // reserved for possible future definition + // N bit pattern: 111x + reserved + }; + + // indicated by a bit pattern in octet 6, marked with M in xxxxxxxx-xxxx-Mxxx-xxxx-xxxxxxxxxxxx + enum class uuid_version + { + none = 0, // only possible for nil or invalid uuids + time_based = 1, // The time-based version specified in RFC 4122 + dce_security = 2, // DCE Security version, with embedded POSIX UIDs. 
+ name_based_md5 = 3, // The name-based version specified in RFS 4122 with MD5 hashing + random_number_based = 4, // The randomly or pseudo-randomly generated version specified in RFS 4122 + name_based_sha1 = 5 // The name-based version specified in RFS 4122 with SHA1 hashing + }; + + // -------------------------------------------------------------------------------------------------------------------------- + // uuid class + // -------------------------------------------------------------------------------------------------------------------------- + class uuid + { + public: + using value_type = uint8_t; + + constexpr uuid() noexcept : data({}) {}; + + uuid(value_type(&arr)[16]) noexcept + { + std::copy(std::cbegin(arr), std::cend(arr), std::begin(data)); + } + + uuid(std::array const & arr) noexcept + { + std::copy(std::cbegin(arr), std::cend(arr), std::begin(data)); + } + + template + explicit uuid(ForwardIterator first, ForwardIterator last) + { + if (std::distance(first, last) == 16) + std::copy(first, last, std::begin(data)); + } + + constexpr uuid_variant variant() const noexcept + { + if ((data[8] & 0x80) == 0x00) + return uuid_variant::ncs; + else if ((data[8] & 0xC0) == 0x80) + return uuid_variant::rfc; + else if ((data[8] & 0xE0) == 0xC0) + return uuid_variant::microsoft; + else + return uuid_variant::reserved; + } + + constexpr uuid_version version() const noexcept + { + if ((data[6] & 0xF0) == 0x10) + return uuid_version::time_based; + else if ((data[6] & 0xF0) == 0x20) + return uuid_version::dce_security; + else if ((data[6] & 0xF0) == 0x30) + return uuid_version::name_based_md5; + else if ((data[6] & 0xF0) == 0x40) + return uuid_version::random_number_based; + else if ((data[6] & 0xF0) == 0x50) + return uuid_version::name_based_sha1; + else + return uuid_version::none; + } + + constexpr bool is_nil() const noexcept + { + for (size_t i = 0; i < data.size(); ++i) if (data[i] != 0) return false; + return true; + } + + void swap(uuid & other) noexcept + { + data.swap(other.data); + } + + inline char const* as_bytes() const + { + return reinterpret_cast(data.data()); + } + + template + static bool is_valid_uuid(CharT const * str) noexcept + { + bool firstDigit = true; + int hasBraces = 0; + size_t index = 0; + size_t size = 0; + if (std::is_same::value) + size = strlen(str); + else + size = wcslen(str); + + if (str == nullptr || size == 0) + return false; + + if (str[0] == static_cast('{')) + hasBraces = 1; + if (hasBraces && str[size - 1] != static_cast('}')) + return false; + + for (size_t i = hasBraces; i < size - hasBraces; ++i) + { + if (str[i] == static_cast('-')) continue; + + if (index >= 16 || !detail::is_hex(str[i])) + { + return false; + } + + if (firstDigit) + { + firstDigit = false; + } + else + { + index++; + firstDigit = true; + } + } + + if (index < 16) + { + return false; + } + + return true; + } + + template, + class Allocator = std::allocator> + static bool is_valid_uuid(std::basic_string const & str) noexcept + { + return is_valid_uuid(str.c_str()); + } + + template + static uuid from_string(CharT const * str) noexcept + { + CharT digit = 0; + bool firstDigit = true; + int hasBraces = 0; + size_t index = 0; + size_t size = 0; + if (std::is_same::value) + size = strlen(str); + else + size = wcslen(str); + + std::array data{ { 0 } }; + + if (str == nullptr || size == 0) + { + throw std::runtime_error( + "trying to construct uuid from null or empty char* array" + ); + } + + if (str[0] == static_cast('{')) + hasBraces = 1; + if (hasBraces && str[size - 1] != 
static_cast('}')) + { + throw std::runtime_error( + "uuid::from_string: input has opening brace but no closing brace" + ); + } + + + for (size_t i = hasBraces; i < size - hasBraces; ++i) + { + if (str[i] == static_cast('-')) continue; + + if (index >= 16 || !detail::is_hex(str[i])) + { + throw std::runtime_error( + "uuid::from_string: too many characters or character not a hex character" + ); + } + + if (firstDigit) + { + digit = str[i]; + firstDigit = false; + } + else + { + data[index++] = detail::hexpair2char(digit, str[i]); + firstDigit = true; + } + } + + if (index < 16) + { + throw std::runtime_error( + "uuid::from_string: not enough characters in input" + ); + } + + return uuid{ std::cbegin(data), std::cend(data) }; + } + + template, + class Allocator = std::allocator> + static uuid from_string(std::basic_string const & str) noexcept + { + return from_string(str.c_str()); + } + + private: + std::array data{ { 0 } }; + + friend bool operator==(uuid const & lhs, uuid const & rhs) noexcept; + friend bool operator<(uuid const & lhs, uuid const & rhs) noexcept; + + template + friend std::basic_ostream & operator<<(std::basic_ostream &s, uuid const & id); + }; + + // -------------------------------------------------------------------------------------------------------------------------- + // operators and non-member functions + // -------------------------------------------------------------------------------------------------------------------------- + + inline bool operator== (uuid const& lhs, uuid const& rhs) noexcept + { + return lhs.data == rhs.data; + } + + inline bool operator!= (uuid const& lhs, uuid const& rhs) noexcept + { + return !(lhs == rhs); + } + + inline bool operator< (uuid const& lhs, uuid const& rhs) noexcept + { + return lhs.data < rhs.data; + } + + template + std::basic_ostream & operator<<(std::basic_ostream &s, uuid const & id) + { + // save current flags + std::ios_base::fmtflags f(s.flags()); + + // manipulate stream as needed + s << std::hex << std::setfill(static_cast('0')) + << std::setw(2) << (int)id.data[0] + << std::setw(2) << (int)id.data[1] + << std::setw(2) << (int)id.data[2] + << std::setw(2) << (int)id.data[3] + << '-' + << std::setw(2) << (int)id.data[4] + << std::setw(2) << (int)id.data[5] + << '-' + << std::setw(2) << (int)id.data[6] + << std::setw(2) << (int)id.data[7] + << '-' + << std::setw(2) << (int)id.data[8] + << std::setw(2) << (int)id.data[9] + << '-' + << std::setw(2) << (int)id.data[10] + << std::setw(2) << (int)id.data[11] + << std::setw(2) << (int)id.data[12] + << std::setw(2) << (int)id.data[13] + << std::setw(2) << (int)id.data[14] + << std::setw(2) << (int)id.data[15]; + + // restore original flags + s.flags(f); + + return s; + } + + template, + class Allocator = std::allocator> + inline std::basic_string to_string(uuid const & id) + { + std::basic_stringstream sstr; + sstr << id; + return sstr.str(); + } + + inline void swap(uuids::uuid & lhs, uuids::uuid & rhs) noexcept + { + lhs.swap(rhs); + } + + // -------------------------------------------------------------------------------------------------------------------------- + // namespace IDs that could be used for generating name-based uuids + // -------------------------------------------------------------------------------------------------------------------------- + + // Name string is a fully-qualified domain name + static uuid uuid_namespace_dns{ {0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} }; + + // Name string is a URL + 
static uuid uuid_namespace_url{ {0x6b, 0xa7, 0xb8, 0x11, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} }; + + // Name string is an ISO OID (See https://oidref.com/, https://en.wikipedia.org/wiki/Object_identifier) + static uuid uuid_namespace_oid{ {0x6b, 0xa7, 0xb8, 0x12, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} }; + + // Name string is an X.500 DN, in DER or a text output format (See https://en.wikipedia.org/wiki/X.500, https://en.wikipedia.org/wiki/Abstract_Syntax_Notation_One) + static uuid uuid_namespace_x500{ {0x6b, 0xa7, 0xb8, 0x14, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} }; + + // -------------------------------------------------------------------------------------------------------------------------- + // uuid generators + // -------------------------------------------------------------------------------------------------------------------------- + +#ifdef UUID_SYSTEM_GENERATOR + class uuid_system_generator + { + public: + using result_type = uuid; + + uuid operator()() + { +#ifdef _WIN32 + + GUID newId; + ::CoCreateGuid(&newId); + + std::array bytes = + { { + (unsigned char)((newId.Data1 >> 24) & 0xFF), + (unsigned char)((newId.Data1 >> 16) & 0xFF), + (unsigned char)((newId.Data1 >> 8) & 0xFF), + (unsigned char)((newId.Data1) & 0xFF), + + (unsigned char)((newId.Data2 >> 8) & 0xFF), + (unsigned char)((newId.Data2) & 0xFF), + + (unsigned char)((newId.Data3 >> 8) & 0xFF), + (unsigned char)((newId.Data3) & 0xFF), + + newId.Data4[0], + newId.Data4[1], + newId.Data4[2], + newId.Data4[3], + newId.Data4[4], + newId.Data4[5], + newId.Data4[6], + newId.Data4[7] + } }; + + return uuid{ std::begin(bytes), std::end(bytes) }; + +#elif defined(__linux__) || defined(__unix__) + + uuid_t id; + uuid_generate(id); + + std::array bytes = + { { + id[0], + id[1], + id[2], + id[3], + id[4], + id[5], + id[6], + id[7], + id[8], + id[9], + id[10], + id[11], + id[12], + id[13], + id[14], + id[15] + } }; + + return uuid{ std::begin(bytes), std::end(bytes) }; + +#elif defined(__APPLE__) + auto newId = CFUUIDCreate(NULL); + auto bytes = CFUUIDGetUUIDBytes(newId); + CFRelease(newId); + + std::array arrbytes = + { { + bytes.byte0, + bytes.byte1, + bytes.byte2, + bytes.byte3, + bytes.byte4, + bytes.byte5, + bytes.byte6, + bytes.byte7, + bytes.byte8, + bytes.byte9, + bytes.byte10, + bytes.byte11, + bytes.byte12, + bytes.byte13, + bytes.byte14, + bytes.byte15 + } }; + return uuid{ std::begin(arrbytes), std::end(arrbytes) }; +#else + return uuid{}; +#endif + } + }; +#endif + + template + class basic_uuid_random_generator + { + public: + using engine_type = UniformRandomNumberGenerator; + + explicit basic_uuid_random_generator(engine_type& gen) : + generator(&gen, [](auto) {}) {} + explicit basic_uuid_random_generator(engine_type* gen) : + generator(gen, [](auto) {}) {} + + uuid operator()() + { + uint8_t bytes[16]; + for (int i = 0; i < 16; i += 4) + *reinterpret_cast(bytes + i) = distribution(*generator); + + // variant must be 10xxxxxx + bytes[8] &= 0xBF; + bytes[8] |= 0x80; + + // version must be 0100xxxx + bytes[6] &= 0x4F; + bytes[6] |= 0x40; + + return uuid{std::begin(bytes), std::end(bytes)}; + } + + private: + std::uniform_int_distribution distribution; + std::shared_ptr generator; + }; + + using uuid_random_generator = basic_uuid_random_generator; + + class uuid_name_generator + { + public: + explicit uuid_name_generator(uuid const& namespace_uuid) noexcept + : nsuuid(namespace_uuid) + {} + + template + uuid 
operator()(CharT const * name) + { + size_t size = 0; + if (std::is_same::value) + size = strlen(name); + else + size = wcslen(name); + + reset(); + process_characters(name, size); + return make_uuid(); + } + + template, + class Allocator = std::allocator> + uuid operator()(std::basic_string const & name) + { + reset(); + process_characters(name.data(), name.size()); + return make_uuid(); + } + + private: + void reset() + { + hasher.reset(); + char bytes[16]; + auto nsbytes = nsuuid.as_bytes(); + strncpy(bytes, nsbytes, 16); + hasher.process_bytes(bytes, 16); + } + + template ::value>> + void process_characters(char_type const * const characters, size_t const count) + { + for (size_t i = 0; i < count; i++) + { + uint32_t c = characters[i]; + hasher.process_byte(static_cast((c >> 0) & 0xFF)); + hasher.process_byte(static_cast((c >> 8) & 0xFF)); + hasher.process_byte(static_cast((c >> 16) & 0xFF)); + hasher.process_byte(static_cast((c >> 24) & 0xFF)); + } + } + + void process_characters(const char * const characters, size_t const count) + { + hasher.process_bytes(characters, count); + } + + uuid make_uuid() + { + detail::sha1::digest8_t digest; + hasher.get_digest_bytes(digest); + + // variant must be 0b10xxxxxx + digest[8] &= 0xBF; + digest[8] |= 0x80; + + // version must be 0b0101xxxx + digest[6] &= 0x5F; + digest[6] |= 0x50; + + return uuid{ digest, digest + 16 }; + } + + private: + uuid nsuuid; + detail::sha1 hasher; + }; + + // !!! DO NOT USE THIS IN PRODUCTION + // this implementation is unreliable for good uuids + class uuid_time_generator + { + using mac_address = std::array; + + mac_address device_address; + bool has_mac_address = false; + + bool get_mac_address() + { + if (has_mac_address) + { + return true; + } + +#ifdef _WIN32 + DWORD len = 0; + auto ret = GetAdaptersInfo(nullptr, &len); + if (ret != ERROR_BUFFER_OVERFLOW) return false; + std::vector buf(len); + auto pips = reinterpret_cast(&buf.front()); + ret = GetAdaptersInfo(pips, &len); + if (ret != ERROR_SUCCESS) return false; + mac_address addr; + std::copy(pips->Address, pips->Address + 6, std::begin(addr)); + device_address = addr; + has_mac_address = true +#endif + + return has_mac_address; + } + + long long get_time_intervals() + { + auto start = std::chrono::system_clock::from_time_t(time_t(-12219292800)); + auto diff = std::chrono::system_clock::now() - start; + auto ns = std::chrono::duration_cast(diff).count(); + return ns / 100; + } + + public: + uuid_time_generator() + { + } + + uuid operator()() + { + if (get_mac_address()) + { + std::array data; + + auto tm = get_time_intervals(); + + short clock_seq = detail::clock_sequence++; + + clock_seq &= 0x3FFF; + + auto ptm = reinterpret_cast(&tm); + ptm[0] &= 0x0F; + + memcpy(&data[0], ptm + 4, 4); + memcpy(&data[4], ptm + 2, 2); + memcpy(&data[6], ptm, 2); + + memcpy(&data[8], reinterpret_cast(&clock_seq), 2); + + // variant must be 0b10xxxxxx + data[8] &= 0xBF; + data[8] |= 0x80; + + // version must be 0b0001xxxx + data[6] &= 0x5F; + data[6] |= 0x10; + + memcpy(&data[10], &device_address[0], 6); + has_mac_address = true; + + return uuids::uuid{std::cbegin(data), std::cend(data)}; + } + + throw std::runtime_error("Could not get MAC address"); + } + }; +} + +namespace std +{ + template <> + struct hash + { + using argument_type = uuids::uuid; + using result_type = std::size_t; + + result_type operator()(argument_type const &uuid) const + { + std::hash hasher; + return static_cast(hasher(uuids::to_string(uuid))); + } + }; +} From a662f1fac7c61627da5ad9ed8ef39fdfc915df61 
Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 2 Jan 2021 13:31:22 +0000 Subject: [PATCH 128/428] Use a UUID for Solver::run_id --- include/bout/solver.hxx | 8 +++++--- src/solver/solver.cxx | 25 ++++++++++++++++--------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index e093921ebe..488a15c424 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -42,6 +42,7 @@ #include "options.hxx" #include "unused.hxx" #include "bout/monitor.hxx" +#include "bout/sys/uuid.h" #include @@ -410,10 +411,11 @@ protected: int iteration{0}; /// Randomly generated run ID - int run_id {1}; // 0 = unknown restart, 1 = not restarted + /// Initialise with 36 characters so the allocated array is the right size + /// Use 'z' because it is not a valid hex character, so this is an invalid UUID + std::string run_id = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"; /// The run from which this was restarted. - /// Set to zero if no restart - int run_restart_from {1}; + std::string run_restart_from = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"; /// Run the user's RHS function int run_rhs(BoutReal t); diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index da74f3a2d7..c21396a955 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -453,16 +453,23 @@ int Solver::solve(int NOUT, BoutReal TIMESTEP) { throw BoutException(_("Failed to initialise solver-> Aborting\n")); } - // Set the run ID to a random number + // Set the run ID run_restart_from = run_id; // Restarting from the previous run ID if (MYPE == 0) { - srand(static_cast(time(nullptr))); - do { - run_id = rand(); // Different each time the simulation is run - } while((run_id == 0) || (run_id == 1)); // Ensure that run_id != 0 or 1 + std::random_device rd; + auto seed_data = std::array {}; + std::generate(std::begin(seed_data), std::end(seed_data), std::ref(rd)); + std::seed_seq seq(std::begin(seed_data), std::end(seed_data)); + std::mt19937 generator(seq); + uuids::uuid_random_generator gen{generator}; + + run_id = uuids::to_string(gen()); // Different each time the simulation is run } - MPI_Bcast(&run_id, 1, MPI_INT, 0, BoutComm::get()); // All ranks have same run_id + + // All ranks have same run_id + // Standard representation of UUID is always 36 characters + MPI_Bcast(const_cast(run_id.data()), 36, MPI_CHAR, 0, BoutComm::get()); // Put the run ID into the options tree // Forcing in case the value has been previously set @@ -471,9 +478,9 @@ int Solver::solve(int NOUT, BoutReal TIMESTEP) { /// Run the solver output_info.write(_("Running simulation\n\n")); - output_info.write("Run ID: {:d}\n", run_id); - if (run_restart_from != 0) { - output_info.write("Restarting from ID: {:d}\n", run_restart_from); + output_info.write("Run ID: {:s}\n", run_id); + if (run_restart_from != "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz") { + output_info.write("Restarting from ID: {:s}\n", run_restart_from); } time_t start_time = time(nullptr); From 3fe4524d733a7f49d55a0694f0b1a153ca4eaa4b Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 3 Jan 2021 00:08:29 +0100 Subject: [PATCH 129/428] Move UUID creation into a utility method Solver::createRunId() Keeps Solver::solve() cleaner. 
--- include/bout/solver.hxx | 2 ++ src/solver/solver.cxx | 39 ++++++++++++++++++++++++--------------- 2 files changed, 26 insertions(+), 15 deletions(-) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index 488a15c424..3413fb2a82 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -416,6 +416,8 @@ protected: std::string run_id = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"; /// The run from which this was restarted. std::string run_restart_from = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"; + /// Generate a random UUID and broadcast it to all processors + std::string createRunId(); /// Run the user's RHS function int run_rhs(BoutReal t); diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index c21396a955..8c1afccc41 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -455,21 +455,7 @@ int Solver::solve(int NOUT, BoutReal TIMESTEP) { // Set the run ID run_restart_from = run_id; // Restarting from the previous run ID - - if (MYPE == 0) { - std::random_device rd; - auto seed_data = std::array {}; - std::generate(std::begin(seed_data), std::end(seed_data), std::ref(rd)); - std::seed_seq seq(std::begin(seed_data), std::end(seed_data)); - std::mt19937 generator(seq); - uuids::uuid_random_generator gen{generator}; - - run_id = uuids::to_string(gen()); // Different each time the simulation is run - } - - // All ranks have same run_id - // Standard representation of UUID is always 36 characters - MPI_Bcast(const_cast(run_id.data()), 36, MPI_CHAR, 0, BoutComm::get()); + run_id = createRunId(); // Put the run ID into the options tree // Forcing in case the value has been previously set @@ -543,6 +529,29 @@ int Solver::solve(int NOUT, BoutReal TIMESTEP) { return status; } +std::string Solver::createRunId() { + + std::string result; + result.resize(36); + + if (MYPE == 0) { + std::random_device rd; + auto seed_data = std::array {}; + std::generate(std::begin(seed_data), std::end(seed_data), std::ref(rd)); + std::seed_seq seq(std::begin(seed_data), std::end(seed_data)); + std::mt19937 generator(seq); + uuids::uuid_random_generator gen{generator}; + + result = uuids::to_string(gen()); // Different each time the simulation is run + } + + // All ranks have same run_id + // Standard representation of UUID is always 36 characters + MPI_Bcast(const_cast(result.data()), 36, MPI_CHAR, 0, BoutComm::get()); + + return result; +} + /************************************************************************** * Initialisation **************************************************************************/ From de6927885ff786482507818b11af863337b47dcf Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 3 Jan 2021 00:45:56 +0100 Subject: [PATCH 130/428] Option to save run_id as a time-dependent variable; add descriptions Saving run_id and run_restart_from as time-evolving variables would make it easier to concatenate data from several consecutive runs. A bit hacky, but do not have a better way to create a per-run dimension at the moment. 
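(Illustrative sketch only, not part of this patch.) With save_repeat_run_id
enabled, a post-processing script can check that two runs really form a
restart chain before joining their outputs in time. The directories run1/
and run2/ and the evolving variable "n" below are hypothetical, and it is
assumed that boutdata's collect returns the IDs as character arrays with
time as the leading dimension when they are saved at every output step:

    from boutdata.collect import collect

    # IDs are stored as character arrays; decode one output step to a string
    first_id = collect("run_id", path="run1", info=False)[0].tobytes().decode()
    restart_src = collect("run_restart_from", path="run2", info=False)[0].tobytes().decode()

    if restart_src == first_id:
        # run2 was restarted from run1, so the outputs can be joined in time
        n1 = collect("n", path="run1", info=False)
        n2 = collect("n", path="run2", info=False)
        # ... concatenate n1 and n2 along the time axis ...
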
--- src/solver/solver.cxx | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index 8c1afccc41..3e55d84d3a 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -580,8 +580,16 @@ void Solver::outputVars(Datafile &outputfile, bool save_repeat) { outputfile.addOnce(iteration, "hist_hi"); // Add run information - outputfile.addOnce(run_id, "run_id"); - outputfile.addOnce(run_restart_from, "run_restart_from"); + bool save_repeat_run_id = (*options)["save_repeat_run_id"] + .doc("Write run_id and run_restart_from at every output " + "timestep, to make it easier to concatenate output " + "data sets in time") + .withDefault(false); + outputfile.add(run_id, "run_id", save_repeat_run_id, "UUID for this simulation"); + outputfile.add(run_restart_from, "run_restart_from", save_repeat_run_id, + "run_id of the simulation this one was restarted from." + "'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' means the run is not a restart, " + "or the previous run did not have a run_id."); // Add 2D and 3D evolving fields to output file for(const auto& f : f2d) { From 1473422953c8db7b13e75148a44140c910896508 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 3 Jan 2021 16:36:54 +0100 Subject: [PATCH 131/428] Create run_id in test-restart-io and test-restart-io_hdf5 --- tests/integrated/test-restart-io/runtest | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/integrated/test-restart-io/runtest b/tests/integrated/test-restart-io/runtest index 737092b98e..62f73c2a19 100755 --- a/tests/integrated/test-restart-io/runtest +++ b/tests/integrated/test-restart-io/runtest @@ -62,6 +62,14 @@ with DataFile(os.path.join(restartdir, 'BOUT.restart.0.nc'), create=True) as bas base_restart.write('fperp_lower', testvars['fperp_lower']) base_restart.write('fperp_upper', testvars['fperp_upper']) + # make run_id an array of characters because NetCDF doesn't like generic strings + run_id = numpy.array(list('36 character run_id test string ****'), dtype='S1') + run_id = BoutArray(run_id, attributes = {'bout_type': 'string'}) + base_restart.write('run_id', run_id) + run_restart_from = numpy.array(list('36 character run_restart_from string'), dtype='S1') + run_restart_from = BoutArray(run_id, attributes = {'bout_type': 'string'}) + base_restart.write('run_restart_from', run_restart_from) + success = True # Note: expect this to fail for 16 processors, because when there are 2 From dd09d4b83f113785c9739eb360eb8451aed52ac9 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 3 Jan 2021 16:53:56 +0100 Subject: [PATCH 132/428] Test run_id, run_restart_from in test-restart-io, test-restart-io_hdf5 Test that run_id is a valid UUID, and run_restart_from is equal to the original run_id. 
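The validity check relies on the standard library: uuid.UUID() raises
ValueError when given a string that is not a well-formed UUID. A minimal
sketch of the pattern used by the test (the helper function and the example
strings here are made up for illustration):

    import uuid

    def is_valid_uuid(s):
        # uuid.UUID parses any valid textual UUID and raises ValueError otherwise
        try:
            uuid.UUID(s)
            return True
        except ValueError:
            return False

    assert is_valid_uuid("123e4567-e89b-12d3-a456-426614174000")
    # matches the placeholder used when there was no previous run_id
    assert not is_valid_uuid("zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz")
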
--- tests/integrated/test-restart-io/runtest | 28 +++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/tests/integrated/test-restart-io/runtest b/tests/integrated/test-restart-io/runtest index 62f73c2a19..23b348521f 100755 --- a/tests/integrated/test-restart-io/runtest +++ b/tests/integrated/test-restart-io/runtest @@ -12,6 +12,7 @@ from boututils.run_wrapper import shell, shell_safe, launch_safe import numpy import os from sys import exit +import uuid nx = 8 ny = 16 @@ -63,11 +64,16 @@ with DataFile(os.path.join(restartdir, 'BOUT.restart.0.nc'), create=True) as bas base_restart.write('fperp_upper', testvars['fperp_upper']) # make run_id an array of characters because NetCDF doesn't like generic strings - run_id = numpy.array(list('36 character run_id test string ****'), dtype='S1') + run_id_string = '36 character run_id test string ****' + run_id = numpy.array(list(run_id_string), dtype='S1') run_id = BoutArray(run_id, attributes = {'bout_type': 'string'}) base_restart.write('run_id', run_id) - run_restart_from = numpy.array(list('36 character run_restart_from string'), dtype='S1') - run_restart_from = BoutArray(run_id, attributes = {'bout_type': 'string'}) + run_restart_from = numpy.array( + list('36 character run_restart_from string'), dtype='S1' + ) + run_restart_from = BoutArray( + run_restart_from, attributes = {'bout_type': 'string'} + ) base_restart.write('run_restart_from', run_restart_from) success = True @@ -123,6 +129,22 @@ for nproc in [1, 2, 4]: success = False print('Fail: yindex_global of '+name+' evolving version is '+str(yindex_result)+' should be '+str(yindex_test)) + # check the run_id + run_id = collect('run_id', path='data', info=False) + # check run_id can be converted to a valid UUID + try: + uuid.UUID(run_id.tobytes().decode()) + except ValueError: + success = False + print('run_id=' + str(run_id) + ' is not a valid UUID') + run_restart_from = collect('run_restart_from', path='data', info=False) + if not run_restart_from.tobytes().decode() == run_id_string: + success = False + print( + 'incorrect run_restart_from=' + run_restart_from.tobytes().decode() + + '. Expected ' + run_id_string + ) + if success: print('pass') From f74bd222e879aa559a0e708674002a3d7b4ff72c Mon Sep 17 00:00:00 2001 From: johnomotani Date: Tue, 5 Jan 2021 12:19:07 +0000 Subject: [PATCH 133/428] Remove #include sting_view is C++17, and not used in this version of uuid.h. 
--- include/bout/sys/uuid.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/bout/sys/uuid.h b/include/bout/sys/uuid.h index d45fa4801c..f48e81006f 100644 --- a/include/bout/sys/uuid.h +++ b/include/bout/sys/uuid.h @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include From 0d6b0c130a045045219c978a0089b9ef09a3c12c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 4 Feb 2021 13:31:21 +0000 Subject: [PATCH 134/428] Add configure option to use system UUID generator --- CMakeLists.txt | 13 ++ cmake/FindLibuuid.cmake | 58 +++++++++ configure | 277 ++++++++++++++++++++++++++++++++++++++++ configure.ac | 24 ++++ include/bout/sys/uuid.h | 14 +- 5 files changed, 382 insertions(+), 4 deletions(-) create mode 100644 cmake/FindLibuuid.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 74ffebfad0..360227e50c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -554,6 +554,19 @@ if (USE_SCOREP) endif() set(BOUT_HAS_SCOREP ${USE_SCOREP}) +option(BOUT_USE_UUID_SYSTEM_GENERATOR "Enable support for using a system UUID generator" ON) +if (BOUT_USE_UUID_SYSTEM_GENERATOR) + find_package(Libuuid QUIET) + if (Libuuid_FOUND) + target_link_libraries(bout++ + PUBLIC Libuuid::libuuid) + else() + message(STATUS "libuuid not found, using fallback UUID generator") + endif() +endif() +message(STATUS "UUID_SYSTEM_GENERATOR: ${BOUT_USE_UUID_SYSTEM_GENERATOR}") +set(BOUT_HAS_UUID_SYSTEM_GENERATOR ${BOUT_USE_UUID_SYSTEM_GENERATOR}) + include(CheckCXXSourceCompiles) check_cxx_source_compiles("int main() { const char* name = __PRETTY_FUNCTION__; }" HAS_PRETTY_FUNCTION) diff --git a/cmake/FindLibuuid.cmake b/cmake/FindLibuuid.cmake new file mode 100644 index 0000000000..91880487a2 --- /dev/null +++ b/cmake/FindLibuuid.cmake @@ -0,0 +1,58 @@ +# FindLibuuid +# ---------- +# +# Find a libuuid UUID generator library +# +# This module will define the following variables: +# +# :: +# +# Libuuid_FOUND - true if Libuuid was found +# Libuuid_INCLUDE_DIRS - Location of the Libuuid includes +# Libuuid_LIBRARIES - Required libraries +# +# This module will also export the ``Libuuid::libuuid`` target. 
+# +# You can also set the following variables: +# +# ``Libuuid_ROOT`` +# Specify the path to the Libuuid installation to use +# +# ``Libuuid_DEBUG`` +# Set to TRUE to get extra debugging output + +include (FindPackageHandleStandardArgs) + +if (WIN32) + find_package_handle_standard_args(Libuuid DEFAULT_MSG) + return() +endif() + +if (APPLE) + find_library(CFLIB CoreFoundation) + find_package_handle_standard_args(Libuuid DEFAULT_MSG CFLIB) + mark_as_advanced(${CFLIB}) + + if (Libuuid_FOUND AND NOT TARGET Libuuid::libuuid) + add_library(Libuuid::libuuid UNKNOWN IMPORTED) + set_target_properties(Libuuid::libuuid PROPERTIES + IMPORTED_LOCATION ${CFLIB} + ) + endif() + return() +endif () + +find_path(Libuuid_INCLUDE_DIRS uuid/uuid.h) +find_library(Libuuid_LIBRARIES uuid) + +find_package_handle_standard_args(Libuuid DEFAULT_MSG Libuuid_LIBRARIES Libuuid_INCLUDE_DIRS) + +mark_as_advanced(Libuuid_LIBRARIES Libuuid_INCLUDE_DIRS) + +if (Libuuid_FOUND AND NOT TARGET Libuuid::libuuid) + add_library(Libuuid::libuuid UNKNOWN IMPORTED) + set_target_properties(Libuuid::libuuid PROPERTIES + IMPORTED_LOCATION "${Libuuid_LIBRARIES}" + INTERFACE_INCLUDE_DIRECTORIES "${Libuuid_INCLUDE_DIRS}" + ) +endif() diff --git a/configure b/configure index 7e6f5cf52b..793b485aff 100755 --- a/configure +++ b/configure @@ -632,6 +632,7 @@ PETSC_ARCH PETSC_DIR PETSC_MAKE_INCLUDE PETSC_HAS_SUNDIALS +BOUT_HAS_UUID_SYSTEM_GENERATOR HAS_SLEPC HAS_PETSC HAS_LAPACK @@ -806,6 +807,7 @@ with_mumps with_arkode with_scorep with_system_mpark +with_system_uuid enable_warnings enable_checks enable_signal @@ -1495,6 +1497,7 @@ Optional Packages: --with-scorep Enable support for scorep based instrumentation --with-system-mpark Use mpark.variant already installed rather then the bundled one + --with-system-uuid Use libuuid to generate UUIDs --with-openmp-schedule=static/dynamic/guided/auto Set OpenMP schedule (default: static) --with-gcov=GCOV use given GCOV for coverage (GCOV=gcov). @@ -2644,6 +2647,13 @@ else fi +# Check whether --with-system_uuid was given. +if test "${with_system_uuid+set}" = set; then : + withval=$with_system_uuid; +else + with_system_uuid=auto +fi + # Check whether --enable-warnings was given. if test "${enable_warnings+set}" = set; then : @@ -12155,6 +12165,273 @@ fi OWN_MPARK=yes fi + +############################################################# +# Check for libuuid +############################################################# + +if test ".$with_system_uuid" = "no"; then : + + BOUT_HAS_UUID_SYSTEM_GENERATOR=no + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for system UUID generator" >&5 +$as_echo_n "checking for system UUID generator... " >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid/uuid.h" >&5 +$as_echo_n "checking for uuid/uuid.h... " >&6; } + + save_CPPFLAGS=$CPPFLAGS + BACH_found=no + + if test ."$with_system_uuid" != .yes; then : + extra_prefix="$with_system_uuid" +else + extra_prefix="" +fi + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + BACH_found=yes + + + break +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + if test $BACH_found != yes; then : + + for search_prefix in $extra_prefix /usr /opt $HOME $HOME/local /usr/local + do + for path in $search_prefix $search_prefix/include + do + if test -d $path; then : + + CPPFLAGS="$save_CPPFLAGS -I$path" + + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #include + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + BACH_found=yes + EXTRA_INCS="$EXTRA_INCS -I$path" + + + break +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi + done + if test .$BACH_found = .yes; then : + break; +fi + done + +fi + + if test $BACH_found = yes; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + CPPFLAGS=$save_CPPFLAGS + +fi + if test .$BACH_found = .yes; then : + + save_LIBS=$LIBS + save_LDFLAGS=$LDFLAGS + save_CPPFLAGS=$CPPFLAGS + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libuuid" >&5 +$as_echo_n "checking for libuuid... " >&6; } + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + BACL_found=no + + # Try with no extra libraries first + if test ."$with_system_uuid" = .yes; then : + extra_prefix="" +else + extra_prefix="$with_system_uuid" +fi + LIBS="$save_LIBS $EXTRA_LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + extern "C" + char uuid_generate(); + +int +main () +{ +return uuid_generate(); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + BACL_found=yes + + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LIBS=$save_LIBS + + # Now try with explicitly linking library + if test $BACL_found != yes; then : + + LIBS="$save_LIBS $EXTRA_LIBS -luuid" + if test ."$with_system_uuid" = .yes; then : + extra_prefix="" +else + extra_prefix="$with_system_uuid" +fi + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + extern "C" + char uuid_generate(); + +int +main () +{ +return uuid_generate(); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + BACL_found=yes + EXTRA_LIBS="$EXTRA_LIBS -luuid" + + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi + + if test $BACL_found != yes; then : + + for search_prefix in $extra_prefix /usr /opt $HOME $HOME/local /usr/local ; do + for path in $search_prefix $search_prefix/lib $search_prefix/lib64 $search_prefix/x86_64-linux-gnu + do + if test -d $path; then : + + LIBS="$save_LIBS $EXTRA_LIBS -luuid" + LDFLAGS="$save_LDFLAGS -L$path" + + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + extern "C" + char uuid_generate(); + +int +main () +{ +return uuid_generate(); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + BACL_found=yes + EXTRA_LIBS="$EXTRA_LIBS -L$path -luuid" + + + break +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi + done + if test .$BACL_found = .yes; then : + break; +fi + done + +fi + + if test $BACL_found = yes; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + + if test $BACL_found = yes; then : + BOUT_HAS_UUID_SYSTEM_GENERATOR=yes +else + BOUT_HAS_UUID_SYSTEM_GENERATOR=no +fi + + LIBS=$save_LIBS + LDFLAGS=$save_LDFLAGS + CPPFLAGS=$save_CPPFLAGS + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + +else + BOUT_HAS_UUID_SYSTEM_GENERATOR=no +fi + + if test "$with_system_uuid" = "yes"; then : + + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "*** System UUID generator not found, but explicitly requested +See \`config.log' for more details" "$LINENO" 5; } + +fi + +fi + ############################################################# # Download + Build PVODE '98 ############################################################# diff --git a/configure.ac b/configure.ac index 130394d8b7..f8615c55f0 100644 --- a/configure.ac +++ b/configure.ac @@ -64,6 +64,8 @@ AC_ARG_WITH(scorep, [AS_HELP_STRING([--with-scorep], [Enable support for scorep based instrumentation])],,[with_scorep=no]) AC_ARG_WITH(system_mpark, [AS_HELP_STRING([--with-system-mpark], [Use mpark.variant already installed rather then the bundled one])],,[with_system_mpark=auto]) +AC_ARG_WITH(system_uuid, [AS_HELP_STRING([--with-system-uuid], + [Use libuuid to generate UUIDs])],,[with_system_uuid=auto]) dnl --with-hdf5 flags are set in AX_LIB_{PARALLEL}HDF5 @@ -1180,6 +1182,27 @@ AS_IF([test "$SYSTEM_HAS_MPARK" = "yes"], [ MPARK_INCLUDE="-I$MPARK_VARIANT_INCLUDE_PATH" OWN_MPARK=yes ]) + +############################################################# +# Check for libuuid +############################################################# + +AS_IF([test ".$with_system_uuid" = "no"], [ + BOUT_HAS_UUID_SYSTEM_GENERATOR=no +], [ + AC_MSG_CHECKING([for system UUID generator]) + BOUT_ADDPATH_CHECK_HEADER(uuid/uuid.h, + BOUT_ADDPATH_CHECK_LIB(uuid, uuid_generate, + BOUT_HAS_UUID_SYSTEM_GENERATOR=yes, + BOUT_HAS_UUID_SYSTEM_GENERATOR=no, + [$with_system_uuid]), + BOUT_HAS_UUID_SYSTEM_GENERATOR=no, + [$with_system_uuid]) + AS_IF([test "$with_system_uuid" = "yes"], [ + AC_MSG_FAILURE([*** System UUID generator not found, but explicitly requested]) + ]) +]) + ############################################################# # Download + Build PVODE '98 ############################################################# @@ -1372,6 +1395,7 @@ AC_SUBST(HAS_MUMPS) AC_SUBST(HAS_LAPACK) AC_SUBST(HAS_PETSC) AC_SUBST(HAS_SLEPC) +AC_SUBST(BOUT_HAS_UUID_SYSTEM_GENERATOR) AC_SUBST(PETSC_HAS_SUNDIALS) AC_SUBST(PETSC_MAKE_INCLUDE) AC_SUBST(PETSC_DIR) diff --git a/include/bout/sys/uuid.h b/include/bout/sys/uuid.h index f48e81006f..de8adf9d20 100644 --- a/include/bout/sys/uuid.h +++ b/include/bout/sys/uuid.h @@ -21,6 +21,10 @@ // SOFTWARE. 
#pragma once +#ifndef BOUT_UUID_H +#define BOUT_UUID_H + +#include "bout/build_config.hxx" #include #include @@ -45,7 +49,7 @@ #define NOMINMAX #endif -#ifdef UUID_SYSTEM_GENERATOR +#ifdef BOUT_HAS_UUID_SYSTEM_GENERATOR #include #endif @@ -56,13 +60,13 @@ #elif defined(__linux__) || defined(__unix__) -#ifdef UUID_SYSTEM_GENERATOR +#ifdef BOUT_HAS_UUID_SYSTEM_GENERATOR #include #endif #elif defined(__APPLE__) -#ifdef UUID_SYSTEM_GENERATOR +#ifdef BOUT_HAS_UUID_SYSTEM_GENERATOR #include #endif @@ -652,7 +656,7 @@ namespace uuids // uuid generators // -------------------------------------------------------------------------------------------------------------------------- -#ifdef UUID_SYSTEM_GENERATOR +#ifdef BOUT_HAS_UUID_SYSTEM_GENERATOR class uuid_system_generator { public: @@ -966,3 +970,5 @@ namespace std } }; } + +#endif // BOUT_UUID_H From 9d07413813b7610dc38a8b372e01ef7490c1b0a4 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 4 Feb 2021 13:40:34 +0000 Subject: [PATCH 135/428] Use system UUID generator for run ID if available --- include/bout/solver.hxx | 4 ++-- src/solver/solver.cxx | 24 +++++++++++++++--------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index 3413fb2a82..d8d3318151 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -416,8 +416,8 @@ protected: std::string run_id = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"; /// The run from which this was restarted. std::string run_restart_from = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"; - /// Generate a random UUID and broadcast it to all processors - std::string createRunId(); + /// Generate a random UUID (version 4) and broadcast it to all processors + std::string createRunId() const; /// Run the user's RHS function int run_rhs(BoutReal t); diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index 3e55d84d3a..1c51016db7 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -529,20 +529,26 @@ int Solver::solve(int NOUT, BoutReal TIMESTEP) { return status; } -std::string Solver::createRunId() { +std::string Solver::createRunId() const { std::string result; result.resize(36); if (MYPE == 0) { - std::random_device rd; - auto seed_data = std::array {}; - std::generate(std::begin(seed_data), std::end(seed_data), std::ref(rd)); - std::seed_seq seq(std::begin(seed_data), std::end(seed_data)); - std::mt19937 generator(seq); - uuids::uuid_random_generator gen{generator}; - - result = uuids::to_string(gen()); // Different each time the simulation is run + // Generate a unique ID for this run + if (bout::build::has_uuid_system_generator) { + uuids::uuid_system_generator gen{}; + result = uuids::to_string(gen()); + } else { + std::random_device rd; + auto seed_data = std::array {}; + std::generate(std::begin(seed_data), std::end(seed_data), std::ref(rd)); + std::seed_seq seq(std::begin(seed_data), std::end(seed_data)); + std::mt19937 generator(seq); + uuids::uuid_random_generator gen{generator}; + + result = uuids::to_string(gen()); + } } // All ranks have same run_id From 3ac9c2199f5ad6e1fc294401ccbf573d3d15b0a2 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 4 Feb 2021 13:45:16 +0000 Subject: [PATCH 136/428] [clang-format-command] fixes --- include/bout/sys/uuid.h | 1541 ++++++++++++++++++--------------------- src/solver/solver.cxx | 2 +- 2 files changed, 708 insertions(+), 835 deletions(-) diff --git a/include/bout/sys/uuid.h b/include/bout/sys/uuid.h index 
de8adf9d20..2396697e0a 100644 --- a/include/bout/sys/uuid.h +++ b/include/bout/sys/uuid.h @@ -10,7 +10,7 @@ // - replace 'is_same_v<>' with 'is_same<>::value' // - replace std::copy with strncpy in uuid_name_generator::reset() // -// Copyright (c) 2017 +// Copyright (c) 2017 // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, @@ -26,20 +26,20 @@ #include "bout/build_config.hxx" +#include +#include +#include #include -#include -#include +#include #include -#include #include -#include #include -#include -#include -#include -#include #include -#include +#include +#include +#include +#include +#include #ifdef _WIN32 #ifndef WIN32_LEAN_AND_MEAN @@ -53,9 +53,9 @@ #include #endif +#include +#include #include -#include -#include #pragma comment(lib, "IPHLPAPI.lib") #elif defined(__linux__) || defined(__unix__) @@ -72,903 +72,776 @@ #endif -namespace uuids -{ - - namespace detail - { - template - constexpr inline unsigned char hex2char(TChar const ch) - { - if (ch >= static_cast('0') && ch <= static_cast('9')) - return ch - static_cast('0'); - if (ch >= static_cast('a') && ch <= static_cast('f')) - return 10 + ch - static_cast('a'); - if (ch >= static_cast('A') && ch <= static_cast('F')) - return 10 + ch - static_cast('A'); - return 0; - } +namespace uuids { + +namespace detail { +template +constexpr inline unsigned char hex2char(TChar const ch) { + if (ch >= static_cast('0') && ch <= static_cast('9')) + return ch - static_cast('0'); + if (ch >= static_cast('a') && ch <= static_cast('f')) + return 10 + ch - static_cast('a'); + if (ch >= static_cast('A') && ch <= static_cast('F')) + return 10 + ch - static_cast('A'); + return 0; +} - template - constexpr inline bool is_hex(TChar const ch) - { - return - (ch >= static_cast('0') && ch <= static_cast('9')) || - (ch >= static_cast('a') && ch <= static_cast('f')) || - (ch >= static_cast('A') && ch <= static_cast('F')); - } +template +constexpr inline bool is_hex(TChar const ch) { + return (ch >= static_cast('0') && ch <= static_cast('9')) + || (ch >= static_cast('a') && ch <= static_cast('f')) + || (ch >= static_cast('A') && ch <= static_cast('F')); +} - template - constexpr inline unsigned char hexpair2char(TChar const a, TChar const b) - { - return (hex2char(a) << 4) | hex2char(b); - } +template +constexpr inline unsigned char hexpair2char(TChar const a, TChar const b) { + return (hex2char(a) << 4) | hex2char(b); +} - class sha1 - { - public: - using digest32_t = uint32_t[5]; - using digest8_t = uint8_t[20]; - - static constexpr unsigned int block_bytes = 64; - - inline static uint32_t left_rotate(uint32_t value, size_t const count) - { - return (value << count) ^ (value >> (32 - count)); - } - - sha1() { reset(); } - - void reset() - { - m_digest[0] = 0x67452301; - m_digest[1] = 0xEFCDAB89; - m_digest[2] = 0x98BADCFE; - m_digest[3] = 0x10325476; - m_digest[4] = 0xC3D2E1F0; - m_blockByteIndex = 0; - m_byteCount = 0; - } - - void process_byte(uint8_t octet) - { - this->m_block[this->m_blockByteIndex++] = octet; - ++this->m_byteCount; - if (m_blockByteIndex == block_bytes) - { - this->m_blockByteIndex = 0; - process_block(); - } - } - - void process_block(void const * const start, void const * const end) - { - const uint8_t* begin = static_cast(start); - const uint8_t* finish = static_cast(end); - while (begin != finish) - { - process_byte(*begin); - begin++; - } - } - - void process_bytes(void const * const data, size_t const len) - { - const 
uint8_t* block = static_cast(data); - process_block(block, block + len); - } - - uint32_t const * get_digest(digest32_t digest) - { - size_t const bitCount = this->m_byteCount * 8; - process_byte(0x80); - if (this->m_blockByteIndex > 56) { - while (m_blockByteIndex != 0) { - process_byte(0); - } - while (m_blockByteIndex < 56) { - process_byte(0); - } - } - else { - while (m_blockByteIndex < 56) { - process_byte(0); - } - } - process_byte(0); - process_byte(0); - process_byte(0); - process_byte(0); - process_byte(static_cast((bitCount >> 24) & 0xFF)); - process_byte(static_cast((bitCount >> 16) & 0xFF)); - process_byte(static_cast((bitCount >> 8) & 0xFF)); - process_byte(static_cast((bitCount) & 0xFF)); - - memcpy(digest, m_digest, 5 * sizeof(uint32_t)); - return digest; - } - - uint8_t const * get_digest_bytes(digest8_t digest) - { - digest32_t d32; - get_digest(d32); - size_t di = 0; - digest[di++] = (uint8_t)(d32[0] >> 24); - digest[di++] = (uint8_t)(d32[0] >> 16); - digest[di++] = (uint8_t)(d32[0] >> 8); - digest[di++] = (uint8_t)(d32[0] >> 0); - - digest[di++] = (uint8_t)(d32[1] >> 24); - digest[di++] = (uint8_t)(d32[1] >> 16); - digest[di++] = (uint8_t)(d32[1] >> 8); - digest[di++] = (uint8_t)(d32[1] >> 0); - - digest[di++] = (uint8_t)(d32[2] >> 24); - digest[di++] = (uint8_t)(d32[2] >> 16); - digest[di++] = (uint8_t)(d32[2] >> 8); - digest[di++] = (uint8_t)(d32[2] >> 0); - - digest[di++] = (uint8_t)(d32[3] >> 24); - digest[di++] = (uint8_t)(d32[3] >> 16); - digest[di++] = (uint8_t)(d32[3] >> 8); - digest[di++] = (uint8_t)(d32[3] >> 0); - - digest[di++] = (uint8_t)(d32[4] >> 24); - digest[di++] = (uint8_t)(d32[4] >> 16); - digest[di++] = (uint8_t)(d32[4] >> 8); - digest[di++] = (uint8_t)(d32[4] >> 0); - - return digest; - } - - private: - void process_block() - { - uint32_t w[80]; - for (size_t i = 0; i < 16; i++) { - w[i] = (m_block[i * 4 + 0] << 24); - w[i] |= (m_block[i * 4 + 1] << 16); - w[i] |= (m_block[i * 4 + 2] << 8); - w[i] |= (m_block[i * 4 + 3]); - } - for (size_t i = 16; i < 80; i++) { - w[i] = left_rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1); - } - - uint32_t a = m_digest[0]; - uint32_t b = m_digest[1]; - uint32_t c = m_digest[2]; - uint32_t d = m_digest[3]; - uint32_t e = m_digest[4]; - - for (std::size_t i = 0; i < 80; ++i) - { - uint32_t f = 0; - uint32_t k = 0; - - if (i < 20) { - f = (b & c) | (~b & d); - k = 0x5A827999; - } - else if (i < 40) { - f = b ^ c ^ d; - k = 0x6ED9EBA1; - } - else if (i < 60) { - f = (b & c) | (b & d) | (c & d); - k = 0x8F1BBCDC; - } - else { - f = b ^ c ^ d; - k = 0xCA62C1D6; - } - uint32_t temp = left_rotate(a, 5) + f + e + k + w[i]; - e = d; - d = c; - c = left_rotate(b, 30); - b = a; - a = temp; - } - - m_digest[0] += a; - m_digest[1] += b; - m_digest[2] += c; - m_digest[3] += d; - m_digest[4] += e; - } - - private: - digest32_t m_digest; - uint8_t m_block[64]; - size_t m_blockByteIndex; - size_t m_byteCount; - }; - - static std::mt19937 clock_gen(std::random_device{}()); - static std::uniform_int_distribution clock_dis{ -32768, 32767 }; - static std::atomic_short clock_sequence{clock_dis(clock_gen)}; - } - - // -------------------------------------------------------------------------------------------------------------------------- - // UUID format https://tools.ietf.org/html/rfc4122 - // -------------------------------------------------------------------------------------------------------------------------- - - // 
-------------------------------------------------------------------------------------------------------------------------- - // Field NDR Data Type Octet # Note - // -------------------------------------------------------------------------------------------------------------------------- - // time_low unsigned long 0 - 3 The low field of the timestamp. - // time_mid unsigned short 4 - 5 The middle field of the timestamp. - // time_hi_and_version unsigned short 6 - 7 The high field of the timestamp multiplexed with the version number. - // clock_seq_hi_and_reserved unsigned small 8 The high field of the clock sequence multiplexed with the variant. - // clock_seq_low unsigned small 9 The low field of the clock sequence. - // node character 10 - 15 The spatially unique node identifier. - // -------------------------------------------------------------------------------------------------------------------------- - // 0 1 2 3 - // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - // | time_low | - // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - // | time_mid | time_hi_and_version | - // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - // |clk_seq_hi_res | clk_seq_low | node (0-1) | - // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - // | node (2-5) | - // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - // -------------------------------------------------------------------------------------------------------------------------- - // enumerations - // -------------------------------------------------------------------------------------------------------------------------- - - // indicated by a bit pattern in octet 8, marked with N in xxxxxxxx-xxxx-xxxx-Nxxx-xxxxxxxxxxxx - enum class uuid_variant - { - // NCS backward compatibility (with the obsolete Apollo Network Computing System 1.5 UUID format) - // N bit pattern: 0xxx - // > the first 6 octets of the UUID are a 48-bit timestamp (the number of 4 microsecond units of time since 1 Jan 1980 UTC); - // > the next 2 octets are reserved; - // > the next octet is the "address family"; - // > the final 7 octets are a 56-bit host ID in the form specified by the address family - ncs, - - // RFC 4122/DCE 1.1 - // N bit pattern: 10xx - // > big-endian byte order - rfc, - - // Microsoft Corporation backward compatibility - // N bit pattern: 110x - // > little endian byte order - // > formely used in the Component Object Model (COM) library - microsoft, - - // reserved for possible future definition - // N bit pattern: 111x - reserved - }; - - // indicated by a bit pattern in octet 6, marked with M in xxxxxxxx-xxxx-Mxxx-xxxx-xxxxxxxxxxxx - enum class uuid_version - { - none = 0, // only possible for nil or invalid uuids - time_based = 1, // The time-based version specified in RFC 4122 - dce_security = 2, // DCE Security version, with embedded POSIX UIDs. 
- name_based_md5 = 3, // The name-based version specified in RFS 4122 with MD5 hashing - random_number_based = 4, // The randomly or pseudo-randomly generated version specified in RFS 4122 - name_based_sha1 = 5 // The name-based version specified in RFS 4122 with SHA1 hashing - }; - - // -------------------------------------------------------------------------------------------------------------------------- - // uuid class - // -------------------------------------------------------------------------------------------------------------------------- - class uuid - { - public: - using value_type = uint8_t; - - constexpr uuid() noexcept : data({}) {}; - - uuid(value_type(&arr)[16]) noexcept - { - std::copy(std::cbegin(arr), std::cend(arr), std::begin(data)); +class sha1 { +public: + using digest32_t = uint32_t[5]; + using digest8_t = uint8_t[20]; + + static constexpr unsigned int block_bytes = 64; + + inline static uint32_t left_rotate(uint32_t value, size_t const count) { + return (value << count) ^ (value >> (32 - count)); + } + + sha1() { reset(); } + + void reset() { + m_digest[0] = 0x67452301; + m_digest[1] = 0xEFCDAB89; + m_digest[2] = 0x98BADCFE; + m_digest[3] = 0x10325476; + m_digest[4] = 0xC3D2E1F0; + m_blockByteIndex = 0; + m_byteCount = 0; + } + + void process_byte(uint8_t octet) { + this->m_block[this->m_blockByteIndex++] = octet; + ++this->m_byteCount; + if (m_blockByteIndex == block_bytes) { + this->m_blockByteIndex = 0; + process_block(); + } + } + + void process_block(void const* const start, void const* const end) { + const uint8_t* begin = static_cast(start); + const uint8_t* finish = static_cast(end); + while (begin != finish) { + process_byte(*begin); + begin++; + } + } + + void process_bytes(void const* const data, size_t const len) { + const uint8_t* block = static_cast(data); + process_block(block, block + len); + } + + uint32_t const* get_digest(digest32_t digest) { + size_t const bitCount = this->m_byteCount * 8; + process_byte(0x80); + if (this->m_blockByteIndex > 56) { + while (m_blockByteIndex != 0) { + process_byte(0); } - - uuid(std::array const & arr) noexcept - { - std::copy(std::cbegin(arr), std::cend(arr), std::begin(data)); + while (m_blockByteIndex < 56) { + process_byte(0); } - - template - explicit uuid(ForwardIterator first, ForwardIterator last) - { - if (std::distance(first, last) == 16) - std::copy(first, last, std::begin(data)); + } else { + while (m_blockByteIndex < 56) { + process_byte(0); } - - constexpr uuid_variant variant() const noexcept - { - if ((data[8] & 0x80) == 0x00) - return uuid_variant::ncs; - else if ((data[8] & 0xC0) == 0x80) - return uuid_variant::rfc; - else if ((data[8] & 0xE0) == 0xC0) - return uuid_variant::microsoft; - else - return uuid_variant::reserved; + } + process_byte(0); + process_byte(0); + process_byte(0); + process_byte(0); + process_byte(static_cast((bitCount >> 24) & 0xFF)); + process_byte(static_cast((bitCount >> 16) & 0xFF)); + process_byte(static_cast((bitCount >> 8) & 0xFF)); + process_byte(static_cast((bitCount)&0xFF)); + + memcpy(digest, m_digest, 5 * sizeof(uint32_t)); + return digest; + } + + uint8_t const* get_digest_bytes(digest8_t digest) { + digest32_t d32; + get_digest(d32); + size_t di = 0; + digest[di++] = (uint8_t)(d32[0] >> 24); + digest[di++] = (uint8_t)(d32[0] >> 16); + digest[di++] = (uint8_t)(d32[0] >> 8); + digest[di++] = (uint8_t)(d32[0] >> 0); + + digest[di++] = (uint8_t)(d32[1] >> 24); + digest[di++] = (uint8_t)(d32[1] >> 16); + digest[di++] = (uint8_t)(d32[1] >> 8); + digest[di++] = 
(uint8_t)(d32[1] >> 0); + + digest[di++] = (uint8_t)(d32[2] >> 24); + digest[di++] = (uint8_t)(d32[2] >> 16); + digest[di++] = (uint8_t)(d32[2] >> 8); + digest[di++] = (uint8_t)(d32[2] >> 0); + + digest[di++] = (uint8_t)(d32[3] >> 24); + digest[di++] = (uint8_t)(d32[3] >> 16); + digest[di++] = (uint8_t)(d32[3] >> 8); + digest[di++] = (uint8_t)(d32[3] >> 0); + + digest[di++] = (uint8_t)(d32[4] >> 24); + digest[di++] = (uint8_t)(d32[4] >> 16); + digest[di++] = (uint8_t)(d32[4] >> 8); + digest[di++] = (uint8_t)(d32[4] >> 0); + + return digest; + } + +private: + void process_block() { + uint32_t w[80]; + for (size_t i = 0; i < 16; i++) { + w[i] = (m_block[i * 4 + 0] << 24); + w[i] |= (m_block[i * 4 + 1] << 16); + w[i] |= (m_block[i * 4 + 2] << 8); + w[i] |= (m_block[i * 4 + 3]); + } + for (size_t i = 16; i < 80; i++) { + w[i] = left_rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1); + } + + uint32_t a = m_digest[0]; + uint32_t b = m_digest[1]; + uint32_t c = m_digest[2]; + uint32_t d = m_digest[3]; + uint32_t e = m_digest[4]; + + for (std::size_t i = 0; i < 80; ++i) { + uint32_t f = 0; + uint32_t k = 0; + + if (i < 20) { + f = (b & c) | (~b & d); + k = 0x5A827999; + } else if (i < 40) { + f = b ^ c ^ d; + k = 0x6ED9EBA1; + } else if (i < 60) { + f = (b & c) | (b & d) | (c & d); + k = 0x8F1BBCDC; + } else { + f = b ^ c ^ d; + k = 0xCA62C1D6; } - - constexpr uuid_version version() const noexcept - { - if ((data[6] & 0xF0) == 0x10) - return uuid_version::time_based; - else if ((data[6] & 0xF0) == 0x20) - return uuid_version::dce_security; - else if ((data[6] & 0xF0) == 0x30) - return uuid_version::name_based_md5; - else if ((data[6] & 0xF0) == 0x40) - return uuid_version::random_number_based; - else if ((data[6] & 0xF0) == 0x50) - return uuid_version::name_based_sha1; - else - return uuid_version::none; + uint32_t temp = left_rotate(a, 5) + f + e + k + w[i]; + e = d; + d = c; + c = left_rotate(b, 30); + b = a; + a = temp; + } + + m_digest[0] += a; + m_digest[1] += b; + m_digest[2] += c; + m_digest[3] += d; + m_digest[4] += e; + } + +private: + digest32_t m_digest; + uint8_t m_block[64]; + size_t m_blockByteIndex; + size_t m_byteCount; +}; + +static std::mt19937 clock_gen(std::random_device{}()); +static std::uniform_int_distribution clock_dis{-32768, 32767}; +static std::atomic_short clock_sequence{clock_dis(clock_gen)}; +} // namespace detail + +// -------------------------------------------------------------------------------------------------------------------------- +// UUID format https://tools.ietf.org/html/rfc4122 +// -------------------------------------------------------------------------------------------------------------------------- + +// -------------------------------------------------------------------------------------------------------------------------- +// Field NDR Data Type Octet # Note +// -------------------------------------------------------------------------------------------------------------------------- +// time_low unsigned long 0 - 3 The low field of the +// timestamp. +// time_mid unsigned short 4 - 5 The middle field of +// the timestamp. time_hi_and_version unsigned short 6 - 7 The +// high field of the timestamp multiplexed with the version number. +// clock_seq_hi_and_reserved unsigned small 8 The high field of the clock +// sequence multiplexed with the variant. clock_seq_low unsigned small 9 +// The low field of the clock sequence. node character +// 10 - 15 The spatially unique node identifier. 
+// -------------------------------------------------------------------------------------------------------------------------- +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | time_low | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | time_mid | time_hi_and_version | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// |clk_seq_hi_res | clk_seq_low | node (0-1) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | node (2-5) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +// -------------------------------------------------------------------------------------------------------------------------- +// enumerations +// -------------------------------------------------------------------------------------------------------------------------- + +// indicated by a bit pattern in octet 8, marked with N in +// xxxxxxxx-xxxx-xxxx-Nxxx-xxxxxxxxxxxx +enum class uuid_variant { + // NCS backward compatibility (with the obsolete Apollo Network Computing System 1.5 + // UUID format) N bit pattern: 0xxx > the first 6 octets of the UUID are a 48-bit + // timestamp (the number of 4 microsecond units of time since 1 Jan 1980 UTC); > the + // next 2 octets are reserved; > the next octet is the "address family"; > the final 7 + // octets are a 56-bit host ID in the form specified by the address family + ncs, + + // RFC 4122/DCE 1.1 + // N bit pattern: 10xx + // > big-endian byte order + rfc, + + // Microsoft Corporation backward compatibility + // N bit pattern: 110x + // > little endian byte order + // > formely used in the Component Object Model (COM) library + microsoft, + + // reserved for possible future definition + // N bit pattern: 111x + reserved +}; + +// indicated by a bit pattern in octet 6, marked with M in +// xxxxxxxx-xxxx-Mxxx-xxxx-xxxxxxxxxxxx +enum class uuid_version { + none = 0, // only possible for nil or invalid uuids + time_based = 1, // The time-based version specified in RFC 4122 + dce_security = 2, // DCE Security version, with embedded POSIX UIDs. 
+ name_based_md5 = 3, // The name-based version specified in RFS 4122 with MD5 hashing + random_number_based = + 4, // The randomly or pseudo-randomly generated version specified in RFS 4122 + name_based_sha1 = 5 // The name-based version specified in RFS 4122 with SHA1 hashing +}; + +// -------------------------------------------------------------------------------------------------------------------------- +// uuid class +// -------------------------------------------------------------------------------------------------------------------------- +class uuid { +public: + using value_type = uint8_t; + + constexpr uuid() noexcept : data({}){}; + + uuid(value_type (&arr)[16]) noexcept { + std::copy(std::cbegin(arr), std::cend(arr), std::begin(data)); + } + + uuid(std::array const& arr) noexcept { + std::copy(std::cbegin(arr), std::cend(arr), std::begin(data)); + } + + template + explicit uuid(ForwardIterator first, ForwardIterator last) { + if (std::distance(first, last) == 16) + std::copy(first, last, std::begin(data)); + } + + constexpr uuid_variant variant() const noexcept { + if ((data[8] & 0x80) == 0x00) + return uuid_variant::ncs; + else if ((data[8] & 0xC0) == 0x80) + return uuid_variant::rfc; + else if ((data[8] & 0xE0) == 0xC0) + return uuid_variant::microsoft; + else + return uuid_variant::reserved; + } + + constexpr uuid_version version() const noexcept { + if ((data[6] & 0xF0) == 0x10) + return uuid_version::time_based; + else if ((data[6] & 0xF0) == 0x20) + return uuid_version::dce_security; + else if ((data[6] & 0xF0) == 0x30) + return uuid_version::name_based_md5; + else if ((data[6] & 0xF0) == 0x40) + return uuid_version::random_number_based; + else if ((data[6] & 0xF0) == 0x50) + return uuid_version::name_based_sha1; + else + return uuid_version::none; + } + + constexpr bool is_nil() const noexcept { + for (size_t i = 0; i < data.size(); ++i) + if (data[i] != 0) + return false; + return true; + } + + void swap(uuid& other) noexcept { data.swap(other.data); } + + inline char const* as_bytes() const { + return reinterpret_cast(data.data()); + } + + template + static bool is_valid_uuid(CharT const* str) noexcept { + bool firstDigit = true; + int hasBraces = 0; + size_t index = 0; + size_t size = 0; + if (std::is_same::value) + size = strlen(str); + else + size = wcslen(str); + + if (str == nullptr || size == 0) + return false; + + if (str[0] == static_cast('{')) + hasBraces = 1; + if (hasBraces && str[size - 1] != static_cast('}')) + return false; + + for (size_t i = hasBraces; i < size - hasBraces; ++i) { + if (str[i] == static_cast('-')) + continue; + + if (index >= 16 || !detail::is_hex(str[i])) { + return false; } - constexpr bool is_nil() const noexcept - { - for (size_t i = 0; i < data.size(); ++i) if (data[i] != 0) return false; - return true; + if (firstDigit) { + firstDigit = false; + } else { + index++; + firstDigit = true; } + } - void swap(uuid & other) noexcept - { - data.swap(other.data); - } + if (index < 16) { + return false; + } - inline char const* as_bytes() const - { - return reinterpret_cast(data.data()); - } + return true; + } - template - static bool is_valid_uuid(CharT const * str) noexcept - { - bool firstDigit = true; - int hasBraces = 0; - size_t index = 0; - size_t size = 0; - if (std::is_same::value) - size = strlen(str); - else - size = wcslen(str); - - if (str == nullptr || size == 0) - return false; - - if (str[0] == static_cast('{')) - hasBraces = 1; - if (hasBraces && str[size - 1] != static_cast('}')) - return false; - - for (size_t i = 
hasBraces; i < size - hasBraces; ++i) - { - if (str[i] == static_cast('-')) continue; - - if (index >= 16 || !detail::is_hex(str[i])) - { - return false; - } - - if (firstDigit) - { - firstDigit = false; - } - else - { - index++; - firstDigit = true; - } - } - - if (index < 16) - { - return false; - } - - return true; + template , + class Allocator = std::allocator> + static bool + is_valid_uuid(std::basic_string const& str) noexcept { + return is_valid_uuid(str.c_str()); + } + + template + static uuid from_string(CharT const* str) noexcept { + CharT digit = 0; + bool firstDigit = true; + int hasBraces = 0; + size_t index = 0; + size_t size = 0; + if (std::is_same::value) + size = strlen(str); + else + size = wcslen(str); + + std::array data{{0}}; + + if (str == nullptr || size == 0) { + throw std::runtime_error("trying to construct uuid from null or empty char* array"); + } + + if (str[0] == static_cast('{')) + hasBraces = 1; + if (hasBraces && str[size - 1] != static_cast('}')) { + throw std::runtime_error( + "uuid::from_string: input has opening brace but no closing brace"); + } + + for (size_t i = hasBraces; i < size - hasBraces; ++i) { + if (str[i] == static_cast('-')) + continue; + + if (index >= 16 || !detail::is_hex(str[i])) { + throw std::runtime_error( + "uuid::from_string: too many characters or character not a hex character"); } - template, - class Allocator = std::allocator> - static bool is_valid_uuid(std::basic_string const & str) noexcept - { - return is_valid_uuid(str.c_str()); + if (firstDigit) { + digit = str[i]; + firstDigit = false; + } else { + data[index++] = detail::hexpair2char(digit, str[i]); + firstDigit = true; } + } - template - static uuid from_string(CharT const * str) noexcept - { - CharT digit = 0; - bool firstDigit = true; - int hasBraces = 0; - size_t index = 0; - size_t size = 0; - if (std::is_same::value) - size = strlen(str); - else - size = wcslen(str); - - std::array data{ { 0 } }; - - if (str == nullptr || size == 0) - { - throw std::runtime_error( - "trying to construct uuid from null or empty char* array" - ); - } - - if (str[0] == static_cast('{')) - hasBraces = 1; - if (hasBraces && str[size - 1] != static_cast('}')) - { - throw std::runtime_error( - "uuid::from_string: input has opening brace but no closing brace" - ); - } - - - for (size_t i = hasBraces; i < size - hasBraces; ++i) - { - if (str[i] == static_cast('-')) continue; - - if (index >= 16 || !detail::is_hex(str[i])) - { - throw std::runtime_error( - "uuid::from_string: too many characters or character not a hex character" - ); - } - - if (firstDigit) - { - digit = str[i]; - firstDigit = false; - } - else - { - data[index++] = detail::hexpair2char(digit, str[i]); - firstDigit = true; - } - } - - if (index < 16) - { - throw std::runtime_error( - "uuid::from_string: not enough characters in input" - ); - } - - return uuid{ std::cbegin(data), std::cend(data) }; - } + if (index < 16) { + throw std::runtime_error("uuid::from_string: not enough characters in input"); + } - template, - class Allocator = std::allocator> - static uuid from_string(std::basic_string const & str) noexcept - { - return from_string(str.c_str()); - } + return uuid{std::cbegin(data), std::cend(data)}; + } - private: - std::array data{ { 0 } }; - - friend bool operator==(uuid const & lhs, uuid const & rhs) noexcept; - friend bool operator<(uuid const & lhs, uuid const & rhs) noexcept; - - template - friend std::basic_ostream & operator<<(std::basic_ostream &s, uuid const & id); - }; - - // 
-------------------------------------------------------------------------------------------------------------------------- - // operators and non-member functions - // -------------------------------------------------------------------------------------------------------------------------- - - inline bool operator== (uuid const& lhs, uuid const& rhs) noexcept - { - return lhs.data == rhs.data; - } - - inline bool operator!= (uuid const& lhs, uuid const& rhs) noexcept - { - return !(lhs == rhs); - } - - inline bool operator< (uuid const& lhs, uuid const& rhs) noexcept - { - return lhs.data < rhs.data; - } - - template - std::basic_ostream & operator<<(std::basic_ostream &s, uuid const & id) - { - // save current flags - std::ios_base::fmtflags f(s.flags()); - - // manipulate stream as needed - s << std::hex << std::setfill(static_cast('0')) - << std::setw(2) << (int)id.data[0] - << std::setw(2) << (int)id.data[1] - << std::setw(2) << (int)id.data[2] - << std::setw(2) << (int)id.data[3] - << '-' - << std::setw(2) << (int)id.data[4] - << std::setw(2) << (int)id.data[5] - << '-' - << std::setw(2) << (int)id.data[6] - << std::setw(2) << (int)id.data[7] - << '-' - << std::setw(2) << (int)id.data[8] - << std::setw(2) << (int)id.data[9] - << '-' - << std::setw(2) << (int)id.data[10] - << std::setw(2) << (int)id.data[11] - << std::setw(2) << (int)id.data[12] - << std::setw(2) << (int)id.data[13] - << std::setw(2) << (int)id.data[14] - << std::setw(2) << (int)id.data[15]; - - // restore original flags - s.flags(f); - - return s; - } - - template, + template , class Allocator = std::allocator> - inline std::basic_string to_string(uuid const & id) - { - std::basic_stringstream sstr; - sstr << id; - return sstr.str(); - } + static uuid + from_string(std::basic_string const& str) noexcept { + return from_string(str.c_str()); + } - inline void swap(uuids::uuid & lhs, uuids::uuid & rhs) noexcept - { - lhs.swap(rhs); - } +private: + std::array data{{0}}; - // -------------------------------------------------------------------------------------------------------------------------- - // namespace IDs that could be used for generating name-based uuids - // -------------------------------------------------------------------------------------------------------------------------- + friend bool operator==(uuid const& lhs, uuid const& rhs) noexcept; + friend bool operator<(uuid const& lhs, uuid const& rhs) noexcept; - // Name string is a fully-qualified domain name - static uuid uuid_namespace_dns{ {0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} }; + template + friend std::basic_ostream& operator<<(std::basic_ostream& s, + uuid const& id); +}; - // Name string is a URL - static uuid uuid_namespace_url{ {0x6b, 0xa7, 0xb8, 0x11, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} }; +// -------------------------------------------------------------------------------------------------------------------------- +// operators and non-member functions +// -------------------------------------------------------------------------------------------------------------------------- - // Name string is an ISO OID (See https://oidref.com/, https://en.wikipedia.org/wiki/Object_identifier) - static uuid uuid_namespace_oid{ {0x6b, 0xa7, 0xb8, 0x12, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} }; +inline bool operator==(uuid const& lhs, uuid const& rhs) noexcept { + return lhs.data == rhs.data; +} - // Name string is an X.500 DN, in DER or 
a text output format (See https://en.wikipedia.org/wiki/X.500, https://en.wikipedia.org/wiki/Abstract_Syntax_Notation_One) - static uuid uuid_namespace_x500{ {0x6b, 0xa7, 0xb8, 0x14, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} }; +inline bool operator!=(uuid const& lhs, uuid const& rhs) noexcept { + return !(lhs == rhs); +} - // -------------------------------------------------------------------------------------------------------------------------- - // uuid generators - // -------------------------------------------------------------------------------------------------------------------------- +inline bool operator<(uuid const& lhs, uuid const& rhs) noexcept { + return lhs.data < rhs.data; +} + +template +std::basic_ostream& operator<<(std::basic_ostream& s, + uuid const& id) { + // save current flags + std::ios_base::fmtflags f(s.flags()); + + // manipulate stream as needed + s << std::hex << std::setfill(static_cast('0')) << std::setw(2) << (int)id.data[0] + << std::setw(2) << (int)id.data[1] << std::setw(2) << (int)id.data[2] << std::setw(2) + << (int)id.data[3] << '-' << std::setw(2) << (int)id.data[4] << std::setw(2) + << (int)id.data[5] << '-' << std::setw(2) << (int)id.data[6] << std::setw(2) + << (int)id.data[7] << '-' << std::setw(2) << (int)id.data[8] << std::setw(2) + << (int)id.data[9] << '-' << std::setw(2) << (int)id.data[10] << std::setw(2) + << (int)id.data[11] << std::setw(2) << (int)id.data[12] << std::setw(2) + << (int)id.data[13] << std::setw(2) << (int)id.data[14] << std::setw(2) + << (int)id.data[15]; + + // restore original flags + s.flags(f); + + return s; +} + +template , + class Allocator = std::allocator> +inline std::basic_string to_string(uuid const& id) { + std::basic_stringstream sstr; + sstr << id; + return sstr.str(); +} + +inline void swap(uuids::uuid& lhs, uuids::uuid& rhs) noexcept { lhs.swap(rhs); } + +// -------------------------------------------------------------------------------------------------------------------------- +// namespace IDs that could be used for generating name-based uuids +// -------------------------------------------------------------------------------------------------------------------------- + +// Name string is a fully-qualified domain name +static uuid uuid_namespace_dns{{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, + 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}}; + +// Name string is a URL +static uuid uuid_namespace_url{{0x6b, 0xa7, 0xb8, 0x11, 0x9d, 0xad, 0x11, 0xd1, 0x80, + 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}}; + +// Name string is an ISO OID (See https://oidref.com/, +// https://en.wikipedia.org/wiki/Object_identifier) +static uuid uuid_namespace_oid{{0x6b, 0xa7, 0xb8, 0x12, 0x9d, 0xad, 0x11, 0xd1, 0x80, + 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}}; + +// Name string is an X.500 DN, in DER or a text output format (See +// https://en.wikipedia.org/wiki/X.500, +// https://en.wikipedia.org/wiki/Abstract_Syntax_Notation_One) +static uuid uuid_namespace_x500{{0x6b, 0xa7, 0xb8, 0x14, 0x9d, 0xad, 0x11, 0xd1, 0x80, + 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}}; + +// -------------------------------------------------------------------------------------------------------------------------- +// uuid generators +// -------------------------------------------------------------------------------------------------------------------------- #ifdef BOUT_HAS_UUID_SYSTEM_GENERATOR - class uuid_system_generator - { - public: - using result_type = uuid; +class uuid_system_generator { +public: + using 
result_type = uuid; - uuid operator()() - { + uuid operator()() { #ifdef _WIN32 - GUID newId; - ::CoCreateGuid(&newId); + GUID newId; + ::CoCreateGuid(&newId); - std::array bytes = - { { - (unsigned char)((newId.Data1 >> 24) & 0xFF), - (unsigned char)((newId.Data1 >> 16) & 0xFF), - (unsigned char)((newId.Data1 >> 8) & 0xFF), - (unsigned char)((newId.Data1) & 0xFF), + std::array bytes = {{(unsigned char)((newId.Data1 >> 24) & 0xFF), + (unsigned char)((newId.Data1 >> 16) & 0xFF), + (unsigned char)((newId.Data1 >> 8) & 0xFF), + (unsigned char)((newId.Data1) & 0xFF), - (unsigned char)((newId.Data2 >> 8) & 0xFF), - (unsigned char)((newId.Data2) & 0xFF), + (unsigned char)((newId.Data2 >> 8) & 0xFF), + (unsigned char)((newId.Data2) & 0xFF), - (unsigned char)((newId.Data3 >> 8) & 0xFF), - (unsigned char)((newId.Data3) & 0xFF), + (unsigned char)((newId.Data3 >> 8) & 0xFF), + (unsigned char)((newId.Data3) & 0xFF), - newId.Data4[0], - newId.Data4[1], - newId.Data4[2], - newId.Data4[3], - newId.Data4[4], - newId.Data4[5], - newId.Data4[6], - newId.Data4[7] - } }; + newId.Data4[0], newId.Data4[1], newId.Data4[2], + newId.Data4[3], newId.Data4[4], newId.Data4[5], + newId.Data4[6], newId.Data4[7]}}; - return uuid{ std::begin(bytes), std::end(bytes) }; + return uuid{std::begin(bytes), std::end(bytes)}; #elif defined(__linux__) || defined(__unix__) - uuid_t id; - uuid_generate(id); - - std::array bytes = - { { - id[0], - id[1], - id[2], - id[3], - id[4], - id[5], - id[6], - id[7], - id[8], - id[9], - id[10], - id[11], - id[12], - id[13], - id[14], - id[15] - } }; - - return uuid{ std::begin(bytes), std::end(bytes) }; + uuid_t id; + uuid_generate(id); + + std::array bytes = {{id[0], id[1], id[2], id[3], id[4], id[5], id[6], + id[7], id[8], id[9], id[10], id[11], id[12], id[13], + id[14], id[15]}}; + + return uuid{std::begin(bytes), std::end(bytes)}; #elif defined(__APPLE__) - auto newId = CFUUIDCreate(NULL); - auto bytes = CFUUIDGetUUIDBytes(newId); - CFRelease(newId); - - std::array arrbytes = - { { - bytes.byte0, - bytes.byte1, - bytes.byte2, - bytes.byte3, - bytes.byte4, - bytes.byte5, - bytes.byte6, - bytes.byte7, - bytes.byte8, - bytes.byte9, - bytes.byte10, - bytes.byte11, - bytes.byte12, - bytes.byte13, - bytes.byte14, - bytes.byte15 - } }; - return uuid{ std::begin(arrbytes), std::end(arrbytes) }; + auto newId = CFUUIDCreate(NULL); + auto bytes = CFUUIDGetUUIDBytes(newId); + CFRelease(newId); + + std::array arrbytes = { + {bytes.byte0, bytes.byte1, bytes.byte2, bytes.byte3, bytes.byte4, bytes.byte5, + bytes.byte6, bytes.byte7, bytes.byte8, bytes.byte9, bytes.byte10, bytes.byte11, + bytes.byte12, bytes.byte13, bytes.byte14, bytes.byte15}}; + return uuid{std::begin(arrbytes), std::end(arrbytes)}; #else - return uuid{}; + return uuid{}; #endif - } - }; + } +}; #endif - template - class basic_uuid_random_generator - { - public: - using engine_type = UniformRandomNumberGenerator; +template +class basic_uuid_random_generator { +public: + using engine_type = UniformRandomNumberGenerator; - explicit basic_uuid_random_generator(engine_type& gen) : - generator(&gen, [](auto) {}) {} - explicit basic_uuid_random_generator(engine_type* gen) : - generator(gen, [](auto) {}) {} + explicit basic_uuid_random_generator(engine_type& gen) : generator(&gen, [](auto) {}) {} + explicit basic_uuid_random_generator(engine_type* gen) : generator(gen, [](auto) {}) {} - uuid operator()() - { - uint8_t bytes[16]; - for (int i = 0; i < 16; i += 4) - *reinterpret_cast(bytes + i) = distribution(*generator); + uuid operator()() { + 
uint8_t bytes[16]; + for (int i = 0; i < 16; i += 4) + *reinterpret_cast(bytes + i) = distribution(*generator); - // variant must be 10xxxxxx - bytes[8] &= 0xBF; - bytes[8] |= 0x80; + // variant must be 10xxxxxx + bytes[8] &= 0xBF; + bytes[8] |= 0x80; - // version must be 0100xxxx - bytes[6] &= 0x4F; - bytes[6] |= 0x40; + // version must be 0100xxxx + bytes[6] &= 0x4F; + bytes[6] |= 0x40; - return uuid{std::begin(bytes), std::end(bytes)}; - } + return uuid{std::begin(bytes), std::end(bytes)}; + } - private: - std::uniform_int_distribution distribution; - std::shared_ptr generator; - }; - - using uuid_random_generator = basic_uuid_random_generator; - - class uuid_name_generator - { - public: - explicit uuid_name_generator(uuid const& namespace_uuid) noexcept - : nsuuid(namespace_uuid) - {} - - template - uuid operator()(CharT const * name) - { - size_t size = 0; - if (std::is_same::value) - size = strlen(name); - else - size = wcslen(name); - - reset(); - process_characters(name, size); - return make_uuid(); - } +private: + std::uniform_int_distribution distribution; + std::shared_ptr generator; +}; - template, - class Allocator = std::allocator> - uuid operator()(std::basic_string const & name) - { - reset(); - process_characters(name.data(), name.size()); - return make_uuid(); - } - - private: - void reset() - { - hasher.reset(); - char bytes[16]; - auto nsbytes = nsuuid.as_bytes(); - strncpy(bytes, nsbytes, 16); - hasher.process_bytes(bytes, 16); - } - - template ::value>> - void process_characters(char_type const * const characters, size_t const count) - { - for (size_t i = 0; i < count; i++) - { - uint32_t c = characters[i]; - hasher.process_byte(static_cast((c >> 0) & 0xFF)); - hasher.process_byte(static_cast((c >> 8) & 0xFF)); - hasher.process_byte(static_cast((c >> 16) & 0xFF)); - hasher.process_byte(static_cast((c >> 24) & 0xFF)); - } - } +using uuid_random_generator = basic_uuid_random_generator; - void process_characters(const char * const characters, size_t const count) - { - hasher.process_bytes(characters, count); - } - - uuid make_uuid() - { - detail::sha1::digest8_t digest; - hasher.get_digest_bytes(digest); +class uuid_name_generator { +public: + explicit uuid_name_generator(uuid const& namespace_uuid) noexcept + : nsuuid(namespace_uuid) {} - // variant must be 0b10xxxxxx - digest[8] &= 0xBF; - digest[8] |= 0x80; + template + uuid operator()(CharT const* name) { + size_t size = 0; + if (std::is_same::value) + size = strlen(name); + else + size = wcslen(name); - // version must be 0b0101xxxx - digest[6] &= 0x5F; - digest[6] |= 0x50; + reset(); + process_characters(name, size); + return make_uuid(); + } - return uuid{ digest, digest + 16 }; - } + template , + class Allocator = std::allocator> + uuid operator()(std::basic_string const& name) { + reset(); + process_characters(name.data(), name.size()); + return make_uuid(); + } + +private: + void reset() { + hasher.reset(); + char bytes[16]; + auto nsbytes = nsuuid.as_bytes(); + strncpy(bytes, nsbytes, 16); + hasher.process_bytes(bytes, 16); + } + + template ::value>> + void process_characters(char_type const* const characters, size_t const count) { + for (size_t i = 0; i < count; i++) { + uint32_t c = characters[i]; + hasher.process_byte(static_cast((c >> 0) & 0xFF)); + hasher.process_byte(static_cast((c >> 8) & 0xFF)); + hasher.process_byte(static_cast((c >> 16) & 0xFF)); + hasher.process_byte(static_cast((c >> 24) & 0xFF)); + } + } + + void process_characters(const char* const characters, size_t const count) { + 
hasher.process_bytes(characters, count); + } + + uuid make_uuid() { + detail::sha1::digest8_t digest; + hasher.get_digest_bytes(digest); + + // variant must be 0b10xxxxxx + digest[8] &= 0xBF; + digest[8] |= 0x80; + + // version must be 0b0101xxxx + digest[6] &= 0x5F; + digest[6] |= 0x50; + + return uuid{digest, digest + 16}; + } + +private: + uuid nsuuid; + detail::sha1 hasher; +}; + +// !!! DO NOT USE THIS IN PRODUCTION +// this implementation is unreliable for good uuids +class uuid_time_generator { + using mac_address = std::array; + + mac_address device_address; + bool has_mac_address = false; + + bool get_mac_address() { + if (has_mac_address) { + return true; + } - private: - uuid nsuuid; - detail::sha1 hasher; - }; - - // !!! DO NOT USE THIS IN PRODUCTION - // this implementation is unreliable for good uuids - class uuid_time_generator - { - using mac_address = std::array; - - mac_address device_address; - bool has_mac_address = false; - - bool get_mac_address() - { - if (has_mac_address) - { - return true; - } - #ifdef _WIN32 - DWORD len = 0; - auto ret = GetAdaptersInfo(nullptr, &len); - if (ret != ERROR_BUFFER_OVERFLOW) return false; - std::vector buf(len); - auto pips = reinterpret_cast(&buf.front()); - ret = GetAdaptersInfo(pips, &len); - if (ret != ERROR_SUCCESS) return false; - mac_address addr; - std::copy(pips->Address, pips->Address + 6, std::begin(addr)); - device_address = addr; - has_mac_address = true + DWORD len = 0; + auto ret = GetAdaptersInfo(nullptr, &len); + if (ret != ERROR_BUFFER_OVERFLOW) + return false; + std::vector buf(len); + auto pips = reinterpret_cast(&buf.front()); + ret = GetAdaptersInfo(pips, &len); + if (ret != ERROR_SUCCESS) + return false; + mac_address addr; + std::copy(pips->Address, pips->Address + 6, std::begin(addr)); + device_address = addr; + has_mac_address = true #endif - return has_mac_address; - } + return has_mac_address; + } - long long get_time_intervals() - { - auto start = std::chrono::system_clock::from_time_t(time_t(-12219292800)); - auto diff = std::chrono::system_clock::now() - start; - auto ns = std::chrono::duration_cast(diff).count(); - return ns / 100; - } + long long get_time_intervals() { + auto start = std::chrono::system_clock::from_time_t(time_t(-12219292800)); + auto diff = std::chrono::system_clock::now() - start; + auto ns = std::chrono::duration_cast(diff).count(); + return ns / 100; + } - public: - uuid_time_generator() - { - } +public: + uuid_time_generator() {} - uuid operator()() - { - if (get_mac_address()) - { - std::array data; + uuid operator()() { + if (get_mac_address()) { + std::array data; - auto tm = get_time_intervals(); + auto tm = get_time_intervals(); - short clock_seq = detail::clock_sequence++; + short clock_seq = detail::clock_sequence++; - clock_seq &= 0x3FFF; + clock_seq &= 0x3FFF; - auto ptm = reinterpret_cast(&tm); - ptm[0] &= 0x0F; + auto ptm = reinterpret_cast(&tm); + ptm[0] &= 0x0F; - memcpy(&data[0], ptm + 4, 4); - memcpy(&data[4], ptm + 2, 2); - memcpy(&data[6], ptm, 2); + memcpy(&data[0], ptm + 4, 4); + memcpy(&data[4], ptm + 2, 2); + memcpy(&data[6], ptm, 2); - memcpy(&data[8], reinterpret_cast(&clock_seq), 2); + memcpy(&data[8], reinterpret_cast(&clock_seq), 2); - // variant must be 0b10xxxxxx - data[8] &= 0xBF; - data[8] |= 0x80; + // variant must be 0b10xxxxxx + data[8] &= 0xBF; + data[8] |= 0x80; - // version must be 0b0001xxxx - data[6] &= 0x5F; - data[6] |= 0x10; + // version must be 0b0001xxxx + data[6] &= 0x5F; + data[6] |= 0x10; - memcpy(&data[10], &device_address[0], 6); - 
has_mac_address = true; + memcpy(&data[10], &device_address[0], 6); + has_mac_address = true; - return uuids::uuid{std::cbegin(data), std::cend(data)}; - } + return uuids::uuid{std::cbegin(data), std::cend(data)}; + } - throw std::runtime_error("Could not get MAC address"); - } - }; -} + throw std::runtime_error("Could not get MAC address"); + } +}; +} // namespace uuids -namespace std -{ - template <> - struct hash - { - using argument_type = uuids::uuid; - using result_type = std::size_t; - - result_type operator()(argument_type const &uuid) const - { - std::hash hasher; - return static_cast(hasher(uuids::to_string(uuid))); - } - }; -} +namespace std { +template <> +struct hash { + using argument_type = uuids::uuid; + using result_type = std::size_t; + + result_type operator()(argument_type const& uuid) const { + std::hash hasher; + return static_cast(hasher(uuids::to_string(uuid))); + } +}; +} // namespace std #endif // BOUT_UUID_H diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index 1c51016db7..d571b80c8b 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -541,7 +541,7 @@ std::string Solver::createRunId() const { result = uuids::to_string(gen()); } else { std::random_device rd; - auto seed_data = std::array {}; + auto seed_data = std::array{}; std::generate(std::begin(seed_data), std::end(seed_data), std::ref(rd)); std::seed_seq seq(std::begin(seed_data), std::end(seed_data)); std::mt19937 generator(seq); From 183747d4ca4ef34f0e68372f15d303b4cbf8578b Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 4 Feb 2021 16:21:55 +0000 Subject: [PATCH 137/428] Fix build when system UUID generator isn't available --- configure | 4 ++-- configure.ac | 2 +- include/bout/sys/uuid.h | 8 ++++---- src/solver/solver.cxx | 25 ++++++++++++------------- 4 files changed, 19 insertions(+), 20 deletions(-) diff --git a/configure b/configure index 793b485aff..0e456cfce0 100755 --- a/configure +++ b/configure @@ -12176,8 +12176,6 @@ if test ".$with_system_uuid" = "no"; then : else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for system UUID generator" >&5 -$as_echo_n "checking for system UUID generator... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid/uuid.h" >&5 $as_echo_n "checking for uuid/uuid.h... 
" >&6; } @@ -17947,6 +17945,8 @@ $as_echo "$as_me: Scorep support : $HAS_SCOREP" >&6;} $as_echo "$as_me: OpenMP support : $HAS_OPENMP (schedule: $OPENMP_SCHEDULE)" >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: Natural language support: $HAS_NLS (path: $localedir)" >&5 $as_echo "$as_me: Natural language support: $HAS_NLS (path: $localedir)" >&6;} +{ $as_echo "$as_me:${as_lineno-$LINENO}: System UUID generator : $BOUT_HAS_UUID_SYSTEM_GENERATOR" >&5 +$as_echo "$as_me: System UUID generator : $BOUT_HAS_UUID_SYSTEM_GENERATOR" >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: " >&5 $as_echo "$as_me: " >&6;} diff --git a/configure.ac b/configure.ac index f8615c55f0..5a780f0a46 100644 --- a/configure.ac +++ b/configure.ac @@ -1190,7 +1190,6 @@ AS_IF([test "$SYSTEM_HAS_MPARK" = "yes"], [ AS_IF([test ".$with_system_uuid" = "no"], [ BOUT_HAS_UUID_SYSTEM_GENERATOR=no ], [ - AC_MSG_CHECKING([for system UUID generator]) BOUT_ADDPATH_CHECK_HEADER(uuid/uuid.h, BOUT_ADDPATH_CHECK_LIB(uuid, uuid_generate, BOUT_HAS_UUID_SYSTEM_GENERATOR=yes, @@ -1433,6 +1432,7 @@ AC_MSG_NOTICE([ Lapack support : $HAS_LAPACK]) AC_MSG_NOTICE([ Scorep support : $HAS_SCOREP]) AC_MSG_NOTICE([ OpenMP support : $HAS_OPENMP (schedule: $OPENMP_SCHEDULE)]) AC_MSG_NOTICE([ Natural language support: $HAS_NLS (path: $localedir)]) +AC_MSG_NOTICE([ System UUID generator : $BOUT_HAS_UUID_SYSTEM_GENERATOR]) AC_MSG_NOTICE([]) AC_MSG_NOTICE([-------------------------------]) diff --git a/include/bout/sys/uuid.h b/include/bout/sys/uuid.h index 2396697e0a..c9c6ba9237 100644 --- a/include/bout/sys/uuid.h +++ b/include/bout/sys/uuid.h @@ -49,7 +49,7 @@ #define NOMINMAX #endif -#ifdef BOUT_HAS_UUID_SYSTEM_GENERATOR +#if BOUT_HAS_UUID_SYSTEM_GENERATOR #include #endif @@ -60,13 +60,13 @@ #elif defined(__linux__) || defined(__unix__) -#ifdef BOUT_HAS_UUID_SYSTEM_GENERATOR +#if BOUT_HAS_UUID_SYSTEM_GENERATOR #include #endif #elif defined(__APPLE__) -#ifdef BOUT_HAS_UUID_SYSTEM_GENERATOR +#if BOUT_HAS_UUID_SYSTEM_GENERATOR #include #endif @@ -593,7 +593,7 @@ static uuid uuid_namespace_x500{{0x6b, 0xa7, 0xb8, 0x14, 0x9d, 0xad, 0x11, 0xd1, // uuid generators // -------------------------------------------------------------------------------------------------------------------------- -#ifdef BOUT_HAS_UUID_SYSTEM_GENERATOR +#if BOUT_HAS_UUID_SYSTEM_GENERATOR class uuid_system_generator { public: using result_type = uuid; diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index d571b80c8b..b762cd3c2e 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -536,19 +536,18 @@ std::string Solver::createRunId() const { if (MYPE == 0) { // Generate a unique ID for this run - if (bout::build::has_uuid_system_generator) { - uuids::uuid_system_generator gen{}; - result = uuids::to_string(gen()); - } else { - std::random_device rd; - auto seed_data = std::array{}; - std::generate(std::begin(seed_data), std::end(seed_data), std::ref(rd)); - std::seed_seq seq(std::begin(seed_data), std::end(seed_data)); - std::mt19937 generator(seq); - uuids::uuid_random_generator gen{generator}; - - result = uuids::to_string(gen()); - } +#if BOUT_HAS_UUID_SYSTEM_GENERATOR + uuids::uuid_system_generator gen{}; +#else + std::random_device rd; + auto seed_data = std::array{}; + std::generate(std::begin(seed_data), std::end(seed_data), std::ref(rd)); + std::seed_seq seq(std::begin(seed_data), std::end(seed_data)); + std::mt19937 generator(seq); + uuids::uuid_random_generator gen{generator}; +#endif + + result = uuids::to_string(gen()); } // All ranks have same run_id From 
ee0609ad0adae7669bade029048035c9e6bd29d8 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 4 Feb 2021 16:26:06 +0000 Subject: [PATCH 138/428] Move UUID header include to solver implementation file --- include/bout/solver.hxx | 1 - src/solver/solver.cxx | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index d8d3318151..bc1ef5eb4f 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -42,7 +42,6 @@ #include "options.hxx" #include "unused.hxx" #include "bout/monitor.hxx" -#include "bout/sys/uuid.h" #include diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index b762cd3c2e..c2b6e14271 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -33,6 +33,7 @@ #include "bout/region.hxx" #include "bout/solverfactory.hxx" #include "bout/sys/timer.hxx" +#include "bout/sys/uuid.h" #include #include From 1366adfa2d709db05aa5f0dd44b8559d0516c5ba Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 4 Feb 2021 16:46:56 +0000 Subject: [PATCH 139/428] Ignore run IDs in test-squash --- tests/integrated/test-squash/runtest | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/integrated/test-squash/runtest b/tests/integrated/test-squash/runtest index 691f7be057..d5b309c372 100755 --- a/tests/integrated/test-squash/runtest +++ b/tests/integrated/test-squash/runtest @@ -6,8 +6,10 @@ import time import numpy as np from boututils.run_wrapper import launch_safe, shell_safe -#requires: all_tests -#requires: netcdf +# requires: all_tests +# requires: netcdf + +IGNORED_VARS_PATTERN = re.compile("(wtime|ncalls|run_id|run_restart_from).*") class timer(object): @@ -53,7 +55,7 @@ def verify(f1, f2): raise RuntimeError("shape mismatch in ", v, d1[v], d2[v]) if v in ["MXSUB", "MYSUB", "NXPE", "NYPE", "iteration","wall_time"]: continue - if v.startswith("wtime") or v.startswith("ncalls"): + if IGNORED_VARS_PATTERN.match(v): continue if not np.allclose(d1[v], d2[v], equal_nan=True): err = "" From 2b388da28b30cb7725a4448f4c9a586276949646 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 4 Feb 2021 16:54:34 +0000 Subject: [PATCH 140/428] Preserve formatting in uuid header --- include/bout/sys/uuid.h | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/include/bout/sys/uuid.h b/include/bout/sys/uuid.h index c9c6ba9237..9b8f9d4aba 100644 --- a/include/bout/sys/uuid.h +++ b/include/bout/sys/uuid.h @@ -271,18 +271,16 @@ static std::atomic_short clock_sequence{clock_dis(clock_gen)}; // UUID format https://tools.ietf.org/html/rfc4122 // -------------------------------------------------------------------------------------------------------------------------- +// clang-format off // -------------------------------------------------------------------------------------------------------------------------- -// Field NDR Data Type Octet # Note +// Field NDR Data Type Octet # Note // -------------------------------------------------------------------------------------------------------------------------- -// time_low unsigned long 0 - 3 The low field of the -// timestamp. -// time_mid unsigned short 4 - 5 The middle field of -// the timestamp. time_hi_and_version unsigned short 6 - 7 The -// high field of the timestamp multiplexed with the version number. -// clock_seq_hi_and_reserved unsigned small 8 The high field of the clock -// sequence multiplexed with the variant. clock_seq_low unsigned small 9 -// The low field of the clock sequence. 
node character -// 10 - 15 The spatially unique node identifier. +// time_low unsigned long 0 - 3 The low field of the timestamp. +// time_mid unsigned short 4 - 5 The middle field of the timestamp. +// time_hi_and_version unsigned short 6 - 7 The high field of the timestamp multiplexed with the version number. +// clock_seq_hi_and_reserved unsigned small 8 The high field of the clock sequence multiplexed with the variant. +// clock_seq_low unsigned small 9 The low field of the clock sequence. +// node character 10 - 15 The spatially unique node identifier. // -------------------------------------------------------------------------------------------------------------------------- // 0 1 2 3 // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 @@ -295,6 +293,7 @@ static std::atomic_short clock_sequence{clock_dis(clock_gen)}; // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // | node (2-5) | // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// clang-format on // -------------------------------------------------------------------------------------------------------------------------- // enumerations From 0db3913921812dc3524f743fd776ea26bf17a671 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 4 Feb 2021 17:17:52 +0000 Subject: [PATCH 141/428] CMake: Fix fallback when libuuid not available --- CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 360227e50c..5486e98b4a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -562,6 +562,7 @@ if (BOUT_USE_UUID_SYSTEM_GENERATOR) PUBLIC Libuuid::libuuid) else() message(STATUS "libuuid not found, using fallback UUID generator") + set(BOUT_USE_UUID_SYSTEM_GENERATOR FALSE) endif() endif() message(STATUS "UUID_SYSTEM_GENERATOR: ${BOUT_USE_UUID_SYSTEM_GENERATOR}") @@ -716,6 +717,7 @@ message(" OpenMP support : ${BOUT_USE_OPENMP} Natural language support : ${BOUT_HAS_GETTEXT} ScoreP support : ${BOUT_HAS_SCOREP} + System UUID generator : ${BOUT_HAS_UUID_SYSTEM_GENERATOR} Extra debug output : ${BOUT_USE_OUTPUT_DEBUG} CHECK level : ${BOUT_CHECK_LEVEL} Signal handling : ${BOUT_USE_SIGNAL} From 6fab37b9fccf1824e8c7a441d8457254ad12f564 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 4 Feb 2021 17:22:53 +0000 Subject: [PATCH 142/428] Don't use const_cast to broadcast run_id --- src/solver/solver.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index c2b6e14271..d9635496a9 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -553,7 +553,7 @@ std::string Solver::createRunId() const { // All ranks have same run_id // Standard representation of UUID is always 36 characters - MPI_Bcast(const_cast(result.data()), 36, MPI_CHAR, 0, BoutComm::get()); + MPI_Bcast(&result[0], 36, MPI_CHAR, 0, BoutComm::get()); return result; } From c7dbca271dcb6541d897cc8b73f314d16b662cfc Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 5 Feb 2021 12:05:05 +0000 Subject: [PATCH 143/428] Make solver run_id private with public getter Likewise for run_restart_from Also make createRunID public --- include/bout/solver.hxx | 23 ++++++++++++++--------- src/solver/solver.cxx | 4 ++-- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index bc1ef5eb4f..9e5dfdc24f 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -319,6 +319,13 @@ public: pargv = &v; } + /// Generate a random UUID (version 4) and broadcast it to all 
processors + std::string createRunID() const; + /// A unique identifier for this run + std::string getRunID() const { return run_id; } + /// The run from which this was restarted. + std::string getRunRestartFrom() const { return run_restart_from; } + protected: /// Number of command-line arguments static int* pargc; @@ -409,15 +416,6 @@ protected: /// Current iteration (output time-step) number int iteration{0}; - /// Randomly generated run ID - /// Initialise with 36 characters so the allocated array is the right size - /// Use 'z' because it is not a valid hex character, so this is an invalid UUID - std::string run_id = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"; - /// The run from which this was restarted. - std::string run_restart_from = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"; - /// Generate a random UUID (version 4) and broadcast it to all processors - std::string createRunId() const; - /// Run the user's RHS function int run_rhs(BoutReal t); /// Calculate only the convective parts @@ -469,6 +467,13 @@ protected: auto getMonitors() const -> const std::list& { return monitors; } private: + /// Randomly generated run ID + /// Initialise with 36 characters so the allocated array is the right size + /// Use 'z' because it is not a valid hex character, so this is an invalid UUID + std::string run_id = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"; + /// The run from which this was restarted. + std::string run_restart_from = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"; + /// Number of calls to the RHS function int rhs_ncalls{0}; /// Number of calls to the explicit (convective) RHS function diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index d9635496a9..a51b642274 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -456,7 +456,7 @@ int Solver::solve(int NOUT, BoutReal TIMESTEP) { // Set the run ID run_restart_from = run_id; // Restarting from the previous run ID - run_id = createRunId(); + run_id = createRunID(); // Put the run ID into the options tree // Forcing in case the value has been previously set @@ -530,7 +530,7 @@ int Solver::solve(int NOUT, BoutReal TIMESTEP) { return status; } -std::string Solver::createRunId() const { +std::string Solver::createRunID() const { std::string result; result.resize(36); From c153260ff92ea5a02d2c4acfd0697ca5709959ba Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 5 Feb 2021 12:06:19 +0000 Subject: [PATCH 144/428] Install uuid-dev for clang-tidy-review --- .github/workflows/clang-tidy-review.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/clang-tidy-review.yml b/.github/workflows/clang-tidy-review.yml index c3310a948d..072902d340 100644 --- a/.github/workflows/clang-tidy-review.yml +++ b/.github/workflows/clang-tidy-review.yml @@ -49,5 +49,6 @@ jobs: id: review with: build_dir: build - apt_packages: "libfftw3-dev,libnetcdf-c++4-dev,libhdf5-mpi-dev,libopenmpi-dev,petsc-dev,slepc-dev,liblapack-dev,libparpack2-dev,libsundials-dev" + apt_packages: "libfftw3-dev,libnetcdf-c++4-dev,libhdf5-mpi-dev,libopenmpi-dev,petsc-dev,slepc-dev,liblapack-dev,libparpack2-dev,libsundials-dev,uuid-dev" + clang_tidy_checks: '-*,performance-*,readability-*,bugprone-*,clang-analyzer-*,cppcoreguidelines-*,mpi-*,misc-*,-readability-magic-numbers,-cppcoreguidelines-avoid-magic-numbers' From cd3c7f5de189cc9f34d771356e7ca3c4419f808d Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 5 Feb 2021 13:04:38 +0000 Subject: [PATCH 145/428] Make Solver::createRunID private --- include/bout/solver.hxx | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index 9e5dfdc24f..b2f876bc8e 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -319,8 +319,6 @@ public: pargv = &v; } - /// Generate a random UUID (version 4) and broadcast it to all processors - std::string createRunID() const; /// A unique identifier for this run std::string getRunID() const { return run_id; } /// The run from which this was restarted. @@ -467,6 +465,8 @@ protected: auto getMonitors() const -> const std::list& { return monitors; } private: + /// Generate a random UUID (version 4) and broadcast it to all processors + std::string createRunID() const; /// Randomly generated run ID /// Initialise with 36 characters so the allocated array is the right size /// Use 'z' because it is not a valid hex character, so this is an invalid UUID From 82c2aaba960a349ff92254714f28462ece062143 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 5 Feb 2021 15:56:35 +0000 Subject: [PATCH 146/428] Make run_id getter throw if it hasn't been set yet --- include/bout/solver.hxx | 16 ++++++++++------ src/solver/solver.cxx | 17 +++++++++++++++++ 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index b2f876bc8e..6930ce1af5 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -319,10 +319,10 @@ public: pargv = &v; } - /// A unique identifier for this run - std::string getRunID() const { return run_id; } - /// The run from which this was restarted. - std::string getRunRestartFrom() const { return run_restart_from; } + /// A unique identifier for this run. Throws if the identifier hasn't been set yet. + std::string getRunID() const; + /// The run from which this was restarted. Throws if the identifier hasn't been set yet. + std::string getRunRestartFrom() const; protected: /// Number of command-line arguments @@ -467,10 +467,14 @@ protected: private: /// Generate a random UUID (version 4) and broadcast it to all processors std::string createRunID() const; + + /// Default value for `run_id`. Use 'z' because it is not a valid + /// hex character, so this is an invalid UUID + static constexpr auto default_run_id = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"; + /// Randomly generated run ID /// Initialise with 36 characters so the allocated array is the right size - /// Use 'z' because it is not a valid hex character, so this is an invalid UUID - std::string run_id = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"; + std::string run_id = default_run_id; /// The run from which this was restarted. 
std::string run_restart_from = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"; diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index a51b642274..618f85b837 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -558,6 +558,23 @@ std::string Solver::createRunID() const { return result; } +std::string Solver::getRunID() const { + AUTO_TRACE(); + if (run_id == default_run_id) { + throw BoutException("run_id not set!"); + } + return run_id; +} + +std::string Solver::getRunRestartFrom() const { + AUTO_TRACE(); + // Check against run_id, because this might not be a restarted run + if (run_id == default_run_id) { + throw BoutException("run_restart_from not set!"); + } + return run_restart_from; +} + /************************************************************************** * Initialisation **************************************************************************/ From 6b8f696bd724be5514560b4366926dee90aa0624 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 5 Feb 2021 16:10:19 +0000 Subject: [PATCH 147/428] Fix uuids::uuid::is_valid_uuid for C++14 --- include/bout/sys/uuid.h | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/include/bout/sys/uuid.h b/include/bout/sys/uuid.h index 9b8f9d4aba..eefedef269 100644 --- a/include/bout/sys/uuid.h +++ b/include/bout/sys/uuid.h @@ -405,10 +405,21 @@ class uuid { int hasBraces = 0; size_t index = 0; size_t size = 0; - if (std::is_same::value) +#if __cpp_if_constexpr >= 201606L + if constexpr (std::is_same::value) { size = strlen(str); - else + } else { size = wcslen(str); + } +#else + if (std::is_same::value) { + size = strlen(str); + } else { + // This will presumably fail, but you'll get an error message + // Try compiling with C++17 + size = strlen(str); + } +#endif if (str == nullptr || size == 0) return false; From 8c490aab18c63bfde5cd8ce1bec769a75ed1e28f Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 5 Feb 2021 16:10:51 +0000 Subject: [PATCH 148/428] Add unit tests for Solver::getRunID and getRunRestartFrom --- tests/unit/solver/test_solver.cxx | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/unit/solver/test_solver.cxx b/tests/unit/solver/test_solver.cxx index fba99fb75e..02e415f2cd 100644 --- a/tests/unit/solver/test_solver.cxx +++ b/tests/unit/solver/test_solver.cxx @@ -7,6 +7,7 @@ #include "test_fakesolver.hxx" #include "bout/solver.hxx" #include "bout/solverfactory.hxx" +#include "bout/sys/uuid.h" #include #include @@ -847,6 +848,34 @@ TEST_F(SolverTest, BasicSolve) { EXPECT_TRUE(solver.run_called); } +TEST_F(SolverTest, GetRunID) { + Options options; + FakeSolver solver{&options}; + + Options::root()["dump_on_restart"] = false; + + EXPECT_THROW(solver.getRunID(), BoutException); + solver.solve(); + + EXPECT_NO_THROW(solver.getRunID()); + EXPECT_TRUE(uuids::uuid::is_valid_uuid(solver.getRunID())); +} + +TEST_F(SolverTest, GetRunRestartFrom) { + Options options; + FakeSolver solver{&options}; + + Options::root()["dump_on_restart"] = false; + + EXPECT_THROW(solver.getRunRestartFrom(), BoutException); + solver.solve(); + + EXPECT_NO_THROW(solver.getRunRestartFrom()); + // It would be valid if this was a restart + // But hard to check that case without mocking DataFile + EXPECT_FALSE(uuids::uuid::is_valid_uuid(solver.getRunRestartFrom())); +} + TEST_F(SolverTest, SolveBadInit) { Options options; options["fail_init"] = -1; From 9a3206092012470205a11d27a8a75ddba8d4a2ab Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 8 Feb 2021 17:56:34 +0000 Subject: 
[PATCH 149/428] Fix check of run_restart_from being default value Co-authored-by: johnomotani --- src/solver/solver.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index 618f85b837..50d1e14718 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -466,7 +466,7 @@ int Solver::solve(int NOUT, BoutReal TIMESTEP) { /// Run the solver output_info.write(_("Running simulation\n\n")); output_info.write("Run ID: {:s}\n", run_id); - if (run_restart_from != "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz") { + if (run_restart_from != default_run_id) { output_info.write("Restarting from ID: {:s}\n", run_restart_from); } From 9d66543637b9d84bb95025c874de9cdcfa14dbe1 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 9 Feb 2021 21:04:21 +0100 Subject: [PATCH 150/428] Fix backport of run_id BOUT_HAS_UUID_SYSTEM_GENERATOR is just defined/not-defined. Replace fmt-style formatting. --- include/bout/sys/uuid.h | 10 ++++------ src/solver/solver.cxx | 4 ++-- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/include/bout/sys/uuid.h b/include/bout/sys/uuid.h index eefedef269..688a7c1fad 100644 --- a/include/bout/sys/uuid.h +++ b/include/bout/sys/uuid.h @@ -24,8 +24,6 @@ #ifndef BOUT_UUID_H #define BOUT_UUID_H -#include "bout/build_config.hxx" - #include #include #include @@ -49,7 +47,7 @@ #define NOMINMAX #endif -#if BOUT_HAS_UUID_SYSTEM_GENERATOR +#ifdef BOUT_HAS_UUID_SYSTEM_GENERATOR #include #endif @@ -60,13 +58,13 @@ #elif defined(__linux__) || defined(__unix__) -#if BOUT_HAS_UUID_SYSTEM_GENERATOR +#ifdef BOUT_HAS_UUID_SYSTEM_GENERATOR #include #endif #elif defined(__APPLE__) -#if BOUT_HAS_UUID_SYSTEM_GENERATOR +#ifdef BOUT_HAS_UUID_SYSTEM_GENERATOR #include #endif @@ -603,7 +601,7 @@ static uuid uuid_namespace_x500{{0x6b, 0xa7, 0xb8, 0x14, 0x9d, 0xad, 0x11, 0xd1, // uuid generators // -------------------------------------------------------------------------------------------------------------------------- -#if BOUT_HAS_UUID_SYSTEM_GENERATOR +#ifdef BOUT_HAS_UUID_SYSTEM_GENERATOR class uuid_system_generator { public: using result_type = uuid; diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index 50d1e14718..03c7b24a7d 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -465,9 +465,9 @@ int Solver::solve(int NOUT, BoutReal TIMESTEP) { /// Run the solver output_info.write(_("Running simulation\n\n")); - output_info.write("Run ID: {:s}\n", run_id); + output_info.write("Run ID: %s\n", run_id.c_str()); if (run_restart_from != default_run_id) { - output_info.write("Restarting from ID: {:s}\n", run_restart_from); + output_info.write("Restarting from ID: %s\n", run_restart_from.c_str()); } time_t start_time = time(nullptr); From b0b3b8d1b9ad16af5b4430adf38f95abfc09f8dd Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 9 Feb 2021 21:16:00 +0100 Subject: [PATCH 151/428] Add method to check if DataFile can write strings Slightly hacky implementation as it relies on checking the type of the DataFormat pointer using dynamic_cast, but DataFile/DataFormat will be replaced in v5, so implement this way for simplicity. 
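For readers skimming the series, a minimal, self-contained sketch of the dynamic_cast check this commit relies on. The class names below are illustrative stand-ins, not the real DataFormat/H5Format hierarchy; in the actual patch the cast target is H5Format* obtained from file.get().

#include <iostream>
#include <memory>

struct Format { virtual ~Format() = default; };   // stand-in for the DataFormat base class
struct NetCDFLike : Format {};                     // backend that handles strings fine
struct HDF5Like : Format {};                       // backend with buggy string I/O

// dynamic_cast returns nullptr when the pointed-to object is not an HDF5Like,
// so "not an HDF5 backend" is exactly "the cast came back null".
bool can_write_strings(const Format* f) {
  return dynamic_cast<const HDF5Like*>(f) == nullptr;
}

int main() {
  std::unique_ptr<Format> netcdf = std::make_unique<NetCDFLike>();
  std::unique_ptr<Format> hdf5 = std::make_unique<HDF5Like>();
  std::cout << can_write_strings(netcdf.get()) << "\n"; // prints 1
  std::cout << can_write_strings(hdf5.get()) << "\n";   // prints 0
}

The same check could be done by storing a flag on construction, but as the message notes, the dynamic_cast approach keeps the change local since DataFormat is due to be replaced.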
--- include/datafile.hxx | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/include/datafile.hxx b/include/datafile.hxx index 579a192554..edbd005b1b 100644 --- a/include/datafile.hxx +++ b/include/datafile.hxx @@ -18,6 +18,7 @@ class Datafile; #include "bout/macro_for_each.hxx" #include "dataformat.hxx" +#include "../src/fileio/impls/hdf5/h5_format.hxx" #include "bout/format.hxx" #include @@ -99,6 +100,15 @@ class Datafile { void setAttribute(const std::string &varname, const std::string &attrname, int value); void setAttribute(const std::string &varname, const std::string &attrname, BoutReal value); + bool can_write_strings() { + // NetCDF DataFormat subclasses can read/write strings, but the HDF5 subclass is + // buggy. If the DataFormat is not an H5Format, it can write strings correctly. + auto p = dynamic_cast(file.get()); + + // If p is not null, then file points to an H5Format object. + return (p == nullptr); + } + private: Mesh* mesh; bool parallel{false}; // Use parallel formats? From 7ba85afdd442c05cfc4b70aea4bf6a6696cb835e Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 9 Feb 2021 22:05:30 +0100 Subject: [PATCH 152/428] Do not write run_id when using HDF5 HDF5 implementation is buggy for string variables, so do not write run_id or run_restart_from when using HDF5. --- include/datafile.hxx | 21 ++++++++++++++++++--- src/solver/solver.cxx | 23 +++++++++++++---------- 2 files changed, 31 insertions(+), 13 deletions(-) diff --git a/include/datafile.hxx b/include/datafile.hxx index edbd005b1b..e2c023b956 100644 --- a/include/datafile.hxx +++ b/include/datafile.hxx @@ -34,6 +34,7 @@ class Vector3D; #include #include +#include #include /*! @@ -103,10 +104,24 @@ class Datafile { bool can_write_strings() { // NetCDF DataFormat subclasses can read/write strings, but the HDF5 subclass is // buggy. If the DataFormat is not an H5Format, it can write strings correctly. - auto p = dynamic_cast(file.get()); +#ifdef HDF5 + // Extract the file extension + int len = strlen(filename); + int ind = len-1; + while((ind != -1) && (filename[ind] != '.')) { + ind--; + } + const char *s = filename + ind+1; - // If p is not null, then file points to an H5Format object. 
- return (p == nullptr); + const char *hdf5_match[] = {"h5","hdf","hdf5"}; + for(int i=0; i Date: Tue, 9 Feb 2021 23:11:25 +0100 Subject: [PATCH 153/428] Always run HDF5 tests if BOUT++ is configured with HDF5 --- tests/integrated/test-io_hdf5/runtest | 1 - tests/integrated/test-restart-io_hdf5/runtest | 1 - 2 files changed, 2 deletions(-) diff --git a/tests/integrated/test-io_hdf5/runtest b/tests/integrated/test-io_hdf5/runtest index 1dc9ac1e4f..b4c12cf71f 100755 --- a/tests/integrated/test-io_hdf5/runtest +++ b/tests/integrated/test-io_hdf5/runtest @@ -4,7 +4,6 @@ # Run the test, compare results against the benchmark # -#requires: all_tests #Requires: hdf5 from boututils.run_wrapper import shell, shell_safe, launch_safe diff --git a/tests/integrated/test-restart-io_hdf5/runtest b/tests/integrated/test-restart-io_hdf5/runtest index cddb450691..0dd1792ac0 100755 --- a/tests/integrated/test-restart-io_hdf5/runtest +++ b/tests/integrated/test-restart-io_hdf5/runtest @@ -3,7 +3,6 @@ # Test file I/O by loading from restart files and writing to dump files # # requires: hdf5 -# requires: all_tests from boutdata import restart from boutdata.collect import collect From d85ca078205bf6eaa144fc8804e5f55ca4d376d7 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 10 Feb 2021 00:26:58 +0000 Subject: [PATCH 154/428] Only use save_repeat_run_id for dump files If the save_repeat argument to Solver::outputVars() is false, we are writing a restart file and should never save_repeat the run_id or run_restart_from. --- src/solver/solver.cxx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index 985b816778..5a5f9523df 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -605,7 +605,8 @@ void Solver::outputVars(Datafile &outputfile, bool save_repeat) { // Add run information if (outputfile.can_write_strings()) { // HDF5 string I/O is buggy, so skip writing run_id to dump files - bool save_repeat_run_id = (*options)["save_repeat_run_id"] + bool save_repeat_run_id = (!save_repeat) ? false : + (*options)["save_repeat_run_id"] .doc("Write run_id and run_restart_from at every " "output timestep, to make it easier to " "concatenate output data sets in time") From 0f114f690480624942159f5ae16094f2f6de7a3b Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 10 Feb 2021 00:58:30 +0000 Subject: [PATCH 155/428] Open and close restart file before adding Solver variables Opening and closing the 'restart' Datafile means that it knows its filename, which is needed for the can_write_strings() method to correctly return 'false' for HDF5 files. 
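As a minimal illustration of the ordering constraint this commit fixes (invented class and member names, not the real Datafile interface): the filename only becomes known to the object after open(), so an extension-based capability check must come after at least one open/close cycle.

#include <iostream>
#include <string>

// Toy stand-in: remembers its filename once opened, like the Datafile in this series.
class ToyFile {
  std::string filename; // empty until open() is called
public:
  void open(const std::string& name) { filename = name; }
  void close() {}
  // Extension-based check, analogous to can_write_strings() skipping HDF5 files.
  bool can_write_strings() const {
    auto dot = filename.rfind('.');
    if (dot == std::string::npos) return true;
    auto ext = filename.substr(dot + 1);
    return !(ext == "h5" || ext == "hdf" || ext == "hdf5");
  }
};

int main() {
  ToyFile restart;
  // Checking before open() would wrongly report true for an HDF5 restart file.
  restart.open("BOUT.restart.h5");
  restart.close(); // the filename is now known even though the file is closed again
  std::cout << restart.can_write_strings() << "\n"; // prints 0
}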
--- src/physics/physicsmodel.cxx | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/physics/physicsmodel.cxx b/src/physics/physicsmodel.cxx index 67c056bc9d..3a62dfdc2f 100644 --- a/src/physics/physicsmodel.cxx +++ b/src/physics/physicsmodel.cxx @@ -96,10 +96,6 @@ void PhysicsModel::bout_solve(Vector3D &var, const char *name, int PhysicsModel::postInit(bool restarting) { TRACE("PhysicsModel::postInit"); - // Add the solver variables to the restart file - // Second argument specifies no time history - solver->outputVars(restart, false); - std::string restart_dir; ///< Directory for restart files std::string dump_ext, restart_ext; ///< Dump, Restart file extension @@ -116,6 +112,16 @@ int PhysicsModel::postInit(bool restarting) { options->get("restart_format", restart_ext, dump_ext); std::string filename = restart_dir + "/BOUT.restart."+restart_ext; + + // Add the solver variables to the restart file + // Second argument specifies no time history + // Open and close the restart file first so that it knows it's filename - needed so + // can_write_strings() works and we can skip writing run_id for HDF5 files. + if (!restart.openr("%s",filename.c_str())) + throw BoutException("Error: Could not open restart file %s\n", filename.c_str()); + restart.close() + solver->outputVars(restart, false); + if (restarting) { output.write("Loading restart file: %s\n", filename.c_str()); From 6aca861c19f39d1132e2bd032bd09f560edd2b8f Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 10 Feb 2021 09:04:57 +0000 Subject: [PATCH 156/428] Fix typo; missing semicolon --- src/physics/physicsmodel.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/physics/physicsmodel.cxx b/src/physics/physicsmodel.cxx index 3a62dfdc2f..ae780559a5 100644 --- a/src/physics/physicsmodel.cxx +++ b/src/physics/physicsmodel.cxx @@ -119,7 +119,7 @@ int PhysicsModel::postInit(bool restarting) { // can_write_strings() works and we can skip writing run_id for HDF5 files. if (!restart.openr("%s",filename.c_str())) throw BoutException("Error: Could not open restart file %s\n", filename.c_str()); - restart.close() + restart.close(); solver->outputVars(restart, false); if (restarting) { From fb3161f8dd46d64650c9ef8d3195bb682af3f4ab Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 10 Feb 2021 13:09:25 +0000 Subject: [PATCH 157/428] Fix segfault in check for HDF5 in Datafile::can_write_strings() An incorrect C-array length caused the segfaults. --- include/datafile.hxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/datafile.hxx b/include/datafile.hxx index e2c023b956..174edc4c2d 100644 --- a/include/datafile.hxx +++ b/include/datafile.hxx @@ -114,7 +114,7 @@ class Datafile { const char *s = filename + ind+1; const char *hdf5_match[] = {"h5","hdf","hdf5"}; - for(int i=0; i Date: Wed, 10 Feb 2021 13:12:47 +0000 Subject: [PATCH 158/428] Add braces - clang-tidy fix --- src/physics/physicsmodel.cxx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/physics/physicsmodel.cxx b/src/physics/physicsmodel.cxx index ae780559a5..814dba9f5c 100644 --- a/src/physics/physicsmodel.cxx +++ b/src/physics/physicsmodel.cxx @@ -117,8 +117,9 @@ int PhysicsModel::postInit(bool restarting) { // Second argument specifies no time history // Open and close the restart file first so that it knows it's filename - needed so // can_write_strings() works and we can skip writing run_id for HDF5 files. 
- if (!restart.openr("%s",filename.c_str())) + if (!restart.openr("%s",filename.c_str())) { throw BoutException("Error: Could not open restart file %s\n", filename.c_str()); + } restart.close(); solver->outputVars(restart, false); From d037a01ec3521acc5df323f3e2f7b2e258fba978 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 13 Feb 2021 23:16:43 +0000 Subject: [PATCH 159/428] Save provenance tracking info from grid file Save (if present) the grid_id, hypnotoad_version, hypnotoad_git_hash, hypnotoad_git_diff and hypnotoad_geqdsk_filename. --- src/mesh/impls/bout/boutmesh.cxx | 31 +++++++++++++++++++++++++++++++ src/mesh/impls/bout/boutmesh.hxx | 7 +++++++ 2 files changed, 38 insertions(+) diff --git a/src/mesh/impls/bout/boutmesh.cxx b/src/mesh/impls/bout/boutmesh.cxx index ad07313911..a15836b399 100644 --- a/src/mesh/impls/bout/boutmesh.cxx +++ b/src/mesh/impls/bout/boutmesh.cxx @@ -2664,4 +2664,35 @@ void BoutMesh::outputVars(Datafile &file) { file.add(ny_inner, "ny_inner", false); getCoordinates()->outputVars(file); + + // Try and save some provenance tracking info that new enough versions of + // hypnotoad provide in the grid file. + // Note with current Datafile/DataFormat implementation, must not write an + // empty string because it ends up as a null char* pointer, which causes a + // segfault. + if (file.can_write_strings()) { + if (this->get(grid_id, "grid_id") == 0 && grid_id != "") { + file.add(grid_id, "grid_id", false); + } + if (this->get(hypnotoad_version, "hypnotoad_version") == 0 + && hypnotoad_version != "") { + + file.add(hypnotoad_version, "hypnotoad_version", false); + } + if (this->get(hypnotoad_git_hash, "hypnotoad_git_hash") == 0 + && hypnotoad_git_hash != "") { + + file.add(hypnotoad_git_hash, "hypnotoad_git_hash", false); + } + if (this->get(hypnotoad_git_diff, "hypnotoad_git_diff") == 0 + && hypnotoad_git_diff != "") { + + file.add(hypnotoad_git_diff, "hypnotoad_git_diff", false); + } + if (this->get(hypnotoad_geqdsk_filename, "hypnotoad_geqdsk_filename") == 0 + && hypnotoad_geqdsk_filename != "") { + + file.add(hypnotoad_geqdsk_filename, "hypnotoad_geqdsk_filename", false); + } + } } diff --git a/src/mesh/impls/bout/boutmesh.hxx b/src/mesh/impls/bout/boutmesh.hxx index 0546f9b3fe..c16d5d3f0f 100644 --- a/src/mesh/impls/bout/boutmesh.hxx +++ b/src/mesh/impls/bout/boutmesh.hxx @@ -238,6 +238,13 @@ private: int MXG, MYG, MZG; // Boundary sizes + // Grid file provenance tracking info + std::string grid_id = ""; + std::string hypnotoad_version = ""; + std::string hypnotoad_git_hash = ""; + std::string hypnotoad_git_diff = ""; + std::string hypnotoad_geqdsk_filename = ""; + void default_connections(); void set_connection(int ypos1, int ypos2, int xge, int xlt, bool ts = false); void add_target(int ypos, int xge, int xlt); From c03b8542a72533cb3bf053139cc6978b5fb1266a Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 13 Feb 2021 23:58:49 +0000 Subject: [PATCH 160/428] Manual entry on provenance-tracking metadata --- manual/sphinx/user_docs/output_and_post.rst | 62 +++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/manual/sphinx/user_docs/output_and_post.rst b/manual/sphinx/user_docs/output_and_post.rst index 8fb1366441..dfb5409fe2 100644 --- a/manual/sphinx/user_docs/output_and_post.rst +++ b/manual/sphinx/user_docs/output_and_post.rst @@ -627,3 +627,65 @@ NetCDF files. u = bread(f, "U") # Finally read the variable +.. 
_sec-reproducibility: + +Reproducibility and provenance tracking +======================================= + +To help with reproducibility of simulations and provenance tracking of +data, BOUT++ saves some metadata into output files. + +.. note:: Most of this is only saved when using NetCDF for output; + HDF5 output has a known bug writing string variables and + will be removed in v5. + +.. table:: Provenance tracking metadata attributes + + +---------------------------------------------------------------------------+ + | File attributes | + +=============================+=============================================+ + | `BOUT_REVISION` | Git hash of the BOUT++ version that the | + | | code was compiled with. | + +-----------------------------+---------------------------------------------+ + +.. table:: Provenance tracking metadata variables + + +---------------------------------------------------------------------------+ + | Variables | + +=============================+=============================================+ + | `run_id` | Unique identifier (UUID) for a run | + +-----------------------------+---------------------------------------------+ + | `run_restart_from` | If the run was restarted, the `run_id` of | + | | the run it was restarted from. | + | | `"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"` if | + | | the run was not restarted, or the previous | + | | run had no `run_id` | + +-----------------------------+---------------------------------------------+ + +.. table:: Provenance tracking grid metadata variables + + +-----------------------------+---------------------------------------------+ + | Grid-related | These variables are created if a grid file | + | variables | was used for the run, and if the grid file | + | | was created with a new enough version of | + | | hypnotoad | + +=============================+=============================================+ + | `grid_id` | Unique identifier (UUID) for the grid file | + +-----------------------------+---------------------------------------------+ + | `hypnotoad_version` | Version number of hypnotoad used to create | + | | the grid file | + +-----------------------------+---------------------------------------------+ + | `hypnotoad_git_hash` | Git hash of the version of hypnotoad used | + | | to create the grid file (only present if | + | | hypnotoad is used from a git repo rather | + | | installed as a package). 
| + +-----------------------------+---------------------------------------------+ + | `hypnotoad_git_diff` | Git diff of the version of hypnotoad used | + | | to create the grid file (only present if | + | | hypnotoad is used from a git repo rather | + | | installed as a package and the code was | + | | changed since the latest commit) | + +-----------------------------+---------------------------------------------+ + | `hypnotoad_geqdsk_filename` | Name of the geqdsk file used to create the | + | | grid (if a geqdsk file was used) | + +-----------------------------+---------------------------------------------+ From 347bd29f21f502ce529ebbc24b6d5c5e3fb0d408 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sun, 14 Feb 2021 01:58:11 +0000 Subject: [PATCH 161/428] clang-tidy suggestion --- src/mesh/impls/bout/boutmesh.cxx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/mesh/impls/bout/boutmesh.cxx b/src/mesh/impls/bout/boutmesh.cxx index a15836b399..7f7d87cee1 100644 --- a/src/mesh/impls/bout/boutmesh.cxx +++ b/src/mesh/impls/bout/boutmesh.cxx @@ -2671,26 +2671,26 @@ void BoutMesh::outputVars(Datafile &file) { // empty string because it ends up as a null char* pointer, which causes a // segfault. if (file.can_write_strings()) { - if (this->get(grid_id, "grid_id") == 0 && grid_id != "") { + if (this->get(grid_id, "grid_id") == 0 and not grid_id.empty()) { file.add(grid_id, "grid_id", false); } if (this->get(hypnotoad_version, "hypnotoad_version") == 0 - && hypnotoad_version != "") { + and not hypnotoad_version.empty()) { file.add(hypnotoad_version, "hypnotoad_version", false); } if (this->get(hypnotoad_git_hash, "hypnotoad_git_hash") == 0 - && hypnotoad_git_hash != "") { + and not hypnotoad_git_hash.empty()) { file.add(hypnotoad_git_hash, "hypnotoad_git_hash", false); } if (this->get(hypnotoad_git_diff, "hypnotoad_git_diff") == 0 - && hypnotoad_git_diff != "") { + and not hypnotoad_git_diff.empty()) { file.add(hypnotoad_git_diff, "hypnotoad_git_diff", false); } if (this->get(hypnotoad_geqdsk_filename, "hypnotoad_geqdsk_filename") == 0 - && hypnotoad_geqdsk_filename != "") { + and not hypnotoad_geqdsk_filename.empty()) { file.add(hypnotoad_geqdsk_filename, "hypnotoad_geqdsk_filename", false); } From aed7ad0a173bd9e3ee9f99b18b90b12b221f1e7a Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 11 Mar 2021 23:57:58 +0100 Subject: [PATCH 162/428] Exclude y-guard cells in Delp2 Avoids use of potentially uninitialised values in corner cells, also slightly more optimal. 
--- src/mesh/coordinates.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/mesh/coordinates.cxx b/src/mesh/coordinates.cxx index 3bd1fac7ec..e03beadf0b 100644 --- a/src/mesh/coordinates.cxx +++ b/src/mesh/coordinates.cxx @@ -1391,8 +1391,8 @@ Field3D Coordinates::Delp2(const Field3D& f, CELL_LOC outloc, bool useFFT) { auto ft = Matrix(localmesh->LocalNx, ncz / 2 + 1); auto delft = Matrix(localmesh->LocalNx, ncz / 2 + 1); - // Loop over all y indices - for (int jy = 0; jy < localmesh->LocalNy; jy++) { + // Loop over y indices + for (int jy = localmesh->ystart; jy <= localmesh->yend; jy++) { // Take forward FFT From 3519db871d949e2a2a1999cbd59cc11729f0029a Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 12 Mar 2021 17:44:14 +0100 Subject: [PATCH 163/428] Comment on why Delp2 should not calculate in y-boundary cells --- src/mesh/coordinates.cxx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/mesh/coordinates.cxx b/src/mesh/coordinates.cxx index e03beadf0b..c0089495d3 100644 --- a/src/mesh/coordinates.cxx +++ b/src/mesh/coordinates.cxx @@ -1392,6 +1392,8 @@ Field3D Coordinates::Delp2(const Field3D& f, CELL_LOC outloc, bool useFFT) { auto delft = Matrix(localmesh->LocalNx, ncz / 2 + 1); // Loop over y indices + // Note: should not include y-guard or y-boundary points here as that would + // use values from corner cells in dx, which may not be initialised. for (int jy = localmesh->ystart; jy <= localmesh->yend; jy++) { // Take forward FFT From 85d06d1b539f3aa9caa55e41fc37aea98605cce3 Mon Sep 17 00:00:00 2001 From: Ben Dudson Date: Fri, 19 Mar 2021 09:54:01 +0000 Subject: [PATCH 164/428] Backport of Backward Euler solver Useful for finding steady-state solutions; terrible for time-evolving. --- manual/sphinx/user_docs/time_integration.rst | 51 +++- src/solver/impls/snes/snes.cxx | 299 +++++++++++++------ src/solver/impls/snes/snes.hxx | 58 ++-- tests/integrated/test-solver/test_solver.cxx | 2 +- 4 files changed, 285 insertions(+), 125 deletions(-) diff --git a/manual/sphinx/user_docs/time_integration.rst b/manual/sphinx/user_docs/time_integration.rst index 29550bc7c0..5ed1247b44 100644 --- a/manual/sphinx/user_docs/time_integration.rst +++ b/manual/sphinx/user_docs/time_integration.rst @@ -1,3 +1,5 @@ +.. _sec-time-integration: + Time integration ================ @@ -59,8 +61,8 @@ needed to make the solver available. +---------------+-----------------------------------------+--------------------+ | imexbdf2 | IMEX-BDF2 scheme | –with-petsc | +---------------+-----------------------------------------+--------------------+ - -| + | beuler / snes | Backward Euler with SNES solvers | --with-petsc | + +---------------+-----------------------------------------+--------------------+ Each solver can have its own settings which work in slightly different ways, but some common settings and which solvers they are used in are @@ -72,16 +74,18 @@ given in table :numref:`tab-solveropts`. 
+------------------+--------------------------------------------+-------------------------------------+ | Option | Description | Solvers used | +==================+============================================+=====================================+ - | atol | Absolute tolerance | rk4, pvode, cvode, ida, imexbdf2 | + | atol | Absolute tolerance | rk4, pvode, cvode, ida, imexbdf2, | + | | | beuler | +------------------+--------------------------------------------+-------------------------------------+ - | rtol | Relative tolerance | rk4, pvode, cvode, ida, imexbdf2 | + | rtol | Relative tolerance | rk4, pvode, cvode, ida, imexbdf2, | + | | | beuler | +------------------+--------------------------------------------+-------------------------------------+ | mxstep | Maximum internal steps | rk4, imexbdf2 | | | per output step | | +------------------+--------------------------------------------+-------------------------------------+ | max\_timestep | Maximum timestep | rk4, cvode | +------------------+--------------------------------------------+-------------------------------------+ - | timestep | Starting timestep | rk4, karniadakis, euler, imexbdf2 | + | timestep | Starting timestep | rk4, euler, imexbdf2, beuler | +------------------+--------------------------------------------+-------------------------------------+ | adaptive | Adapt timestep? (Y/N) | rk4, imexbdf2 | +------------------+--------------------------------------------+-------------------------------------+ @@ -93,12 +97,14 @@ given in table :numref:`tab-solveropts`. +------------------+--------------------------------------------+-------------------------------------+ | maxl | Maximum number of linear iterations | cvode, imexbdf2 | +------------------+--------------------------------------------+-------------------------------------+ + | max_nonlinear_it | Maximum number of nonlinear iterations | imexbdf2, beuler | + +------------------+--------------------------------------------+-------------------------------------+ | use\_jacobian | Use user-supplied Jacobian? (Y/N) | cvode | +------------------+--------------------------------------------+-------------------------------------+ | adams\_moulton | Use Adams-Moulton method | cvode | | | rather than BDF | | +------------------+--------------------------------------------+-------------------------------------+ - | diagnose | Collect and print additional diagnostics | cvode, imexbdf2 | + | diagnose | Collect and print additional diagnostics | cvode, imexbdf2, beuler | +------------------+--------------------------------------------+-------------------------------------+ | @@ -330,8 +336,39 @@ And the adaptive timestepping options: | adapt_period | 1 | Number of internal steps between tolerance checks | +---------------------+-----------+----------------------------------------------------+ +Backward Euler - SNES +--------------------- + +The `beuler` or `snes` solver type (either name can be used) is +intended mainly for solving steady-state problems, so integrates in +time using a stable but low accuracy method (Backward Euler). It uses +PETSc's SNES solvers to solve the nonlinear system at each timestep, +and adjusts the internal timestep to keep the number of SNES +iterations within a given range. 
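As a standalone sketch of the heuristic described in the paragraph above: the solve_step stub and the step count are invented for illustration; only the halving on divergence, the +/-10% adjustments, and the 80%/50% iteration thresholds mirror what the solver does.

#include <iostream>

// Stand-in for one backward-Euler SNES solve: returns the nonlinear iteration
// count, or -1 if the solve diverged (this behaviour is made up for illustration).
int solve_step(double dt) { return dt > 0.6 ? -1 : static_cast<int>(30 * dt) + 3; }

int main() {
  double timestep = 1.0;
  const int max_its = 50;                                // max_nonlinear_it
  const int upper_its = static_cast<int>(0.8 * max_its); // shrink dt above this
  const int lower_its = static_cast<int>(0.5 * max_its); // grow dt below this

  for (int step = 0; step < 5; ++step) {
    int its = solve_step(timestep);
    while (its < 0) {   // diverged: halve the timestep and retry this step
      timestep /= 2.0;
      its = solve_step(timestep);
    }
    if (its <= lower_its) {
      timestep *= 1.1;  // converging easily: take bigger steps next time
    } else if (its >= upper_its) {
      timestep *= 0.9;  // close to the iteration limit: back off slightly
    }
    std::cout << "step " << step << ": its=" << its << ", next dt=" << timestep << "\n";
  }
}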
+ ++---------------------+-----------+----------------------------------------------------+ +| Option | Default |Description | ++=====================+===========+====================================================+ +| max_nonlinear_it | 50 | If exceeded, solve restarts with timestep / 2 | ++---------------------+-----------+----------------------------------------------------+ +| upper_its | 80% max | If exceeded, next timestep reduced by 10% | ++---------------------+-----------+----------------------------------------------------+ +| lower_its | 50% max | If under this, next timestep increased by 10% | ++---------------------+-----------+----------------------------------------------------+ + +The predictor is linear extrapolation from the last two timesteps. It seems to be +effective, but can be disabled by setting `predictor = false`. + +The `SNES type +`_ +can be set through PETSc command-line options, or in the BOUT++ +options as setting `snes_type`. Good choices for unpreconditioned +problems seem to be `anderson +`_ +(the default) and `qn +`_ +(quasinewton). - ODE integration --------------- diff --git a/src/solver/impls/snes/snes.cxx b/src/solver/impls/snes/snes.cxx index 21ca460326..26fdb82924 100644 --- a/src/solver/impls/snes/snes.cxx +++ b/src/solver/impls/snes/snes.cxx @@ -21,153 +21,262 @@ * This function assumes the context void pointer is a pointer * to an SNESSolver object. */ -static PetscErrorCode FormFunction(SNES UNUSED(snes), Vec x, Vec f, void *ctx) { +static PetscErrorCode FormFunction(SNES UNUSED(snes), Vec x, Vec f, void* ctx) { return static_cast(ctx)->snes_function(x, f); } int SNESSolver::init(int nout, BoutReal tstep) { TRACE("Initialising SNES solver"); - + /// Call the generic initialisation first - if (Solver::init(nout, tstep)) + if (Solver::init(nout, tstep) != 0) { return 1; - + } + + out_timestep = tstep; // Output timestep + nsteps = nout; // Save number of output steps + output << "\n\tSNES steady state solver\n"; - + // Calculate number of variables nlocal = getLocalN(); - + // Get total problem size int ntmp; - if(MPI_Allreduce(&nlocal, &ntmp, 1, MPI_INT, MPI_SUM, BoutComm::get())) { + if (MPI_Allreduce(&nlocal, &ntmp, 1, MPI_INT, MPI_SUM, BoutComm::get())) { throw BoutException("MPI_Allreduce failed!"); } neq = ntmp; output.write("\t3d fields = %d, 2d fields = %d neq=%d, local_N=%d\n", n3Dvars(), n2Dvars(), neq, nlocal); - - // Get options - OPTION(options, mxstep, 500); // Maximum number of steps between outputs - + + timestep = + (*options)["timestep"].doc("Initial backward Euler timestep").withDefault(1.0); + + diagnose = + (*options)["diagnose"].doc("Print additional diagnostics").withDefault(false); + + predictor = (*options)["predictor"].doc("Use linear predictor?").withDefault(true); + // Initialise PETSc components int ierr; - + // Vectors - ierr = VecCreate(BoutComm::get(), &snes_x);CHKERRQ(ierr); - ierr = VecSetSizes(snes_x, nlocal, PETSC_DECIDE);CHKERRQ(ierr); - ierr = VecSetFromOptions(snes_x);CHKERRQ(ierr); - - VecDuplicate(snes_x,&snes_f); - - // Set initial guess at the solution from variables - BoutReal *xdata; - ierr = VecGetArray(snes_x,&xdata);CHKERRQ(ierr); - save_vars(xdata); - ierr = VecRestoreArray(snes_x,&xdata);CHKERRQ(ierr); - + ierr = VecCreate(BoutComm::get(), &snes_x); + CHKERRQ(ierr); // NOLINT + ierr = VecSetSizes(snes_x, nlocal, PETSC_DECIDE); + CHKERRQ(ierr); // NOLINT + ierr = VecSetFromOptions(snes_x); + CHKERRQ(ierr); // NOLINT + + VecDuplicate(snes_x, &snes_f); + VecDuplicate(snes_x, &x0); + + if (predictor) 
{ + // Storage for previous solution + VecDuplicate(snes_x, &x1); + } + // Nonlinear solver interface (SNES) - SNESCreate(BoutComm::get(),&snes); - + SNESCreate(BoutComm::get(), &snes); + // Set the callback function - SNESSetFunction(snes,snes_f,FormFunction,this); - + SNESSetFunction(snes, snes_f, FormFunction, this); + + std::string snes_type = (*options)["snes_type"].withDefault("anderson"); + SNESSetType(snes, snes_type.c_str()); + // Set up the Jacobian - //MatCreateSNESMF(snes,&Jmf); - //SNESSetJacobian(snes,Jmf,Jmf,SNESComputeJacobianDefault,this); - MatCreateAIJ(BoutComm::get(), - nlocal,nlocal, // Local sizes + MatCreateAIJ(BoutComm::get(), nlocal, nlocal, // Local sizes PETSC_DETERMINE, PETSC_DETERMINE, // Global sizes - 3, // Number of nonzero entries in diagonal portion of local submatrix + 3, // Number of nonzero entries in diagonal portion of local submatrix PETSC_NULL, - 0, // Number of nonzeros per row in off-diagonal portion of local submatrix - PETSC_NULL, - &Jmf); -#if PETSC_VERSION_GE(3,4,0) - SNESSetJacobian(snes,Jmf,Jmf,SNESComputeJacobianDefault,this); + 0, // Number of nonzeros per row in off-diagonal portion of local submatrix + PETSC_NULL, &Jmf); +#if PETSC_VERSION_GE(3, 4, 0) + SNESSetJacobian(snes, Jmf, Jmf, SNESComputeJacobianDefault, this); #else // Before 3.4 - SNESSetJacobian(snes,Jmf,Jmf,SNESDefaultComputeJacobian,this); + SNESSetJacobian(snes, Jmf, Jmf, SNESDefaultComputeJacobian, this); #endif - MatSetOption(Jmf,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE); + MatSetOption(Jmf, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE); // Set tolerances - BoutReal atol, rtol; // Tolerances for SNES solver - options->get("atol", atol, 1e-16); - options->get("rtol", rtol, 1e-10); - SNESSetTolerances(snes,atol,rtol,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT); - + BoutReal atol = + (*options)["atol"].doc("Absolute tolerance in SNES solve").withDefault(1e-16); + BoutReal rtol = + (*options)["rtol"].doc("Relative tolerance in SNES solve").withDefault(1e-10); + + int maxits = (*options)["max_nonlinear_it"] + .doc("Maximum number of iterations per SNES solve") + .withDefault(50); + + upper_its = (*options)["upper_its"] + .doc("Iterations above which the next timestep is reduced") + .withDefault(static_cast(maxits * 0.8)); + + lower_its = (*options)["lower_its"] + .doc("Iterations below which the next timestep is increased") + .withDefault(static_cast(maxits * 0.5)); + + SNESSetTolerances(snes, atol, rtol, PETSC_DEFAULT, maxits, PETSC_DEFAULT); + // Get runtime options SNESSetFromOptions(snes); - + return 0; } int SNESSolver::run() { TRACE("SNESSolver::run()"); - - /* - output << "Computing Jacobian\n"; - MatStructure flag; - implicit_curtime = curtime; - implicit_gamma = gamma; - SNESComputeFunction(snes, snes_x, snes_f); - SNESComputeJacobian(snes,snes_x,&Jmf,&Jmf,&flag); - MatView(Jmf, PETSC_VIEWER_STDOUT_SELF); - */ - SNESSolve(snes, nullptr, snes_x); - - // Find out if converged - SNESConvergedReason reason; - SNESGetConvergedReason(snes,&reason); - if(reason < 0) { - // Diverged - throw BoutException("SNES failed to converge. 
Reason: %d\n", reason); + // Set initial guess at the solution from variables + { + BoutReal* xdata = nullptr; + int ierr = VecGetArray(snes_x, &xdata); + CHKERRQ(ierr); // NOLINT + save_vars(xdata); + ierr = VecRestoreArray(snes_x, &xdata); + CHKERRQ(ierr); // NOLINT } - - int its; - SNESGetIterationNumber(snes,&its); - - //output << "Number of SNES iterations: " << its << endl; - - // Put the result into variables - const BoutReal *xdata; - int ierr; - ierr = VecGetArrayRead(snes_x,&xdata);CHKERRQ(ierr); - load_vars(const_cast(xdata)); - ierr = VecRestoreArrayRead(snes_x,&xdata);CHKERRQ(ierr); - run_rhs(0.0); // Run RHS to calculate auxilliary variables - - /// Call the monitor function - - if(call_monitors(0.0, 1, 1)) { - // User signalled to quit + for (int s = 0; s < nsteps; s++) { + BoutReal target = simtime + out_timestep; + + bool looping = true; + do { + // Copy the state (snes_x) into initial values (x0) + VecCopy(snes_x, x0); + + // Set the timestep + dt = timestep; + looping = true; + if (simtime + dt >= target) { + // Ensure that the timestep goes to the next output time and then stops + looping = false; + dt = target - simtime; + } + + if (predictor and (time1 > 0.0)) { + // Use (time1, x1) and (simtime, x0) to make prediction + // snes_x <- x0 + (dt / (simtime - time1)) * (x0 - x1) + // snes_x <- -β * x1 + (1 + β) * snes_x + BoutReal beta = dt / (simtime - time1); + VecAXPBY(snes_x, -beta, (1. + beta), x1); + } + + // Run the solver + SNESSolve(snes, nullptr, snes_x); + + // Find out if converged + SNESConvergedReason reason; + SNESGetConvergedReason(snes, &reason); + if (reason < 0) { + // Diverged + + // Try a smaller timestep + timestep /= 2.0; + // Restore state + VecCopy(x0, snes_x); + + // Check lock state + PetscInt lock_state; + VecLockGet(snes_x, &lock_state); + if (lock_state > 0) { + // Locked for read + output.write("WARNING: snes_x locked for reading\n"); + } else if (lock_state < 0) { + // Locked for write + output.write("WARNING: snes_x locked for writing\n"); + } + continue; // Try again + } + + if (predictor) { + // Save previous values: x1 <- x0 + VecCopy(x0, x1); + time1 = simtime; + } + + simtime += dt; + int its; + SNESGetIterationNumber(snes, &its); + + if (diagnose) { + output.write("SNES time: {}, timestep: {}, iterations: {}\n", simtime, timestep, + its); + } + + if (looping) { + if (its <= lower_its) { + // Increase timestep slightly + timestep *= 1.1; + } else if (its >= upper_its) { + // Reduce timestep slightly + timestep *= 0.9; + } + } + } while (looping); + + // Put the result into variables + { + const BoutReal* xdata = nullptr; + int ierr = VecGetArrayRead(snes_x, &xdata); + CHKERRQ(ierr); // NOLINT + load_vars(const_cast(xdata)); + ierr = VecRestoreArrayRead(snes_x, &xdata); + CHKERRQ(ierr); // NOLINT + } + run_rhs(simtime); // Run RHS to calculate auxilliary variables + + /// Call the monitor function + + if (call_monitors(simtime, s, nsteps) != 0) { + break; // User signalled to quit + } } - + return 0; } // f = rhs PetscErrorCode SNESSolver::snes_function(Vec x, Vec f) { - const BoutReal *xdata; - BoutReal *fdata; - int ierr; - // Get data from PETSc into BOUT++ fields - ierr = VecGetArrayRead(x,&xdata);CHKERRQ(ierr); + const BoutReal* xdata = nullptr; + int ierr = VecGetArrayRead(x, &xdata); + CHKERRQ(ierr); // NOLINT load_vars(const_cast(xdata)); - ierr = VecRestoreArrayRead(x,&xdata);CHKERRQ(ierr); + ierr = VecRestoreArrayRead(x, &xdata); + CHKERRQ(ierr); // NOLINT + + try { + // Call RHS function + run_rhs(simtime + dt); + } catch 
(BoutException& e) { + // Simulation might fail, e.g. negative densities + // if timestep too large + output.write("WARNING: BoutException thrown: {}\n", e.what()); + + // Tell SNES that the input was out of domain + SNESSetFunctionDomainError(snes); + // Note: Returning non-zero error here leaves vectors in locked state + return 0; + } - // Call RHS function - run_rhs(0.0); - // Copy derivatives back - ierr = VecGetArray(f,&fdata);CHKERRQ(ierr); + BoutReal* fdata = nullptr; + ierr = VecGetArray(f, &fdata); + CHKERRQ(ierr); // NOLINT save_derivs(fdata); - ierr = VecRestoreArray(f,&fdata);CHKERRQ(ierr); - + ierr = VecRestoreArray(f, &fdata); + CHKERRQ(ierr); // NOLINT + + // Backward Euler + // Set fdata = xdata - x0 - Δt*fdata + VecAYPX(f, -dt, x); // f <- x - Δt*f + VecAXPY(f, -1.0, x0); // f <- f - x0 + return 0; } diff --git a/src/solver/impls/snes/snes.hxx b/src/solver/impls/snes/snes.hxx index 83e5efc596..0b10a940f1 100644 --- a/src/solver/impls/snes/snes.hxx +++ b/src/solver/impls/snes/snes.hxx @@ -1,13 +1,13 @@ /************************************************************************** - * + * * Finds the steady-state solution of a set of equations * using PETSc for the SNES interface - * + * ************************************************************************** - * Copyright 2015 B.D.Dudson + * Copyright 2015, 2021 B.D.Dudson * * Contact: Ben Dudson, bd512@york.ac.uk - * + * * This file is part of BOUT++. * * BOUT++ is free software: you can redistribute it and/or modify @@ -45,32 +45,46 @@ class SNESSolver; #include namespace { RegisterSolver registersolversnes("snes"); -} +RegisterSolver registersolverbeuler("beuler"); +} // namespace /// Uses PETSc's SNES interface to find a steady state solution to a -/// nonlinear ODE +/// nonlinear ODE by integrating in time with Backward Euler class SNESSolver : public Solver { - public: - SNESSolver(Options *opt = nullptr) : Solver(opt) {} +public: + SNESSolver(Options* opt = nullptr) : Solver(opt) {} ~SNESSolver() {} - + int init(int nout, BoutReal tstep) override; - + int run() override; - + PetscErrorCode snes_function(Vec x, Vec f); ///< Nonlinear function - private: - int mxstep; ///< Maximum number of internal steps between outputs - +private: + BoutReal timestep; ///< Internal timestep + BoutReal dt; ///< Current timestep used in snes_function + + int lower_its, upper_its; ///< Limits on iterations for timestep adjustment + + BoutReal out_timestep; ///< Output timestep + int nsteps; ///< Number of steps to take + + bool diagnose; ///< Output additional diagnostics + int nlocal; ///< Number of variables on local processor - int neq; ///< Number of variables in total - - PetscLib lib; ///< Handles initialising, finalising PETSc - Vec snes_f; ///< Used by SNES to store function - Vec snes_x; ///< Result of SNES - SNES snes; ///< SNES context - Mat Jmf; ///< Matrix-free Jacobian - + int neq; ///< Number of variables in total + + PetscLib lib; ///< Handles initialising, finalising PETSc + Vec snes_f; ///< Used by SNES to store function + Vec snes_x; ///< Result of SNES + Vec x0; ///< Solution at start of current timestep + + bool predictor; ///< Use linear predictor? 
+ Vec x1; ///< Previous solution + BoutReal time1{-1.0}; ///< Time of previous solution + + SNES snes; ///< SNES context + Mat Jmf; ///< Matrix-free Jacobian }; #endif // __SNES_SOLVER_H__ diff --git a/tests/integrated/test-solver/test_solver.cxx b/tests/integrated/test-solver/test_solver.cxx index f0c0c2612e..423a032d10 100644 --- a/tests/integrated/test-solver/test_solver.cxx +++ b/tests/integrated/test-solver/test_solver.cxx @@ -38,7 +38,7 @@ int main(int argc, char** argv) { // Currently hardcode solvers we don't want to test // Should be able to check which solvers aren't suitable - std::vector eigen_solvers = {"power", "slepc", "snes"}; + std::vector eigen_solvers = {"power", "slepc", "snes", "beuler"}; for (auto& eigen_solver : eigen_solvers) { if (SolverFactory::getInstance()->remove(eigen_solver)) { From 054a09967c6707cb56d07514286b719dcc6331fe Mon Sep 17 00:00:00 2001 From: Ben Dudson Date: Fri, 19 Mar 2021 11:39:52 +0000 Subject: [PATCH 165/428] Add test for Backward Euler method --- tests/integrated/test-beuler/CMakeLists.txt | 1 + tests/integrated/test-beuler/README.md | 15 +++ tests/integrated/test-beuler/makefile | 5 + tests/integrated/test-beuler/runtest | 22 +++++ tests/integrated/test-beuler/test_beuler.cxx | 98 ++++++++++++++++++++ 5 files changed, 141 insertions(+) create mode 100644 tests/integrated/test-beuler/CMakeLists.txt create mode 100644 tests/integrated/test-beuler/README.md create mode 100644 tests/integrated/test-beuler/makefile create mode 100755 tests/integrated/test-beuler/runtest create mode 100644 tests/integrated/test-beuler/test_beuler.cxx diff --git a/tests/integrated/test-beuler/CMakeLists.txt b/tests/integrated/test-beuler/CMakeLists.txt new file mode 100644 index 0000000000..05a9aadcc1 --- /dev/null +++ b/tests/integrated/test-beuler/CMakeLists.txt @@ -0,0 +1 @@ +bout_add_integrated_test(test_beuler SOURCES test_beuler.cxx) diff --git a/tests/integrated/test-beuler/README.md b/tests/integrated/test-beuler/README.md new file mode 100644 index 0000000000..b13be6542b --- /dev/null +++ b/tests/integrated/test-beuler/README.md @@ -0,0 +1,15 @@ +test-beuler +=========== + +Integrate a stiff system: + + ddt(f) = 998 * f + 1998 * (g - 1.0); + ddt(g) = -999 * f - 1999 * (g - 1.0); + +starting with f=1, g=0. The solution has an exp(-t) term, and +stiff exp(-1000t) term which can be challenging to integrate. +The solution should converge to f=0, g=1. + +This is an example of a problem where many time integration +solvers will fail (including CVODE with standard settings), +but are quite easily solved with Backward Euler. diff --git a/tests/integrated/test-beuler/makefile b/tests/integrated/test-beuler/makefile new file mode 100644 index 0000000000..4eb9387e29 --- /dev/null +++ b/tests/integrated/test-beuler/makefile @@ -0,0 +1,5 @@ +BOUT_TOP = ../../.. 
+ +SOURCEC = test_beuler.cxx + +include $(BOUT_TOP)/make.config diff --git a/tests/integrated/test-beuler/runtest b/tests/integrated/test-beuler/runtest new file mode 100755 index 0000000000..990877a6f3 --- /dev/null +++ b/tests/integrated/test-beuler/runtest @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + +from boututils.run_wrapper import shell_safe, launch_safe + +from sys import exit + +nthreads = 1 +nproc = 1 + + +print("Making solver test") +shell_safe("make > make.log") + +print("Running solver test") +status, out = launch_safe("./test_beuler", nproc=nproc, mthread=nthreads, pipe=True) +with open("run.log", "w") as f: + f.write(out) + +if status: + print(out) + +exit(status) diff --git a/tests/integrated/test-beuler/test_beuler.cxx b/tests/integrated/test-beuler/test_beuler.cxx new file mode 100644 index 0000000000..e7e9f31834 --- /dev/null +++ b/tests/integrated/test-beuler/test_beuler.cxx @@ -0,0 +1,98 @@ +#include "bout/constants.hxx" +#include "bout/petsclib.hxx" +#include "bout/physicsmodel.hxx" +#include "bout/slepclib.hxx" +#include "bout/solverfactory.hxx" + +#include +#include +#include +#include + +// A simple phyics model with a stiff decay towards a steady state solution +// +class TestSolver : public PhysicsModel { +public: + Field3D f, g; + + int init(bool UNUSED(restarting)) override { + solver->add(f, "f"); + solver->add(g, "g"); + + f = 1.0; + g = 0.0; + + return 0; + } + + int rhs(BoutReal UNUSED(time)) override { + // This should have a steady-state solution f=0, g=1 + ddt(f) = 998 * f + 1998 * (g - 1.0); + ddt(g) = -999 * f - 1999 * (g - 1.0); + + return 0; + } + + bool check_solution(BoutReal atol) { + // Return true if correct solution + return (std::abs(f(1, 1, 0)) < atol) and (std::abs(g(1, 1, 0) - 1) < atol); + } +}; + +int main(int argc, char** argv) { + + // Absolute tolerance for difference between the actual value and the + // expected value + constexpr BoutReal tolerance = 1.e-5; + + // Our own output to stdout, as main library will only be writing to log files + Output output_test; + + auto& root = Options::root(); + + root["mesh"]["MXG"] = 1; + root["mesh"]["MYG"] = 1; + root["mesh"]["nx"] = 3; + root["mesh"]["ny"] = 1; + root["mesh"]["nz"] = 1; + + root["output"]["enabled"] = false; + root["restart"]["enabled"] = false; + + PetscLib::setArgs(argc, argv); + Solver::setArgs(argc, argv); + BoutComm::setArgs(argc, argv); + + bout::globals::mesh = Mesh::create(); + bout::globals::mesh->load(); + + bout::globals::dump = + bout::experimental::setupDumpFile(Options::root(), *bout::globals::mesh, "."); + + // Global options + root["NOUT"] = 20; + root["TIMESTEP"] = 1; + + // Get specific options section for this solver. 
Can't just use default + // "solver" section, as we run into problems when solvers use the same + // name for an option with inconsistent defaults + auto options = Options::getRoot()->getSection("beuler"); + auto solver = std::unique_ptr{Solver::create("beuler", options)}; + + TestSolver model{}; + solver->setModel(&model); + + BoutMonitor bout_monitor{}; + solver->addMonitor(&bout_monitor, Solver::BACK); + + solver->solve(); + + BoutFinalise(false); + + if (model.check_solution(tolerance)) { + output_test << " PASSED\n"; + return 0; + } + output_test << " FAILED\n"; + return 1; +} From 081068e0841070a8bf249645db6363fe2f14b7e8 Mon Sep 17 00:00:00 2001 From: Ben Dudson Date: Fri, 19 Mar 2021 11:51:26 +0000 Subject: [PATCH 166/428] beuler solver needs petsc Only run test-beuler if petsc is available --- tests/integrated/test-beuler/runtest | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integrated/test-beuler/runtest b/tests/integrated/test-beuler/runtest index 990877a6f3..00fb93708c 100755 --- a/tests/integrated/test-beuler/runtest +++ b/tests/integrated/test-beuler/runtest @@ -1,5 +1,7 @@ #!/usr/bin/env python3 +# requires: petsc + from boututils.run_wrapper import shell_safe, launch_safe from sys import exit @@ -7,7 +9,6 @@ from sys import exit nthreads = 1 nproc = 1 - print("Making solver test") shell_safe("make > make.log") From 4ebff577aa4a94b984e3dbd1b129f522d24fba5e Mon Sep 17 00:00:00 2001 From: Ben Dudson Date: Mon, 12 Apr 2021 16:40:19 +0100 Subject: [PATCH 167/428] Fix formatting strings for pre-fmt Outputs need to use %e,%d,%s etc. rather than {} --- src/solver/impls/snes/snes.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/solver/impls/snes/snes.cxx b/src/solver/impls/snes/snes.cxx index 26fdb82924..8cf81996ec 100644 --- a/src/solver/impls/snes/snes.cxx +++ b/src/solver/impls/snes/snes.cxx @@ -204,7 +204,7 @@ int SNESSolver::run() { SNESGetIterationNumber(snes, &its); if (diagnose) { - output.write("SNES time: {}, timestep: {}, iterations: {}\n", simtime, timestep, + output.write("SNES time: %e, timestep: %e, iterations: %d\n", simtime, timestep, its); } @@ -256,7 +256,7 @@ PetscErrorCode SNESSolver::snes_function(Vec x, Vec f) { } catch (BoutException& e) { // Simulation might fail, e.g. 
negative densities // if timestep too large - output.write("WARNING: BoutException thrown: {}\n", e.what()); + output.write("WARNING: BoutException thrown: %s\n", e.what()); // Tell SNES that the input was out of domain SNESSetFunctionDomainError(snes); From 2e06caa180217d2186e6febd7a02c0e9171c1543 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 15 Apr 2021 13:41:23 +0100 Subject: [PATCH 168/428] Add Solver methods for user Jacobian Always store the Jacobian if set, but defer to PhysicsModel Jacobian if that's set --- include/bout/solver.hxx | 9 +++++++- src/solver/solver.cxx | 20 +++++++++++++++++ tests/unit/solver/test_fakesolver.hxx | 2 ++ tests/unit/solver/test_solver.cxx | 31 +++++++++++++++++++++++++++ 4 files changed, 61 insertions(+), 1 deletion(-) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index 6930ce1af5..4f64b43c4e 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -193,7 +193,7 @@ public: /// Specify a preconditioner (optional) void setPrecon(PhysicsPrecon f) { prefunc = f; } /// Specify a Jacobian (optional) - virtual void setJacobian(Jacobian UNUSED(j)) {} + virtual void setJacobian(Jacobian jacobian) { user_jacobian = jacobian; } /// Split operator solves virtual void setSplitOperator(rhsfunc fC, rhsfunc fD); @@ -448,6 +448,11 @@ protected: bool have_user_precon(); int run_precon(BoutReal t, BoutReal gamma, BoutReal delta); + /// Do we have a user Jacobian? + bool hasUserJacobian(); + /// Run the user Jacobian + int runJacobian(BoutReal time); + // Loading data from BOUT++ to/from solver void load_vars(BoutReal* udata); void load_derivs(BoutReal* udata); @@ -495,6 +500,8 @@ private: rhsfunc phys_run{nullptr}; /// The user's preconditioner function PhysicsPrecon prefunc{nullptr}; + /// The user's Jacobian function + Jacobian user_jacobian{nullptr}; /// Is the physics model using separate convective (explicit) and /// diffusive (implicit) RHS functions? 
bool split_operator{false}; diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index 5a5f9523df..d8663231ff 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -1416,6 +1416,26 @@ int Solver::run_precon(BoutReal t, BoutReal gamma, BoutReal delta) { return (*prefunc)(t, gamma, delta); } +bool Solver::hasUserJacobian() { + if (model) { + return model->hasJacobian(); + } + + return user_jacobian != nullptr; +} + +int Solver::runJacobian(BoutReal time) { + if (not hasUserJacobian()) { + return 1; + } + + if (model) { + return model->runJacobian(time); + } + + return (*user_jacobian)(time); +} + // Add source terms to time derivatives void Solver::add_mms_sources(BoutReal t) { if(!mms) diff --git a/tests/unit/solver/test_fakesolver.hxx b/tests/unit/solver/test_fakesolver.hxx index e175f3a0f7..8366e31703 100644 --- a/tests/unit/solver/test_fakesolver.hxx +++ b/tests/unit/solver/test_fakesolver.hxx @@ -78,6 +78,8 @@ public: auto callTimestepMonitorsShim(BoutReal simtime, BoutReal lastdt) -> int { return call_timestep_monitors(simtime, lastdt); } + using Solver::hasUserJacobian; + using Solver::runJacobian; }; #endif // FAKESOLVER_H diff --git a/tests/unit/solver/test_solver.cxx b/tests/unit/solver/test_solver.cxx index 02e415f2cd..5aa099ec8e 100644 --- a/tests/unit/solver/test_solver.cxx +++ b/tests/unit/solver/test_solver.cxx @@ -610,6 +610,37 @@ TEST_F(SolverTest, RunPreconditioner) { EXPECT_EQ(solver.runPreconShim(time, gamma, delta), expected); } +TEST_F(SolverTest, HasJacobian) { + Jacobian jacobian = [](BoutReal time) -> int { + return static_cast(time); + }; + + Options options; + FakeSolver solver{&options}; + + EXPECT_FALSE(solver.hasUserJacobian()); + + solver.setJacobian(jacobian); + + EXPECT_TRUE(solver.hasUserJacobian()); +} + +TEST_F(SolverTest, RunJacobian) { + Jacobian jacobian = [](BoutReal time) -> int { + return static_cast(time); + }; + + Options options; + FakeSolver solver{&options}; + + solver.setJacobian(jacobian); + + constexpr auto time = 4.0; + constexpr auto expected = 4; + + EXPECT_EQ(solver.runJacobian(time), expected); +} + TEST_F(SolverTest, AddMonitor) { Options options; FakeSolver solver{&options}; From e1f0efd10d5535c8c33c785b42e246f50305fde9 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 15 Apr 2021 13:49:24 +0100 Subject: [PATCH 169/428] Fix some time Solvers not using user preconditioner/Jacobian If preconditioner or Jacobian was set using `PhysicsModel::setPrecon/setJacobian` then CVODE, ARKODE and PETSc `Solver`s would not use them --- src/solver/impls/arkode/arkode.cxx | 6 +++--- src/solver/impls/arkode/arkode.hxx | 3 --- src/solver/impls/cvode/cvode.cxx | 6 +++--- src/solver/impls/cvode/cvode.hxx | 3 --- src/solver/impls/petsc/petsc.cxx | 15 ++++++--------- src/solver/impls/petsc/petsc.hxx | 7 ------- 6 files changed, 12 insertions(+), 28 deletions(-) diff --git a/src/solver/impls/arkode/arkode.cxx b/src/solver/impls/arkode/arkode.cxx index 4cf0f885d8..36211d25cb 100644 --- a/src/solver/impls/arkode/arkode.cxx +++ b/src/solver/impls/arkode/arkode.cxx @@ -473,7 +473,7 @@ int ArkodeSolver::init(int nout, BoutReal tstep) { /// Set Jacobian-vector multiplication function const auto use_jacobian = (*options)["use_jacobian"].withDefault(false); - if (use_jacobian && jacfunc) { + if (use_jacobian and hasUserJacobian()) { output.write("\tUsing user-supplied Jacobian function\n"); if (ARKStepSetJacTimes(arkode_mem, nullptr, arkode_jac) != ARK_SUCCESS) @@ -690,7 +690,7 @@ void ArkodeSolver::pre(BoutReal t, BoutReal gamma, BoutReal delta, 
BoutReal* uda void ArkodeSolver::jac(BoutReal t, BoutReal* ydata, BoutReal* vdata, BoutReal* Jvdata) { TRACE("Running Jacobian: ArkodeSolver::jac(%e)", t); - if (jacfunc == nullptr) + if (not hasUserJacobian()) throw BoutException("No jacobian function supplied!\n"); // Load state from ydate @@ -700,7 +700,7 @@ void ArkodeSolver::jac(BoutReal t, BoutReal* ydata, BoutReal* vdata, BoutReal* J load_derivs(vdata); // Call function - (*jacfunc)(t); + runJacobian(t); // Save Jv from vars save_derivs(Jvdata); diff --git a/src/solver/impls/arkode/arkode.hxx b/src/solver/impls/arkode/arkode.hxx index 9fa6a2f741..617f23b391 100644 --- a/src/solver/impls/arkode/arkode.hxx +++ b/src/solver/impls/arkode/arkode.hxx @@ -60,8 +60,6 @@ public: ArkodeSolver(Options* opts = nullptr); ~ArkodeSolver(); - void setJacobian(Jacobian j) override { jacfunc = j; } - BoutReal getCurrentTimestep() override { return hcur; } int init(int nout, BoutReal tstep) override; @@ -82,7 +80,6 @@ private: BoutReal TIMESTEP; // Time between outputs BoutReal hcur; // Current internal timestep - Jacobian jacfunc{nullptr}; // Jacobian - vector function bool diagnose{false}; // Output additional diagnostics N_Vector uvec{nullptr}; // Values diff --git a/src/solver/impls/cvode/cvode.cxx b/src/solver/impls/cvode/cvode.cxx index e5914f8eeb..6a3c427e25 100644 --- a/src/solver/impls/cvode/cvode.cxx +++ b/src/solver/impls/cvode/cvode.cxx @@ -356,7 +356,7 @@ int CvodeSolver::init(int nout, BoutReal tstep) { /// Set Jacobian-vector multiplication function const auto use_jacobian = (*options)["use_jacobian"].withDefault(false); - if ((use_jacobian) && (jacfunc != nullptr)) { + if (use_jacobian and hasUserJacobian()) { output_info.write("\tUsing user-supplied Jacobian function\n"); if (CVSpilsSetJacTimes(cvode_mem, nullptr, cvode_jac) != CV_SUCCESS) @@ -573,7 +573,7 @@ void CvodeSolver::pre(BoutReal t, BoutReal gamma, BoutReal delta, BoutReal* udat void CvodeSolver::jac(BoutReal t, BoutReal* ydata, BoutReal* vdata, BoutReal* Jvdata) { TRACE("Running Jacobian: CvodeSolver::jac(%e)", t); - if (jacfunc == nullptr) + if (not hasUserJacobian()) throw BoutException("No jacobian function supplied!\n"); // Load state from ydate @@ -583,7 +583,7 @@ void CvodeSolver::jac(BoutReal t, BoutReal* ydata, BoutReal* vdata, BoutReal* Jv load_derivs(vdata); // Call function - (*jacfunc)(t); + runJacobian(t); // Save Jv from vars save_derivs(Jvdata); diff --git a/src/solver/impls/cvode/cvode.hxx b/src/solver/impls/cvode/cvode.hxx index a628c6c041..58de884c29 100644 --- a/src/solver/impls/cvode/cvode.hxx +++ b/src/solver/impls/cvode/cvode.hxx @@ -59,8 +59,6 @@ public: CvodeSolver(Options* opts = nullptr); ~CvodeSolver(); - void setJacobian(Jacobian j) override { jacfunc = j; } - BoutReal getCurrentTimestep() override { return hcur; } int init(int nout, BoutReal tstep) override; @@ -81,7 +79,6 @@ private: BoutReal TIMESTEP; // Time between outputs BoutReal hcur; // Current internal timestep - Jacobian jacfunc{nullptr}; // Jacobian - vector function bool diagnose{false}; // Output additional diagnostics N_Vector uvec{nullptr}; // Values diff --git a/src/solver/impls/petsc/petsc.cxx b/src/solver/impls/petsc/petsc.cxx index 83265cedc9..7411d6f6af 100644 --- a/src/solver/impls/petsc/petsc.cxx +++ b/src/solver/impls/petsc/petsc.cxx @@ -66,9 +66,6 @@ PetscSolver::PetscSolver(Options *opts) : Solver(opts) { initialised = false; bout_snes_time = .0; - prefunc = nullptr; - jacfunc = nullptr; - output_flag = PETSC_FALSE; } @@ -291,12 +288,12 @@ int PetscSolver::init(int NOUT, 
BoutReal TIMESTEP) { // Matrix free Jacobian - if(use_jacobian && (jacfunc != nullptr)) { + if (use_jacobian and hasUserJacobian()) { // Use a user-supplied Jacobian function ierr = MatCreateShell(comm, local_N, local_N, neq, neq, this, &Jmf); CHKERRQ(ierr); ierr = MatShellSetOperation(Jmf, MATOP_MULT, (void (*)()) PhysicsJacobianApply); CHKERRQ(ierr); ierr = TSSetIJacobian(ts, Jmf, Jmf, solver_ijacobian, this); CHKERRQ(ierr); - }else { + } else { // Use finite difference approximation ierr = MatCreateSNESMF(snes,&Jmf);CHKERRQ(ierr); ierr = SNESSetJacobian(snes,Jmf,Jmf,MatMFFDComputeJacobian,this);CHKERRQ(ierr); @@ -308,7 +305,7 @@ int PetscSolver::init(int NOUT, BoutReal TIMESTEP) { ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr); - if(use_precon && (prefunc != nullptr)) { + if (use_precon and have_user_precon()) { #if PETSC_VERSION_GE(3,5,0) ierr = SNESGetNPC(snes,&psnes);CHKERRQ(ierr); @@ -340,7 +337,7 @@ int PetscSolver::init(int NOUT, BoutReal TIMESTEP) { // Use right preconditioner ierr = KSPSetPCSide(ksp, PC_RIGHT);CHKERRQ(ierr); - }else { + } else { // Default to no preconditioner ierr = PCSetType(pc,PCNONE);CHKERRQ(ierr); } @@ -580,7 +577,7 @@ PetscErrorCode PetscSolver::pre(PC UNUSED(pc), Vec x, Vec y) { VecRestoreArray(x, &data); // Call the preconditioner - (*prefunc)(ts_time, 1./shift, 0.0); + run_precon(ts_time, 1./shift, 0.0); // Save the solution from time derivatives VecGetArray(y, &data); @@ -616,7 +613,7 @@ PetscErrorCode PetscSolver::jac(Vec x, Vec y) { VecRestoreArray(x, &data); // Call the Jacobian function - (*jacfunc)(ts_time); + runJacobian(ts_time); // Save the solution from time derivatives VecGetArray(y, &data); diff --git a/src/solver/impls/petsc/petsc.hxx b/src/solver/impls/petsc/petsc.hxx index 30a977c9e6..c3a3080200 100644 --- a/src/solver/impls/petsc/petsc.hxx +++ b/src/solver/impls/petsc/petsc.hxx @@ -83,10 +83,6 @@ public: PetscSolver(Options *opts = nullptr); ~PetscSolver(); - // Can be called from physics initialisation to supply callbacks - void setPrecon(PhysicsPrecon f) { prefunc = f; } - void setJacobian(Jacobian j) override { jacfunc = j; } - int init(int NOUT, BoutReal TIMESTEP) override; int run() override; @@ -114,9 +110,6 @@ public: PetscLogEvent solver_event, loop_event, init_event; private: - PhysicsPrecon prefunc; ///< Preconditioner - Jacobian jacfunc; ///< Jacobian - vector function - BoutReal shift; ///< Shift (alpha) parameter from TS Vec state; BoutReal ts_time; ///< Internal PETSc timestepper time From d26bca65b2e0749d91a50dfa28f3cd78c41d08d2 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 15 Apr 2021 15:17:14 +0100 Subject: [PATCH 170/428] Rename has/run preconditioner/Jacobian solver methods --- include/bout/solver.hxx | 11 ++++++++--- src/solver/impls/arkode/arkode.cxx | 10 +++++----- src/solver/impls/cvode/cvode.cxx | 10 +++++----- src/solver/impls/ida/ida.cxx | 6 +++--- src/solver/impls/imex-bdf2/imex-bdf2.cxx | 10 +++++----- src/solver/impls/petsc/petsc.cxx | 6 +++--- src/solver/solver.cxx | 23 +++++++++++++---------- tests/unit/solver/test_fakesolver.hxx | 6 +++--- tests/unit/solver/test_solver.cxx | 4 ++-- 9 files changed, 47 insertions(+), 39 deletions(-) diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index 4f64b43c4e..aa9e0c21e3 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -445,11 +445,16 @@ protected: int call_timestep_monitors(BoutReal simtime, BoutReal lastdt); /// Do we have a user preconditioner? 
- bool have_user_precon(); - int run_precon(BoutReal t, BoutReal gamma, BoutReal delta); + bool hasPreconditioner(); + DEPRECATED(bool have_user_precon)() { return hasPreconditioner(); } + /// Run the user preconditioner + int runPreconditioner(BoutReal time, BoutReal gamma, BoutReal delta); + DEPRECATED(int run_precon)(BoutReal time, BoutReal gamma, BoutReal delta) { + return runPreconditioner(time, gamma, delta); + } /// Do we have a user Jacobian? - bool hasUserJacobian(); + bool hasJacobian(); /// Run the user Jacobian int runJacobian(BoutReal time); diff --git a/src/solver/impls/arkode/arkode.cxx b/src/solver/impls/arkode/arkode.cxx index 36211d25cb..6ceda3442f 100644 --- a/src/solver/impls/arkode/arkode.cxx +++ b/src/solver/impls/arkode/arkode.cxx @@ -422,7 +422,7 @@ int ArkodeSolver::init(int nout, BoutReal tstep) { throw BoutException("ARKSpgmr failed\n"); #endif - if (!have_user_precon()) { + if (!hasPreconditioner()) { output.write("\tUsing BBD preconditioner\n"); /// Get options @@ -473,7 +473,7 @@ int ArkodeSolver::init(int nout, BoutReal tstep) { /// Set Jacobian-vector multiplication function const auto use_jacobian = (*options)["use_jacobian"].withDefault(false); - if (use_jacobian and hasUserJacobian()) { + if (use_jacobian and hasJacobian()) { output.write("\tUsing user-supplied Jacobian function\n"); if (ARKStepSetJacTimes(arkode_mem, nullptr, arkode_jac) != ARK_SUCCESS) @@ -661,7 +661,7 @@ void ArkodeSolver::pre(BoutReal t, BoutReal gamma, BoutReal delta, BoutReal* uda const BoutReal tstart = MPI_Wtime(); - if (!have_user_precon()) { + if (!hasPreconditioner()) { // Identity (but should never happen) const int N = NV_LOCLENGTH_P(uvec); std::copy(rvec, rvec + N, zvec); @@ -674,7 +674,7 @@ void ArkodeSolver::pre(BoutReal t, BoutReal gamma, BoutReal delta, BoutReal* uda // Load vector to be inverted into F_vars load_derivs(rvec); - run_precon(t, gamma, delta); + runPreconditioner(t, gamma, delta); // Save the solution from F_vars save_derivs(zvec); @@ -690,7 +690,7 @@ void ArkodeSolver::pre(BoutReal t, BoutReal gamma, BoutReal delta, BoutReal* uda void ArkodeSolver::jac(BoutReal t, BoutReal* ydata, BoutReal* vdata, BoutReal* Jvdata) { TRACE("Running Jacobian: ArkodeSolver::jac(%e)", t); - if (not hasUserJacobian()) + if (not hasJacobian()) throw BoutException("No jacobian function supplied!\n"); // Load state from ydate diff --git a/src/solver/impls/cvode/cvode.cxx b/src/solver/impls/cvode/cvode.cxx index 6a3c427e25..cda4ad4269 100644 --- a/src/solver/impls/cvode/cvode.cxx +++ b/src/solver/impls/cvode/cvode.cxx @@ -309,7 +309,7 @@ int CvodeSolver::init(int nout, BoutReal tstep) { throw BoutException("CVSpgmr failed\n"); #endif - if (!have_user_precon()) { + if (!hasPreconditioner()) { output_info.write("\tUsing BBD preconditioner\n"); /// Get options @@ -356,7 +356,7 @@ int CvodeSolver::init(int nout, BoutReal tstep) { /// Set Jacobian-vector multiplication function const auto use_jacobian = (*options)["use_jacobian"].withDefault(false); - if (use_jacobian and hasUserJacobian()) { + if (use_jacobian and hasJacobian()) { output_info.write("\tUsing user-supplied Jacobian function\n"); if (CVSpilsSetJacTimes(cvode_mem, nullptr, cvode_jac) != CV_SUCCESS) @@ -544,7 +544,7 @@ void CvodeSolver::pre(BoutReal t, BoutReal gamma, BoutReal delta, BoutReal* udat int N = NV_LOCLENGTH_P(uvec); - if (!have_user_precon()) { + if (!hasPreconditioner()) { // Identity (but should never happen) for (int i = 0; i < N; i++) zvec[i] = rvec[i]; @@ -557,7 +557,7 @@ void CvodeSolver::pre(BoutReal t, 
BoutReal gamma, BoutReal delta, BoutReal* udat // Load vector to be inverted into F_vars load_derivs(rvec); - run_precon(t, gamma, delta); + runPreconditioner(t, gamma, delta); // Save the solution from F_vars save_derivs(zvec); @@ -573,7 +573,7 @@ void CvodeSolver::pre(BoutReal t, BoutReal gamma, BoutReal delta, BoutReal* udat void CvodeSolver::jac(BoutReal t, BoutReal* ydata, BoutReal* vdata, BoutReal* Jvdata) { TRACE("Running Jacobian: CvodeSolver::jac(%e)", t); - if (not hasUserJacobian()) + if (not hasJacobian()) throw BoutException("No jacobian function supplied!\n"); // Load state from ydate diff --git a/src/solver/impls/ida/ida.cxx b/src/solver/impls/ida/ida.cxx index 665df2e34c..61a866ba50 100644 --- a/src/solver/impls/ida/ida.cxx +++ b/src/solver/impls/ida/ida.cxx @@ -194,7 +194,7 @@ int IdaSolver::init(int nout, BoutReal tstep) { const auto use_precon = (*options)["use_precon"].withDefault(false); if (use_precon) { - if (!have_user_precon()) { + if (!hasPreconditioner()) { output.write("\tUsing BBD preconditioner\n"); /// Get options // Compute band_width_default from actually added fields, to allow for multiple Mesh @@ -326,7 +326,7 @@ void IdaSolver::pre(BoutReal t, BoutReal cj, BoutReal delta, BoutReal* udata, const BoutReal tstart = MPI_Wtime(); - if (!have_user_precon()) { + if (!hasPreconditioner()) { // Identity (but should never happen) const int N = NV_LOCLENGTH_P(id); std::copy(rvec, rvec + N, zvec); @@ -339,7 +339,7 @@ void IdaSolver::pre(BoutReal t, BoutReal cj, BoutReal delta, BoutReal* udata, // Load vector to be inverted into F_vars load_derivs(rvec); - run_precon(t, cj, delta); + runPreconditioner(t, cj, delta); // Save the solution from F_vars save_derivs(zvec); diff --git a/src/solver/impls/imex-bdf2/imex-bdf2.cxx b/src/solver/impls/imex-bdf2/imex-bdf2.cxx index d7461efee1..6c12b9c01d 100644 --- a/src/solver/impls/imex-bdf2/imex-bdf2.cxx +++ b/src/solver/impls/imex-bdf2/imex-bdf2.cxx @@ -700,7 +700,7 @@ void IMEXBDF2::constructSNES(SNES *snesIn){ PC pc; KSPGetPC(ksp,&pc); - if(use_precon && have_user_precon()) { + if (use_precon && hasPreconditioner()) { output.write("\tUsing user-supplied preconditioner\n"); // Set a Shell (matrix-free) preconditioner type @@ -710,10 +710,10 @@ void IMEXBDF2::constructSNES(SNES *snesIn){ PCShellSetApply(pc,imexbdf2PCapply); // Context used to supply object pointer PCShellSetContext(pc,this); - }else if(matrix_free){ + } else if (matrix_free) { PCSetType(pc, PCNONE); } - + ///////////////////////////////////////////////////// // diagnostics @@ -1274,7 +1274,7 @@ PetscErrorCode IMEXBDF2::snes_function(Vec x, Vec f, bool linear) { * Preconditioner function */ PetscErrorCode IMEXBDF2::precon(Vec x, Vec f) { - if(!have_user_precon()) { + if (!hasPreconditioner()) { // No user preconditioner throw BoutException("No user preconditioner"); } @@ -1296,7 +1296,7 @@ PetscErrorCode IMEXBDF2::precon(Vec x, Vec f) { ierr = VecRestoreArray(x,&xdata);CHKERRQ(ierr); // Run the preconditioner - run_precon(implicit_curtime, implicit_gamma, 0.0); + runPreconditioner(implicit_curtime, implicit_gamma, 0.0); // Save the solution from F_vars BoutReal *fdata; diff --git a/src/solver/impls/petsc/petsc.cxx b/src/solver/impls/petsc/petsc.cxx index 7411d6f6af..24c2f261fd 100644 --- a/src/solver/impls/petsc/petsc.cxx +++ b/src/solver/impls/petsc/petsc.cxx @@ -288,7 +288,7 @@ int PetscSolver::init(int NOUT, BoutReal TIMESTEP) { // Matrix free Jacobian - if (use_jacobian and hasUserJacobian()) { + if (use_jacobian and hasJacobian()) { // Use a user-supplied 
Jacobian function ierr = MatCreateShell(comm, local_N, local_N, neq, neq, this, &Jmf); CHKERRQ(ierr); ierr = MatShellSetOperation(Jmf, MATOP_MULT, (void (*)()) PhysicsJacobianApply); CHKERRQ(ierr); @@ -305,7 +305,7 @@ int PetscSolver::init(int NOUT, BoutReal TIMESTEP) { ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr); - if (use_precon and have_user_precon()) { + if (use_precon and hasPreconditioner()) { #if PETSC_VERSION_GE(3,5,0) ierr = SNESGetNPC(snes,&psnes);CHKERRQ(ierr); @@ -577,7 +577,7 @@ PetscErrorCode PetscSolver::pre(PC UNUSED(pc), Vec x, Vec y) { VecRestoreArray(x, &data); // Call the preconditioner - run_precon(ts_time, 1./shift, 0.0); + runPreconditioner(ts_time, 1. / shift, 0.0); // Save the solution from time derivatives VecGetArray(y, &data); diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index d8663231ff..93d189ccad 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -1399,25 +1399,28 @@ bool Solver::varAdded(const std::string& name) { || contains(v3d, name); } -bool Solver::have_user_precon() { - if(model) +bool Solver::hasPreconditioner() { + if (model != nullptr) { return model->hasPrecon(); + } return prefunc != nullptr; } -int Solver::run_precon(BoutReal t, BoutReal gamma, BoutReal delta) { - if(!have_user_precon()) +int Solver::runPreconditioner(BoutReal t, BoutReal gamma, BoutReal delta) { + if (not hasPreconditioner()) { return 1; + } - if(model) + if (model != nullptr) { return model->runPrecon(t, gamma, delta); - + } + return (*prefunc)(t, gamma, delta); } -bool Solver::hasUserJacobian() { - if (model) { +bool Solver::hasJacobian() { + if (model != nullptr) { return model->hasJacobian(); } @@ -1425,11 +1428,11 @@ bool Solver::hasUserJacobian() { } int Solver::runJacobian(BoutReal time) { - if (not hasUserJacobian()) { + if (not hasJacobian()) { return 1; } - if (model) { + if (model != nullptr) { return model->runJacobian(time); } diff --git a/tests/unit/solver/test_fakesolver.hxx b/tests/unit/solver/test_fakesolver.hxx index 8366e31703..46322f6b77 100644 --- a/tests/unit/solver/test_fakesolver.hxx +++ b/tests/unit/solver/test_fakesolver.hxx @@ -66,9 +66,9 @@ public: // Shims for protected functions auto getMaxTimestepShim() const -> BoutReal { return max_dt; } auto getLocalNShim() -> int { return getLocalN(); } - auto haveUserPreconShim() -> bool { return have_user_precon(); } + auto haveUserPreconShim() -> bool { return hasPreconditioner(); } auto runPreconShim(BoutReal t, BoutReal gamma, BoutReal delta) -> int { - return run_precon(t, gamma, delta); + return runPreconditioner(t, gamma, delta); } auto globalIndexShim(int local_start) -> Field3D { return globalIndex(local_start); } auto getMonitorsShim() const -> const std::list& { return getMonitors(); } @@ -78,7 +78,7 @@ public: auto callTimestepMonitorsShim(BoutReal simtime, BoutReal lastdt) -> int { return call_timestep_monitors(simtime, lastdt); } - using Solver::hasUserJacobian; + using Solver::hasJacobian; using Solver::runJacobian; }; diff --git a/tests/unit/solver/test_solver.cxx b/tests/unit/solver/test_solver.cxx index 5aa099ec8e..78a824a350 100644 --- a/tests/unit/solver/test_solver.cxx +++ b/tests/unit/solver/test_solver.cxx @@ -618,11 +618,11 @@ TEST_F(SolverTest, HasJacobian) { Options options; FakeSolver solver{&options}; - EXPECT_FALSE(solver.hasUserJacobian()); + EXPECT_FALSE(solver.hasJacobian()); solver.setJacobian(jacobian); - EXPECT_TRUE(solver.hasUserJacobian()); + EXPECT_TRUE(solver.hasJacobian()); } TEST_F(SolverTest, RunJacobian) { From 
a3bb05c8535d9c4504990b4737366cadfdd66833 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 15 Apr 2021 15:24:54 +0100 Subject: [PATCH 171/428] Use better method of exposing protected methods for testing --- tests/unit/solver/test_fakesolver.hxx | 20 +++----- tests/unit/solver/test_solver.cxx | 68 +++++++++++++-------------- 2 files changed, 41 insertions(+), 47 deletions(-) diff --git a/tests/unit/solver/test_fakesolver.hxx b/tests/unit/solver/test_fakesolver.hxx index 46322f6b77..bb6ef16ca9 100644 --- a/tests/unit/solver/test_fakesolver.hxx +++ b/tests/unit/solver/test_fakesolver.hxx @@ -65,19 +65,13 @@ public: // Shims for protected functions auto getMaxTimestepShim() const -> BoutReal { return max_dt; } - auto getLocalNShim() -> int { return getLocalN(); } - auto haveUserPreconShim() -> bool { return hasPreconditioner(); } - auto runPreconShim(BoutReal t, BoutReal gamma, BoutReal delta) -> int { - return runPreconditioner(t, gamma, delta); - } - auto globalIndexShim(int local_start) -> Field3D { return globalIndex(local_start); } - auto getMonitorsShim() const -> const std::list& { return getMonitors(); } - auto callMonitorsShim(BoutReal simtime, int iter, int NOUT) -> int { - return call_monitors(simtime, iter, NOUT); - } - auto callTimestepMonitorsShim(BoutReal simtime, BoutReal lastdt) -> int { - return call_timestep_monitors(simtime, lastdt); - } + using Solver::getLocalN; + using Solver::hasPreconditioner; + using Solver::runPreconditioner; + using Solver::globalIndex; + using Solver::getMonitors; + using Solver::call_monitors; + using Solver::call_timestep_monitors; using Solver::hasJacobian; using Solver::runJacobian; }; diff --git a/tests/unit/solver/test_solver.cxx b/tests/unit/solver/test_solver.cxx index 78a824a350..9130f12961 100644 --- a/tests/unit/solver/test_solver.cxx +++ b/tests/unit/solver/test_solver.cxx @@ -572,7 +572,7 @@ TEST_F(SolverTest, GetLocalN) { + (localmesh_nx_no_boundry * localmesh_ny_no_boundry * localmesh_nz) + (nx * ny * nz); - EXPECT_EQ(solver.getLocalNShim(), expected_total); + EXPECT_EQ(solver.getLocalN(), expected_total); } TEST_F(SolverTest, HavePreconditioner) { @@ -584,11 +584,11 @@ TEST_F(SolverTest, HavePreconditioner) { Options options; FakeSolver solver{&options}; - EXPECT_FALSE(solver.haveUserPreconShim()); + EXPECT_FALSE(solver.hasPreconditioner()); solver.setPrecon(preconditioner); - EXPECT_TRUE(solver.haveUserPreconShim()); + EXPECT_TRUE(solver.hasPreconditioner()); } TEST_F(SolverTest, RunPreconditioner) { @@ -607,7 +607,7 @@ TEST_F(SolverTest, RunPreconditioner) { constexpr auto delta = 3.0; constexpr auto expected = time + gamma + delta; - EXPECT_EQ(solver.runPreconShim(time, gamma, delta), expected); + EXPECT_EQ(solver.runPreconditioner(time, gamma, delta), expected); } TEST_F(SolverTest, HasJacobian) { @@ -653,7 +653,7 @@ TEST_F(SolverTest, AddMonitor) { EXPECT_THROW(monitor.setTimestepShim(20.0), BoutException); - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, 0, 0)); + EXPECT_NO_THROW(solver.call_monitors(0.0, 0, 0)); EXPECT_EQ(monitor.last_called, 0); } @@ -669,19 +669,19 @@ TEST_F(SolverTest, AddMonitorFront) { EXPECT_NO_THROW(solver.addMonitor(&monitor2, Solver::FRONT)); // Everything's fine - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, 0, 0)); + EXPECT_NO_THROW(solver.call_monitors(0.0, 0, 0)); EXPECT_EQ(monitor1.last_called, 0); EXPECT_EQ(monitor2.last_called, 0); // One monitor signals to quit - EXPECT_THROW(solver.callMonitorsShim(5.0, 1, 0), BoutException); + EXPECT_THROW(solver.call_monitors(5.0, 1, 0), BoutException); 
EXPECT_EQ(monitor1.last_called, 0); EXPECT_EQ(monitor2.last_called, 1); // Last timestep - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, 9, 10)); + EXPECT_NO_THROW(solver.call_monitors(0.0, 9, 10)); EXPECT_EQ(monitor1.last_called, 9); EXPECT_EQ(monitor2.last_called, 9); @@ -700,19 +700,19 @@ TEST_F(SolverTest, AddMonitorBack) { EXPECT_NO_THROW(solver.addMonitor(&monitor2, Solver::BACK)); // Everything's fine - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, 0, 0)); + EXPECT_NO_THROW(solver.call_monitors(0.0, 0, 0)); EXPECT_EQ(monitor1.last_called, 0); EXPECT_EQ(monitor2.last_called, 0); // One monitor signals to quit - EXPECT_THROW(solver.callMonitorsShim(5.0, 1, 0), BoutException); + EXPECT_THROW(solver.call_monitors(5.0, 1, 0), BoutException); EXPECT_EQ(monitor1.last_called, 1); EXPECT_EQ(monitor2.last_called, 0); // Last timestep - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, 9, 10)); + EXPECT_NO_THROW(solver.call_monitors(0.0, 9, 10)); EXPECT_EQ(monitor1.last_called, 9); EXPECT_EQ(monitor2.last_called, 9); @@ -736,7 +736,7 @@ TEST_F(SolverTest, AddMonitorCheckFrequencies) { EXPECT_NO_THROW(solver.addMonitor(&larger_timestep)); EXPECT_THROW(solver.addMonitor(&incompatible_timestep), BoutException); - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, -1, 0)); + EXPECT_NO_THROW(solver.call_monitors(0.0, -1, 0)); EXPECT_EQ(default_timestep.last_called, -1); EXPECT_EQ(smaller_timestep.last_called, -1); @@ -744,7 +744,7 @@ TEST_F(SolverTest, AddMonitorCheckFrequencies) { EXPECT_EQ(larger_timestep.last_called, -1); EXPECT_EQ(incompatible_timestep.last_called, called_sentinel); - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, 9, 0)); + EXPECT_NO_THROW(solver.call_monitors(0.0, 9, 0)); EXPECT_EQ(default_timestep.last_called, 0); EXPECT_EQ(smaller_timestep.last_called, 0); @@ -752,7 +752,7 @@ TEST_F(SolverTest, AddMonitorCheckFrequencies) { EXPECT_EQ(larger_timestep.last_called, -1); EXPECT_EQ(incompatible_timestep.last_called, called_sentinel); - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, 10, 0)); + EXPECT_NO_THROW(solver.call_monitors(0.0, 10, 0)); EXPECT_EQ(default_timestep.last_called, 0); EXPECT_EQ(smaller_timestep.last_called, 0); @@ -760,7 +760,7 @@ TEST_F(SolverTest, AddMonitorCheckFrequencies) { EXPECT_EQ(larger_timestep.last_called, -1); EXPECT_EQ(incompatible_timestep.last_called, called_sentinel); - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, 199, 0)); + EXPECT_NO_THROW(solver.call_monitors(0.0, 199, 0)); EXPECT_EQ(default_timestep.last_called, 19); EXPECT_EQ(smaller_timestep.last_called, 19); @@ -768,7 +768,7 @@ TEST_F(SolverTest, AddMonitorCheckFrequencies) { EXPECT_EQ(larger_timestep.last_called, 0); EXPECT_EQ(incompatible_timestep.last_called, called_sentinel); - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, 399, 0)); + EXPECT_NO_THROW(solver.call_monitors(0.0, 399, 0)); EXPECT_EQ(default_timestep.last_called, 39); EXPECT_EQ(smaller_timestep.last_called, 39); @@ -783,7 +783,7 @@ TEST_F(SolverTest, AddMonitorCheckFrequencies) { FakeMonitor larger_postinit_timestep{4.}; EXPECT_NO_THROW(solver.addMonitor(&larger_postinit_timestep, Solver::BACK)); - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, 399, 0)); + EXPECT_NO_THROW(solver.call_monitors(0.0, 399, 0)); EXPECT_EQ(default_timestep.last_called, 39); EXPECT_EQ(smaller_timestep.last_called, 39); @@ -805,11 +805,11 @@ TEST_F(SolverTest, RemoveMonitor) { solver.removeMonitor(&monitor1); std::list expected{&monitor2}; - EXPECT_EQ(solver.getMonitorsShim(), expected); + EXPECT_EQ(solver.getMonitors(), expected); // Removing same monitor again 
should be a no-op solver.removeMonitor(&monitor1); - EXPECT_EQ(solver.getMonitorsShim(), expected); + EXPECT_EQ(solver.getMonitors(), expected); } namespace { @@ -829,9 +829,9 @@ TEST_F(SolverTest, AddTimestepMonitor) { EXPECT_NO_THROW(solver.addTimestepMonitor(timestep_monitor1)); EXPECT_NO_THROW(solver.addTimestepMonitor(timestep_monitor2)); - EXPECT_EQ(solver.callTimestepMonitorsShim(1., 1.), 0); - EXPECT_EQ(solver.callTimestepMonitorsShim(1., -1.), 1); - EXPECT_EQ(solver.callTimestepMonitorsShim(-1., -1.), 2); + EXPECT_EQ(solver.call_timestep_monitors(1., 1.), 0); + EXPECT_EQ(solver.call_timestep_monitors(1., -1.), 1); + EXPECT_EQ(solver.call_timestep_monitors(-1., -1.), 2); } TEST_F(SolverTest, RemoveTimestepMonitor) { @@ -844,15 +844,15 @@ TEST_F(SolverTest, RemoveTimestepMonitor) { solver.removeTimestepMonitor(timestep_monitor1); - EXPECT_EQ(solver.callTimestepMonitorsShim(1., 1.), 0); - EXPECT_EQ(solver.callTimestepMonitorsShim(1., -1.), 0); - EXPECT_EQ(solver.callTimestepMonitorsShim(-1., -1.), 2); + EXPECT_EQ(solver.call_timestep_monitors(1., 1.), 0); + EXPECT_EQ(solver.call_timestep_monitors(1., -1.), 0); + EXPECT_EQ(solver.call_timestep_monitors(-1., -1.), 2); solver.removeTimestepMonitor(timestep_monitor1); - EXPECT_EQ(solver.callTimestepMonitorsShim(1., 1.), 0); - EXPECT_EQ(solver.callTimestepMonitorsShim(1., -1.), 0); - EXPECT_EQ(solver.callTimestepMonitorsShim(-1., -1.), 2); + EXPECT_EQ(solver.call_timestep_monitors(1., 1.), 0); + EXPECT_EQ(solver.call_timestep_monitors(1., -1.), 0); + EXPECT_EQ(solver.call_timestep_monitors(-1., -1.), 2); } TEST_F(SolverTest, DontCallTimestepMonitors) { @@ -862,9 +862,9 @@ TEST_F(SolverTest, DontCallTimestepMonitors) { EXPECT_NO_THROW(solver.addTimestepMonitor(timestep_monitor1)); EXPECT_NO_THROW(solver.addTimestepMonitor(timestep_monitor2)); - EXPECT_EQ(solver.callTimestepMonitorsShim(1., 1.), 0); - EXPECT_EQ(solver.callTimestepMonitorsShim(1., -1.), 0); - EXPECT_EQ(solver.callTimestepMonitorsShim(-1., -1.), 0); + EXPECT_EQ(solver.call_timestep_monitors(1., 1.), 0); + EXPECT_EQ(solver.call_timestep_monitors(1., -1.), 0); + EXPECT_EQ(solver.call_timestep_monitors(-1., -1.), 0); } TEST_F(SolverTest, BasicSolve) { @@ -969,7 +969,7 @@ TEST_F(SolverTest, SolveFixDefaultTimestep) { EXPECT_TRUE(solver.init_called); EXPECT_TRUE(solver.run_called); - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, 99, 0)); + EXPECT_NO_THROW(solver.call_monitors(0.0, 99, 0)); EXPECT_EQ(default_timestep.last_called, 0); EXPECT_EQ(smaller_timestep.last_called, 9); @@ -1012,7 +1012,7 @@ TEST_F(SolverTest, SolveFixDefaultTimestepSmaller) { EXPECT_TRUE(solver.init_called); EXPECT_TRUE(solver.run_called); - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, 99, 0)); + EXPECT_NO_THROW(solver.call_monitors(0.0, 99, 0)); EXPECT_EQ(default_timestep.last_called, 99); EXPECT_EQ(smaller_timestep.last_called, 9); @@ -1035,7 +1035,7 @@ TEST_F(SolverTest, SolveFixDefaultTimestepLarger) { EXPECT_TRUE(solver.init_called); EXPECT_TRUE(solver.run_called); - EXPECT_NO_THROW(solver.callMonitorsShim(0.0, 99, 0)); + EXPECT_NO_THROW(solver.call_monitors(0.0, 99, 0)); EXPECT_EQ(default_timestep.last_called, 9); EXPECT_EQ(smaller_timestep.last_called, 99); From 26111454fe98aa1ca412786db75432098199836d Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 5 May 2021 14:48:02 +0200 Subject: [PATCH 172/428] Option to change max nonlinear iterations in CVODE --- src/solver/impls/cvode/cvode.cxx | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/solver/impls/cvode/cvode.cxx 
b/src/solver/impls/cvode/cvode.cxx index e5914f8eeb..a63ca05e93 100644 --- a/src/solver/impls/cvode/cvode.cxx +++ b/src/solver/impls/cvode/cvode.cxx @@ -285,6 +285,14 @@ int CvodeSolver::init(int nout, BoutReal tstep) { CVodeSetMaxOrd(cvode_mem, mxorder); } + const auto max_nonlinear_iterations = (*options)["max_nonlinear_iterations"] + .doc("Maximum number of nonlinear iterations allowed by CVODE before reducing " + "timestep. CVODE default (used if this option is negative) is 3.") + .withDefault(-1); + if (max_nonlinear_iterations > 0) { + CVodeSetMaxNonlinIters(cvode_mem, max_nonlinear_iterations); + } + /// Newton method can include Preconditioners and Jacobian function if (!func_iter) { output_info.write("\tUsing Newton iteration\n"); From f00935ee43edd5476bf93babd2d60e269d74f407 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 5 May 2021 21:47:08 +0200 Subject: [PATCH 173/428] Option to set positivity constraints in CVODE Uses CVodeSetConstraints function to allow constraints to be applied to variables to be: none (default, no constraint), positive, non_negative, negative, or non_positive. --- src/solver/impls/cvode/cvode.cxx | 97 +++++++++++++++++++++++++++----- src/solver/impls/cvode/cvode.hxx | 10 ++-- 2 files changed, 89 insertions(+), 18 deletions(-) diff --git a/src/solver/impls/cvode/cvode.cxx b/src/solver/impls/cvode/cvode.cxx index a63ca05e93..274df99d32 100644 --- a/src/solver/impls/cvode/cvode.cxx +++ b/src/solver/impls/cvode/cvode.cxx @@ -35,6 +35,7 @@ #include "options.hxx" #include "output.hxx" #include "unused.hxx" +#include "bout/bout_enum_class.hxx" #include "bout/mesh.hxx" #include "utils.hxx" @@ -68,6 +69,9 @@ using CVODEINT = sunindextype; #endif #endif +BOUT_ENUM_CLASS(positivity_constraint, none, positive, non_negative, negative, + non_positive); + static int cvode_rhs(BoutReal t, N_Vector u, N_Vector du, void* user_data); static int cvode_bbd_rhs(CVODEINT Nlocal, BoutReal t, N_Vector u, N_Vector du, void* user_data); @@ -251,7 +255,7 @@ int CvodeSolver::init(int nout, BoutReal tstep) { if (abstolvec == nullptr) throw BoutException("SUNDIALS memory allocation (abstol vector) failed\n"); - set_abstol_values(NV_DATA_P(abstolvec), f2dtols, f3dtols); + set_vector_option_values(NV_DATA_P(abstolvec), f2dtols, f3dtols); if (CVodeSVtolerances(cvode_mem, reltol, abstolvec) < 0) throw BoutException("CVodeSVtolerances failed\n"); @@ -293,6 +297,72 @@ int CvodeSolver::init(int nout, BoutReal tstep) { CVodeSetMaxNonlinIters(cvode_mem, max_nonlinear_iterations); } + const auto apply_positivity_constraints = (*options)["apply_positivity_constraints"] + .doc("Use CVODE function CVodeSetConstraints to constrain variables - the constraint " + "to be applied is set by the positivity_constraint option in the subsection for " + "each variable") + .withDefault(false); + if (apply_positivity_constraints) { + std::vector f2d_constraints; + f2d_constraints.reserve(f2d.size()); + std::transform(begin(f2d), end(f2d), std::back_inserter(f2d_constraints), + [](const VarStr& f2) { + auto f2_options = Options::root()[f2.name]; + const auto value = f2_options["positivity_constraint"] + .doc("Constraint to apply to this variable if " + "solver:apply_positivity_constraint=true. 
" + "Possible values are: none (default), " + "positive, non_negative, negative, or " + "non_positive.") + .withDefault(positivity_constraint::none); + switch (value) { + case positivity_constraint::none: return 0.0; + case positivity_constraint::positive: return 2.0; + case positivity_constraint::non_negative: return 1.0; + case positivity_constraint::negative: return -2.0; + case positivity_constraint::non_positive: return -1.0; + default: throw BoutException("Incorrect value for " + "positivity_constraint"); + } + }); + + std::vector f3d_constraints; + f3d_constraints.reserve(f3d.size()); + std::transform(begin(f3d), end(f3d), std::back_inserter(f3d_constraints), + [](const VarStr& f3) { + auto f3_options = Options::root()[f3.name]; + const auto value = f3_options["positivity_constraint"] + .doc("Constraint to apply to this variable if " + "solver:apply_positivity_constraint=true. " + "Possible values are: none (default), " + "positive, non_negative, negative, or " + "non_positive.") + .withDefault(positivity_constraint::none); + switch (value) { + case positivity_constraint::none: return 0.0; + case positivity_constraint::positive: return 2.0; + case positivity_constraint::non_negative: return 1.0; + case positivity_constraint::negative: return -2.0; + case positivity_constraint::non_positive: return -1.0; + default: throw BoutException("Incorrect value for " + "positivity_constraint"); + } + }); + + N_Vector constraints_vec = N_VNew_Parallel(BoutComm::get(), local_N, neq); + if (constraints_vec == nullptr) + throw BoutException("SUNDIALS memory allocation (positivity constraints vector) " + "failed\n"); + + set_vector_option_values(NV_DATA_P(constraints_vec), f2d_constraints, + f3d_constraints); + + if (CVodeSetConstraints(cvode_mem, constraints_vec) < 0) + throw BoutException("CVodeSetConstraints failed\n"); + + N_VDestroy_Parallel(constraints_vec); + } + /// Newton method can include Preconditioners and Jacobian function if (!func_iter) { output_info.write("\tUsing Newton iteration\n"); @@ -654,33 +724,34 @@ static int cvode_jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y, N_Vector U } /************************************************************************** - * vector abstol functions + * CVODE vector option functions **************************************************************************/ -void CvodeSolver::set_abstol_values(BoutReal* abstolvec_data, - std::vector& f2dtols, - std::vector& f3dtols) { - int p = 0; // Counter for location in abstolvec_data array +void CvodeSolver::set_vector_option_values(BoutReal* option_data, + std::vector& f2dtols, + std::vector& f3dtols) { + int p = 0; // Counter for location in option_data array // All boundaries for (const auto& i2d : bout::globals::mesh->getRegion2D("RGN_BNDRY")) { - loop_abstol_values_op(i2d, abstolvec_data, p, f2dtols, f3dtols, true); + loop_vector_option_values_op(i2d, option_data, p, f2dtols, f3dtols, true); } // Bulk of points for (const auto& i2d : bout::globals::mesh->getRegion2D("RGN_NOBNDRY")) { - loop_abstol_values_op(i2d, abstolvec_data, p, f2dtols, f3dtols, false); + loop_vector_option_values_op(i2d, option_data, p, f2dtols, f3dtols, false); } } -void CvodeSolver::loop_abstol_values_op(Ind2D UNUSED(i2d), BoutReal* abstolvec_data, - int& p, std::vector& f2dtols, - std::vector& f3dtols, bool bndry) { +void CvodeSolver::loop_vector_option_values_op(Ind2D UNUSED(i2d), BoutReal* option_data, + int& p, std::vector& f2dtols, + std::vector& f3dtols, bool bndry) +{ // Loop over 2D variables for 
(std::vector::size_type i = 0; i < f2dtols.size(); i++) { if (bndry && !f2d[i].evolve_bndry) { continue; } - abstolvec_data[p] = f2dtols[i]; + option_data[p] = f2dtols[i]; p++; } @@ -690,7 +761,7 @@ void CvodeSolver::loop_abstol_values_op(Ind2D UNUSED(i2d), BoutReal* abstolvec_d if (bndry && !f3d[i].evolve_bndry) { continue; } - abstolvec_data[p] = f3dtols[i]; + option_data[p] = f3dtols[i]; p++; } } diff --git a/src/solver/impls/cvode/cvode.hxx b/src/solver/impls/cvode/cvode.hxx index a628c6c041..d56a6b5491 100644 --- a/src/solver/impls/cvode/cvode.hxx +++ b/src/solver/impls/cvode/cvode.hxx @@ -104,11 +104,11 @@ private: bool cvode_initialised = false; - void set_abstol_values(BoutReal* abstolvec_data, std::vector& f2dtols, - std::vector& f3dtols); - void loop_abstol_values_op(Ind2D i2d, BoutReal* abstolvec_data, int& p, - std::vector& f2dtols, - std::vector& f3dtols, bool bndry); + void set_vector_option_values(BoutReal* abstolvec_data, std::vector& f2dtols, + std::vector& f3dtols); + void loop_vector_option_values_op(Ind2D i2d, BoutReal* abstolvec_data, int& p, + std::vector& f2dtols, + std::vector& f3dtols, bool bndry); #if SUNDIALS_VERSION_MAJOR >= 3 /// SPGMR solver structure SUNLinearSolver sun_solver{nullptr}; From 895b6a0d0fd23eda6c93204f641b0ba6df8c8e1b Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 5 May 2021 23:11:33 +0200 Subject: [PATCH 174/428] Check version of SUNDIALS is new enough for constraints --- src/solver/impls/cvode/cvode.cxx | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/solver/impls/cvode/cvode.cxx b/src/solver/impls/cvode/cvode.cxx index 274df99d32..23ba1ab4ae 100644 --- a/src/solver/impls/cvode/cvode.cxx +++ b/src/solver/impls/cvode/cvode.cxx @@ -302,6 +302,12 @@ int CvodeSolver::init(int nout, BoutReal tstep) { "to be applied is set by the positivity_constraint option in the subsection for " "each variable") .withDefault(false); +#if not (SUNDIALS_VERSION_MAJOR >= 3 and SUNDIALS_VERSION_MINOR >= 2) + if (apply_positivity_constraints) { + throw BoutException("The apply_positivity_constraints option is only available with " + "SUNDIALS>=3.2.0"); + } +#else if (apply_positivity_constraints) { std::vector f2d_constraints; f2d_constraints.reserve(f2d.size()); @@ -362,6 +368,7 @@ int CvodeSolver::init(int nout, BoutReal tstep) { N_VDestroy_Parallel(constraints_vec); } +#endif /// Newton method can include Preconditioners and Jacobian function if (!func_iter) { From 7b79b9421cca70fa3fe18c7e972bd1158ed04a6a Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 5 May 2021 23:20:05 +0200 Subject: [PATCH 175/428] Make method parameter names consistent (clang-tidy fix) --- src/solver/impls/cvode/cvode.hxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/solver/impls/cvode/cvode.hxx b/src/solver/impls/cvode/cvode.hxx index d56a6b5491..272d4ce4d0 100644 --- a/src/solver/impls/cvode/cvode.hxx +++ b/src/solver/impls/cvode/cvode.hxx @@ -104,9 +104,9 @@ private: bool cvode_initialised = false; - void set_vector_option_values(BoutReal* abstolvec_data, std::vector& f2dtols, + void set_vector_option_values(BoutReal* option_data, std::vector& f2dtols, std::vector& f3dtols); - void loop_vector_option_values_op(Ind2D i2d, BoutReal* abstolvec_data, int& p, + void loop_vector_option_values_op(Ind2D i2d, BoutReal* option_data, int& p, std::vector& f2dtols, std::vector& f3dtols, bool bndry); #if SUNDIALS_VERSION_MAJOR >= 3 From 8d9e0545aede92c7c1b878ba6f212e18e53b3034 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Thu, 6 May 2021 20:06:16 
+0200 Subject: [PATCH 176/428] Move constraint creation to utility method Reduces code duplication. --- src/solver/impls/cvode/cvode.cxx | 77 +++++++++++++------------------- src/solver/impls/cvode/cvode.hxx | 2 + 2 files changed, 34 insertions(+), 45 deletions(-) diff --git a/src/solver/impls/cvode/cvode.cxx b/src/solver/impls/cvode/cvode.cxx index 23ba1ab4ae..4b5dfb4ddf 100644 --- a/src/solver/impls/cvode/cvode.cxx +++ b/src/solver/impls/cvode/cvode.cxx @@ -309,51 +309,8 @@ int CvodeSolver::init(int nout, BoutReal tstep) { } #else if (apply_positivity_constraints) { - std::vector f2d_constraints; - f2d_constraints.reserve(f2d.size()); - std::transform(begin(f2d), end(f2d), std::back_inserter(f2d_constraints), - [](const VarStr& f2) { - auto f2_options = Options::root()[f2.name]; - const auto value = f2_options["positivity_constraint"] - .doc("Constraint to apply to this variable if " - "solver:apply_positivity_constraint=true. " - "Possible values are: none (default), " - "positive, non_negative, negative, or " - "non_positive.") - .withDefault(positivity_constraint::none); - switch (value) { - case positivity_constraint::none: return 0.0; - case positivity_constraint::positive: return 2.0; - case positivity_constraint::non_negative: return 1.0; - case positivity_constraint::negative: return -2.0; - case positivity_constraint::non_positive: return -1.0; - default: throw BoutException("Incorrect value for " - "positivity_constraint"); - } - }); - - std::vector f3d_constraints; - f3d_constraints.reserve(f3d.size()); - std::transform(begin(f3d), end(f3d), std::back_inserter(f3d_constraints), - [](const VarStr& f3) { - auto f3_options = Options::root()[f3.name]; - const auto value = f3_options["positivity_constraint"] - .doc("Constraint to apply to this variable if " - "solver:apply_positivity_constraint=true. " - "Possible values are: none (default), " - "positive, non_negative, negative, or " - "non_positive.") - .withDefault(positivity_constraint::none); - switch (value) { - case positivity_constraint::none: return 0.0; - case positivity_constraint::positive: return 2.0; - case positivity_constraint::non_negative: return 1.0; - case positivity_constraint::negative: return -2.0; - case positivity_constraint::non_positive: return -1.0; - default: throw BoutException("Incorrect value for " - "positivity_constraint"); - } - }); + auto f2d_constraints = create_constraints(f2d); + auto f3d_constraints = create_constraints(f3d); N_Vector constraints_vec = N_VNew_Parallel(BoutComm::get(), local_N, neq); if (constraints_vec == nullptr) @@ -464,6 +421,36 @@ int CvodeSolver::init(int nout, BoutReal tstep) { return 0; } +template +std::vector CvodeSolver::create_constraints( + const std::vector>& fields) { + + std::vector constraints; + constraints.reserve(fields.size()); + std::transform(begin(fields), end(fields), std::back_inserter(constraints), + [](const VarStr& f) { + auto f_options = Options::root()[f.name]; + const auto value = f_options["positivity_constraint"] + .doc("Constraint to apply to this variable if " + "solver:apply_positivity_constraint=true. 
" + "Possible values are: none (default), " + "positive, non_negative, negative, or " + "non_positive.") + .withDefault(positivity_constraint::none); + switch (value) { + case positivity_constraint::none: return 0.0; + case positivity_constraint::positive: return 2.0; + case positivity_constraint::non_negative: return 1.0; + case positivity_constraint::negative: return -2.0; + case positivity_constraint::non_positive: return -1.0; + default: throw BoutException("Incorrect value for " + "positivity_constraint"); + } + }); + return constraints; +} + + /************************************************************************** * Run - Advance time **************************************************************************/ diff --git a/src/solver/impls/cvode/cvode.hxx b/src/solver/impls/cvode/cvode.hxx index 272d4ce4d0..eb565b25b9 100644 --- a/src/solver/impls/cvode/cvode.hxx +++ b/src/solver/impls/cvode/cvode.hxx @@ -109,6 +109,8 @@ private: void loop_vector_option_values_op(Ind2D i2d, BoutReal* option_data, int& p, std::vector& f2dtols, std::vector& f3dtols, bool bndry); + template + std::vector create_constraints(const std::vector>& fields); #if SUNDIALS_VERSION_MAJOR >= 3 /// SPGMR solver structure SUNLinearSolver sun_solver{nullptr}; From f94c2b69be60e0401f53511993c63d9aec1080ee Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 2 Jun 2021 19:56:17 +0100 Subject: [PATCH 177/428] Rename `max_nonlinear_it` to `max_nonlinear_iterations` `imexbdf2` and `beuler` solvers had a `max_nonlinear_it` option, while `cvode` had `max_nonlinear_iterations`. Standardise on the longer name. Also adds a note that `cvode` has `max_nonlinear_iterations` to the manual. --- manual/sphinx/user_docs/time_integration.rst | 70 ++++++++++---------- src/solver/impls/imex-bdf2/imex-bdf2.cxx | 5 +- src/solver/impls/snes/snes.cxx | 4 +- 3 files changed, 40 insertions(+), 39 deletions(-) diff --git a/manual/sphinx/user_docs/time_integration.rst b/manual/sphinx/user_docs/time_integration.rst index 5ed1247b44..adc26de818 100644 --- a/manual/sphinx/user_docs/time_integration.rst +++ b/manual/sphinx/user_docs/time_integration.rst @@ -71,41 +71,41 @@ given in table :numref:`tab-solveropts`. .. _tab-solveropts: .. table:: Time integration solver options - +------------------+--------------------------------------------+-------------------------------------+ - | Option | Description | Solvers used | - +==================+============================================+=====================================+ - | atol | Absolute tolerance | rk4, pvode, cvode, ida, imexbdf2, | - | | | beuler | - +------------------+--------------------------------------------+-------------------------------------+ - | rtol | Relative tolerance | rk4, pvode, cvode, ida, imexbdf2, | - | | | beuler | - +------------------+--------------------------------------------+-------------------------------------+ - | mxstep | Maximum internal steps | rk4, imexbdf2 | - | | per output step | | - +------------------+--------------------------------------------+-------------------------------------+ - | max\_timestep | Maximum timestep | rk4, cvode | - +------------------+--------------------------------------------+-------------------------------------+ - | timestep | Starting timestep | rk4, euler, imexbdf2, beuler | - +------------------+--------------------------------------------+-------------------------------------+ - | adaptive | Adapt timestep? 
(Y/N) | rk4, imexbdf2 | - +------------------+--------------------------------------------+-------------------------------------+ - | use\_precon | Use a preconditioner? (Y/N) | pvode, cvode, ida, imexbdf2 | - +------------------+--------------------------------------------+-------------------------------------+ - | mudq, mldq | BBD preconditioner settings | pvode, cvode, ida | - +------------------+--------------------------------------------+-------------------------------------+ - | mukeep, mlkeep | | | - +------------------+--------------------------------------------+-------------------------------------+ - | maxl | Maximum number of linear iterations | cvode, imexbdf2 | - +------------------+--------------------------------------------+-------------------------------------+ - | max_nonlinear_it | Maximum number of nonlinear iterations | imexbdf2, beuler | - +------------------+--------------------------------------------+-------------------------------------+ - | use\_jacobian | Use user-supplied Jacobian? (Y/N) | cvode | - +------------------+--------------------------------------------+-------------------------------------+ - | adams\_moulton | Use Adams-Moulton method | cvode | - | | rather than BDF | | - +------------------+--------------------------------------------+-------------------------------------+ - | diagnose | Collect and print additional diagnostics | cvode, imexbdf2, beuler | - +------------------+--------------------------------------------+-------------------------------------+ + +--------------------------+--------------------------------------------+-------------------------------------+ + | Option | Description | Solvers used | + +==========================+============================================+=====================================+ + | atol | Absolute tolerance | rk4, pvode, cvode, ida, imexbdf2, | + | | | beuler | + +--------------------------+--------------------------------------------+-------------------------------------+ + | rtol | Relative tolerance | rk4, pvode, cvode, ida, imexbdf2, | + | | | beuler | + +--------------------------+--------------------------------------------+-------------------------------------+ + | mxstep | Maximum internal steps | rk4, imexbdf2 | + | | per output step | | + +--------------------------+--------------------------------------------+-------------------------------------+ + | max\_timestep | Maximum timestep | rk4, cvode | + +--------------------------+--------------------------------------------+-------------------------------------+ + | timestep | Starting timestep | rk4, euler, imexbdf2, beuler | + +--------------------------+--------------------------------------------+-------------------------------------+ + | adaptive | Adapt timestep? (Y/N) | rk4, imexbdf2 | + +--------------------------+--------------------------------------------+-------------------------------------+ + | use\_precon | Use a preconditioner? 
(Y/N) | pvode, cvode, ida, imexbdf2 | + +--------------------------+--------------------------------------------+-------------------------------------+ + | mudq, mldq | BBD preconditioner settings | pvode, cvode, ida | + +--------------------------+--------------------------------------------+-------------------------------------+ + | mukeep, mlkeep | | | + +--------------------------+--------------------------------------------+-------------------------------------+ + | maxl | Maximum number of linear iterations | cvode, imexbdf2 | + +--------------------------+--------------------------------------------+-------------------------------------+ + | max_nonlinear_iterations | Maximum number of nonlinear iterations | cvode, imexbdf2, beuler | + +--------------------------+--------------------------------------------+-------------------------------------+ + | use\_jacobian | Use user-supplied Jacobian? (Y/N) | cvode | + +--------------------------+--------------------------------------------+-------------------------------------+ + | adams\_moulton | Use Adams-Moulton method | cvode | + | | rather than BDF | | + +--------------------------+--------------------------------------------+-------------------------------------+ + | diagnose | Collect and print additional diagnostics | cvode, imexbdf2, beuler | + +--------------------------+--------------------------------------------+-------------------------------------+ | diff --git a/src/solver/impls/imex-bdf2/imex-bdf2.cxx b/src/solver/impls/imex-bdf2/imex-bdf2.cxx index 6c12b9c01d..7294af5146 100644 --- a/src/solver/impls/imex-bdf2/imex-bdf2.cxx +++ b/src/solver/impls/imex-bdf2/imex-bdf2.cxx @@ -663,8 +663,9 @@ void IMEXBDF2::constructSNES(SNES *snesIn){ BoutReal atol, rtol; // Tolerances for SNES solver options->get("atol", atol, 1e-16); options->get("rtol", rtol, 1e-10); - int max_nonlinear_it; // Maximum nonlinear (SNES) iterations - options->get("max_nonlinear_it", max_nonlinear_it, 5); + int max_nonlinear_it = (*options)["max_nonlinear_iterations"] + .doc("Maximum number of nonlinear iterations per SNES solve") + .withDefault(5); SNESSetTolerances(*snesIn,atol,rtol,PETSC_DEFAULT,max_nonlinear_it,PETSC_DEFAULT); ///////////////////////////////////////////////////// diff --git a/src/solver/impls/snes/snes.cxx b/src/solver/impls/snes/snes.cxx index 8cf81996ec..ef78c16562 100644 --- a/src/solver/impls/snes/snes.cxx +++ b/src/solver/impls/snes/snes.cxx @@ -109,8 +109,8 @@ int SNESSolver::init(int nout, BoutReal tstep) { BoutReal rtol = (*options)["rtol"].doc("Relative tolerance in SNES solve").withDefault(1e-10); - int maxits = (*options)["max_nonlinear_it"] - .doc("Maximum number of iterations per SNES solve") + int maxits = (*options)["max_nonlinear_iterations"] + .doc("Maximum number of nonlinear iterations per SNES solve") .withDefault(50); upper_its = (*options)["upper_its"] From 41e871e520d8d29ff8a6e1c9dc7d833d1ff84a2e Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 2 Jun 2021 20:11:25 +0100 Subject: [PATCH 178/428] Document options for CVODE positivity constraints [skip ci] --- manual/sphinx/user_docs/time_integration.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/manual/sphinx/user_docs/time_integration.rst b/manual/sphinx/user_docs/time_integration.rst index adc26de818..d89f919179 100644 --- a/manual/sphinx/user_docs/time_integration.rst +++ b/manual/sphinx/user_docs/time_integration.rst @@ -146,6 +146,13 @@ iterations becomes large, this may be an indication that the system is poorly conditioned, and a 
preconditioner might help improve performance. See :ref:`sec-preconditioning`. +CVODE can set constraints to keep some quantities positive, non-negative, +negative or non-positive. These constraints can be activated by setting the +option ``solver:apply_positivity_constraints=true``, and then in the section +for a certain variable (e.g. ``[n]``), setting the option +``positivity_constraint`` to one of ``positive``, ``non_negative``, +``negative``, or ``non_positive``. + IMEX-BDF2 --------- From 0b32fe34a61df2c630583f54dcf0bdfb32c8cd0f Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 2 Jun 2021 20:45:28 +0100 Subject: [PATCH 179/428] Fix apidoc_args --- manual/sphinx/conf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/manual/sphinx/conf.py b/manual/sphinx/conf.py index ce69f3ab6f..dc83eac5bf 100755 --- a/manual/sphinx/conf.py +++ b/manual/sphinx/conf.py @@ -68,6 +68,7 @@ def __getattr__(cls, name): project="BOUT++", rootpath='../doxygen/bout/xml', suffix='rst', + members=True, quiet=False) apidoc_args.rootpath = os.path.abspath(apidoc_args.rootpath) if not os.path.isdir(apidoc_args.destdir): From 4e7b5dad033205e408b2ed5c00357b6562d31918 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Wed, 28 Jul 2021 14:26:36 +0200 Subject: [PATCH 180/428] Update locale --- locale/de/libbout.po | 88 ++++++++++++++++++++--------------------- locale/es/libbout.po | 88 ++++++++++++++++++++--------------------- locale/fr/libbout.po | 88 ++++++++++++++++++++--------------------- locale/libbout.pot | 88 ++++++++++++++++++++--------------------- locale/zh_CN/libbout.po | 88 ++++++++++++++++++++--------------------- locale/zh_TW/libbout.po | 88 ++++++++++++++++++++--------------------- 6 files changed, 264 insertions(+), 264 deletions(-) diff --git a/locale/de/libbout.po b/locale/de/libbout.po index fdbcb327da..b5507ba828 100644 --- a/locale/de/libbout.po +++ b/locale/de/libbout.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: BOUT++ 4.2.1\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2020-09-23 17:43+0100\n" +"POT-Creation-Date: 2021-07-28 14:25+0200\n" "PO-Revision-Date: 2020-03-19 12:42+0000\n" "Last-Translator: David \n" "Language-Team: German\n" @@ -185,14 +185,14 @@ msgstr "\tOpenMP Parallelisierung mit %d Threads ist aktiviert\n" #. Mark the option as used #. Option not found -#: ../src/sys/options.cxx:183 ../src/sys/options.cxx:238 -#: ../src/sys/options.cxx:280 ../src/sys/options.cxx:319 +#: ../src/sys/options.cxx:195 ../src/sys/options.cxx:250 +#: ../src/sys/options.cxx:292 ../src/sys/options.cxx:331 #: ../include/options.hxx:406 ../include/options.hxx:438 -#: ../include/options.hxx:460 ../include/options.hxx:635 +#: ../include/options.hxx:460 ../include/options.hxx:658 msgid "\tOption " msgstr "\tOption " -#: ../src/sys/options.cxx:309 +#: ../src/sys/options.cxx:321 #, c-format msgid "\tOption '%s': Boolean expected. Got '%s'\n" msgstr "\tOption '%s': Boolscherwert erwartet, '%s' gefunden\n" @@ -276,7 +276,7 @@ msgstr "" " -v, --verbose\t\tWortreicherer Ausgabe\n" " -q, --quiet\t\tNur wichtigere Ausgaben anzeigen\n" -#: ../src/solver/solver.cxx:491 +#: ../src/solver/solver.cxx:509 #, c-format msgid "" "\n" @@ -285,7 +285,7 @@ msgstr "" "\n" "Simulation beendet um %s\n" -#: ../src/solver/solver.cxx:456 +#: ../src/solver/solver.cxx:474 #, c-format msgid "" "\n" @@ -318,7 +318,7 @@ msgstr "" "Weitere Eingabeparameter sind in dem Manual und dem Quellcode (z.B. 
%s.cxx) " "des Physikmoduls definiert.\n" -#: ../include/options.hxx:638 +#: ../include/options.hxx:661 msgid ") overwritten with:" msgstr ") überschrieben mit:" @@ -326,7 +326,7 @@ msgstr ") überschrieben mit:" msgid "4 of 8" msgstr "" -#: ../src/sys/options.cxx:464 +#: ../src/sys/options.cxx:476 msgid "All options used\n" msgstr "Alle genutzten Optionen\n" @@ -350,7 +350,7 @@ msgstr "" "%d Punkte in der X-Richtung können nicht gleichmässig zwischen %d Prozessen " "verteilt werden\n" -#: ../src/bout++.cxx:691 +#: ../src/bout++.cxx:697 msgid "Check if a file exists, and exit if it does." msgstr "" @@ -414,7 +414,7 @@ msgstr "" "Der Befehl 'bout-log-color' konnte nicht ausgeführt werden. Stellen Sie " "sicher, dass er sich in $PATH befindet.\n" -#: ../src/solver/solver.cxx:574 +#: ../src/solver/solver.cxx:669 #, c-format msgid "Couldn't add Monitor: %g is not a multiple of %g!" msgstr "" @@ -436,13 +436,13 @@ msgstr "Die Region '%s' ist nicht in regionMap3D" msgid "Couldn't find region %s in regionMapPerp" msgstr "Die Region '%s' ist nicht in regionMapPerp" -#: ../src/sys/options.cxx:268 +#: ../src/sys/options.cxx:280 #, c-format msgid "Couldn't get BoutReal from option %s = '%s'" msgstr "" "Die Option %s = '%s' konnte nicht als Gleitkommazahl interpretiert werden." -#: ../src/sys/options.cxx:216 +#: ../src/sys/options.cxx:228 #, c-format msgid "Couldn't get integer from option %s = '%s'" msgstr "Die Option %s = '%s' konnte nicht als ganze Zahl interpretiert werden." @@ -457,7 +457,7 @@ msgstr "Der Datenordner \"%s\" existiert nicht oder ist nicht lesbar\n" msgid "DataDir \"%s\" is not a directory\n" msgstr "\"%s\" soll als Datenordner verwendet werden, ist jedoch kein Ordner\n" -#: ../src/solver/solver.cxx:524 +#: ../src/solver/solver.cxx:587 msgid "ERROR: Solver is already initialised\n" msgstr "FEHLER: Der Integrator ist bereits initialisiert.\n" @@ -475,7 +475,7 @@ msgstr "Es wurde ein Fehler beim Schreiben der Einstellungsdatei gefunden" msgid "Error: nx must be greater than 2 times MXG (2 * %d)" msgstr "Fehler: nx muss größer als 2 mal MXG sein (2 * %d)" -#: ../src/solver/solver.cxx:449 +#: ../src/solver/solver.cxx:454 msgid "Failed to initialise solver-> Aborting\n" msgstr "" "Der Integrator konnte nicht initialisiert werden. Der Prozess wird " @@ -486,7 +486,7 @@ msgstr "" msgid "Finding value for NXPE (ideal = %f)\n" msgstr "Suche NXPE Wert (optimal = %f)\n" -#: ../src/solver/solver.cxx:526 +#: ../src/solver/solver.cxx:589 msgid "Initialising solver\n" msgstr "Integrator wird initialisiert\n" @@ -521,15 +521,15 @@ msgstr "Das Gitter muss ny enthalten" msgid "Missing integer array %s\n" msgstr "Ganzzahlen-Array '%s' nicht gesetzt\n" -#: ../src/solver/solver.cxx:678 +#: ../src/solver/solver.cxx:773 msgid "Monitor signalled to quit" msgstr "Der Monitor signaliserte die Beendigung" -#: ../src/solver/solver.cxx:685 +#: ../src/solver/solver.cxx:780 msgid "Monitor signalled to quit\n" msgstr "Beendigung durch Monitor\n" -#: ../src/bout++.cxx:695 +#: ../src/bout++.cxx:701 msgid "Name of file whose existence triggers a stop" msgstr "" @@ -550,29 +550,29 @@ msgstr "" "Anzahl an Prozessoren (%d) nicht teilbar durch Anzahl in y Richtung (%d)\n" #. Less than 2 time-steps left -#: ../src/bout++.cxx:739 +#: ../src/bout++.cxx:745 #, c-format msgid "Only %e seconds (%.2f steps) left. Quitting\n" msgstr "Nur noch %e Sekunden (%.2f Schritte) verfügbar. 
Abbruch\n" -#: ../src/sys/options.cxx:175 ../src/sys/options.cxx:195 -#: ../src/sys/options.cxx:250 ../src/sys/options.cxx:292 +#: ../src/sys/options.cxx:187 ../src/sys/options.cxx:207 +#: ../src/sys/options.cxx:262 ../src/sys/options.cxx:304 #, c-format msgid "Option %s has no value" msgstr "Der Option '%s' wurde kein Wert zugewiesen" -#: ../src/sys/options.cxx:72 +#: ../src/sys/options.cxx:78 #, c-format msgid "Option %s is not a section" msgstr "Die Option '%s' ist keine Gruppierung" #. Doesn't exist -#: ../src/sys/options.cxx:83 +#: ../src/sys/options.cxx:95 #, c-format msgid "Option %s:%s does not exist" msgstr "Die Option %s:%s exisitiert nicht" -#: ../include/options.hxx:643 +#: ../include/options.hxx:666 #, c-format msgid "" "Options: Setting a value from same source (%s) to new value '%s' - old value " @@ -610,12 +610,12 @@ msgstr "Perp Region '%s' hinzugefügt" msgid "Revision: %s\n" msgstr "Revision: %s\n" -#: ../src/solver/solver.cxx:492 +#: ../src/solver/solver.cxx:510 msgid "Run time : " msgstr "Dauer: " #. / Run the solver -#: ../src/solver/solver.cxx:453 +#: ../src/solver/solver.cxx:467 msgid "" "Running simulation\n" "\n" @@ -627,7 +627,7 @@ msgstr "" msgid "Signal" msgstr "" -#: ../src/bout++.cxx:709 +#: ../src/bout++.cxx:715 msgid "" "Sim Time | RHS evals | Wall Time | Calc Inv Comm I/O SOLVER\n" "\n" @@ -636,7 +636,7 @@ msgstr "" "Integrator\n" "\n" -#: ../src/bout++.cxx:712 +#: ../src/bout++.cxx:718 msgid "" "Sim Time | RHS_e evals | RHS_I evals | Wall Time | Calc Inv " "Comm I/O SOLVER\n" @@ -646,18 +646,18 @@ msgstr "" "Komm I/O Integrator\n" "\n" -#: ../src/solver/solver.cxx:444 +#: ../src/solver/solver.cxx:449 #, c-format msgid "Solver running for %d outputs with monitor timestep of %e\n" msgstr "" "Integriere mit einem `Monitor`-Zeitschritt von %2$e für %1$d Aufrufe.\n" -#: ../src/solver/solver.cxx:440 +#: ../src/solver/solver.cxx:445 #, c-format msgid "Solver running for %d outputs with output timestep of %e\n" msgstr "Integriere %d Zeitschritte von je %e\n" -#: ../src/solver/solver.cxx:589 +#: ../src/solver/solver.cxx:684 #, c-format msgid "" "Solver::addMonitor: Cannot reduce timestep (from %g to %g) after init is " @@ -666,7 +666,7 @@ msgstr "" "Der Integrator kann den Zeitschritt nicht von %g auf %g reduzieren, nachdem " "er initialisiert wurde!" -#: ../src/solver/solver.cxx:1037 +#: ../src/solver/solver.cxx:1132 #, c-format msgid "" "Time derivative at wrong location - Field is at %s, derivative is at %s for " @@ -675,7 +675,7 @@ msgstr "" "Die zeitliche Ableitung ist an der falschen Stelle. Das Feld '%3$s' ist an " "Position %1$s, während die Ableitung an Position %2$s ist.\n" -#: ../src/solver/solver.cxx:1259 +#: ../src/solver/solver.cxx:1354 #, c-format msgid "Time derivative for variable '%s' not set" msgstr "Zeitliche Ableitung für Variable '%s' nicht gesetzt" @@ -703,7 +703,7 @@ msgstr "" "Unbekannte Paralleltransformation\n" "Gültige Optionen sind 'identity', 'shifted', 'fci'" -#: ../src/sys/options.cxx:466 +#: ../src/sys/options.cxx:478 msgid "Unused options:\n" msgstr "Ungenutzte Optionen:\n" @@ -743,42 +743,42 @@ msgstr "" "[VAR=WERT]\n" #. restart file should be written by physics model -#: ../src/solver/solver.cxx:700 +#: ../src/solver/solver.cxx:795 msgid "User signalled to quit. 
Returning\n" msgstr "Beendigung durch Benutzer.\n" -#: ../src/sys/options.cxx:231 +#: ../src/sys/options.cxx:243 #, c-format msgid "Value for option %s = %e is not an integer" msgstr "Wert der Option %s = %e ist keine Ganzzahl" -#: ../src/sys/options.cxx:273 +#: ../src/sys/options.cxx:285 #, c-format msgid "Value for option %s cannot be converted to a BoutReal" msgstr "Wert der Option %s ist keine Flieskommazahl" -#: ../src/sys/options.cxx:440 +#: ../src/sys/options.cxx:452 #, c-format msgid "Value for option %s cannot be converted to a Field2D" msgstr "Wert der Option %s ist keine Field2D" -#: ../src/sys/options.cxx:390 +#: ../src/sys/options.cxx:402 #, c-format msgid "Value for option %s cannot be converted to a Field3D" msgstr "Wert der Option %s ist keine Field3D" -#: ../src/sys/options.cxx:313 +#: ../src/sys/options.cxx:325 #, c-format msgid "Value for option %s cannot be converted to a bool" msgstr "Wert der Option %s ist keine Boolescher Wert" #. Another type which can't be converted -#: ../src/sys/options.cxx:222 +#: ../src/sys/options.cxx:234 #, c-format msgid "Value for option %s is not an integer" msgstr "Wert der Option %s ist keine Ganzzahl" -#: ../src/solver/solver.cxx:996 ../src/solver/solver.cxx:1000 +#: ../src/solver/solver.cxx:1091 ../src/solver/solver.cxx:1095 #, c-format msgid "Variable '%s' not initialised" msgstr "Variable '%s' ist nicht initialisiert" @@ -812,7 +812,7 @@ msgid "" "option extrapolate_y=false to disable this.\n" msgstr "" -#: ../src/bout++.cxx:686 +#: ../src/bout++.cxx:692 msgid "Wall time limit in hours. By default (< 0), no limit" msgstr "" diff --git a/locale/es/libbout.po b/locale/es/libbout.po index 9d8c83eb8f..ee6dedd10b 100644 --- a/locale/es/libbout.po +++ b/locale/es/libbout.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: BOUT++ 4.2.1\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2020-09-23 17:43+0100\n" +"POT-Creation-Date: 2021-07-28 14:25+0200\n" "PO-Revision-Date: 2019-02-11 12:46+0900\n" "Last-Translator: Marta \n" "Language-Team: Spanish\n" @@ -182,14 +182,14 @@ msgstr "\tParalelización en OpenMP activada, usando %d procesos (`threads`)\n" #. Mark the option as used #. Option not found -#: ../src/sys/options.cxx:183 ../src/sys/options.cxx:238 -#: ../src/sys/options.cxx:280 ../src/sys/options.cxx:319 +#: ../src/sys/options.cxx:195 ../src/sys/options.cxx:250 +#: ../src/sys/options.cxx:292 ../src/sys/options.cxx:331 #: ../include/options.hxx:406 ../include/options.hxx:438 -#: ../include/options.hxx:460 ../include/options.hxx:635 +#: ../include/options.hxx:460 ../include/options.hxx:658 msgid "\tOption " msgstr "\tOpción " -#: ../src/sys/options.cxx:309 +#: ../src/sys/options.cxx:321 #, c-format msgid "\tOption '%s': Boolean expected. Got '%s'\n" msgstr "\tOpción '%s': valor Booleano esperado. Se obtuvo '%s'\n" @@ -273,7 +273,7 @@ msgstr "" " -v, --verbose\t\tAumentar verbosidad\n" " -q, --quiet\t\tDisminuir verbosidad\n" -#: ../src/solver/solver.cxx:491 +#: ../src/solver/solver.cxx:509 #, c-format msgid "" "\n" @@ -282,7 +282,7 @@ msgstr "" "\n" "Ejecución finalizada en : %s\n" -#: ../src/solver/solver.cxx:456 +#: ../src/solver/solver.cxx:474 #, c-format msgid "" "\n" @@ -315,7 +315,7 @@ msgstr "" "Para todos los possibles parámetros input, vea el manual de usuario y/o el " "modelo físico fuente (ej. 
%s.cxx)\n" -#: ../include/options.hxx:638 +#: ../include/options.hxx:661 msgid ") overwritten with:" msgstr "" @@ -323,7 +323,7 @@ msgstr "" msgid "4 of 8" msgstr "" -#: ../src/sys/options.cxx:464 +#: ../src/sys/options.cxx:476 msgid "All options used\n" msgstr "Usando todas las opciones\n" @@ -346,7 +346,7 @@ msgstr "Regiones frontera en este procesador: " msgid "Cannot split %d X points equally between %d processors\n" msgstr "No se pueden dividir %d X points entre %d procesadores por igual\n" -#: ../src/bout++.cxx:691 +#: ../src/bout++.cxx:697 msgid "Check if a file exists, and exit if it does." msgstr "" @@ -411,7 +411,7 @@ msgstr "" "No se pudo ejecutar bout-log-color. Asegúrese de que se encuentre en su " "PATH\n" -#: ../src/solver/solver.cxx:574 +#: ../src/solver/solver.cxx:669 #, c-format msgid "Couldn't add Monitor: %g is not a multiple of %g!" msgstr "No se pudo añadir el Monitor: %g no és multiplo de %g!" @@ -431,12 +431,12 @@ msgstr "No se pudo encontrar la región %s en regionMap2D" msgid "Couldn't find region %s in regionMapPerp" msgstr "No se pudo encontrar la región %s en regionMapPerp" -#: ../src/sys/options.cxx:268 +#: ../src/sys/options.cxx:280 #, c-format msgid "Couldn't get BoutReal from option %s = '%s'" msgstr "No se pudo recuperar BoutReal de la opción %s = '%s'" -#: ../src/sys/options.cxx:216 +#: ../src/sys/options.cxx:228 #, c-format msgid "Couldn't get integer from option %s = '%s'" msgstr "No se pudo recuperar el entero de la opción %s = '%s'" @@ -451,7 +451,7 @@ msgstr "DataDir \"%s\" no existe o no es accessible\n" msgid "DataDir \"%s\" is not a directory\n" msgstr "DataDir \"%s\" no es un directorio\n" -#: ../src/solver/solver.cxx:524 +#: ../src/solver/solver.cxx:587 msgid "ERROR: Solver is already initialised\n" msgstr "ERROR: el Solver ya se encuentra inicializado\n" @@ -469,7 +469,7 @@ msgstr "Error durante el paso de opciones" msgid "Error: nx must be greater than 2 times MXG (2 * %d)" msgstr "Error: nx debe ser mayor que 2 veces MXG (2 * %d)" -#: ../src/solver/solver.cxx:449 +#: ../src/solver/solver.cxx:454 msgid "Failed to initialise solver-> Aborting\n" msgstr "Fallo en inicializar el solver-> Abortando\n" @@ -478,7 +478,7 @@ msgstr "Fallo en inicializar el solver-> Abortando\n" msgid "Finding value for NXPE (ideal = %f)\n" msgstr "Encontrando valor para NXPE (ideal = %f)\n" -#: ../src/solver/solver.cxx:526 +#: ../src/solver/solver.cxx:589 msgid "Initialising solver\n" msgstr "Initializando el solver\n" @@ -513,15 +513,15 @@ msgstr "La malla `mesh` debe contener ny" msgid "Missing integer array %s\n" msgstr "Fala la matriz entera %s\n" -#: ../src/solver/solver.cxx:678 +#: ../src/solver/solver.cxx:773 msgid "Monitor signalled to quit" msgstr "Monitor indicó salir" -#: ../src/solver/solver.cxx:685 +#: ../src/solver/solver.cxx:780 msgid "Monitor signalled to quit\n" msgstr "Monitor indicó salir\n" -#: ../src/bout++.cxx:695 +#: ../src/bout++.cxx:701 msgid "Name of file whose existence triggers a stop" msgstr "" @@ -542,29 +542,29 @@ msgstr "" "Número de procesadores (%d) no divisible para NPs en la dirección x (%d)\n" #. Less than 2 time-steps left -#: ../src/bout++.cxx:739 +#: ../src/bout++.cxx:745 #, fuzzy, c-format msgid "Only %e seconds (%.2f steps) left. Quitting\n" msgstr "Solo faltan %e segundos. 
Saliendo\n" -#: ../src/sys/options.cxx:175 ../src/sys/options.cxx:195 -#: ../src/sys/options.cxx:250 ../src/sys/options.cxx:292 +#: ../src/sys/options.cxx:187 ../src/sys/options.cxx:207 +#: ../src/sys/options.cxx:262 ../src/sys/options.cxx:304 #, c-format msgid "Option %s has no value" msgstr "Opción %s sin valor" -#: ../src/sys/options.cxx:72 +#: ../src/sys/options.cxx:78 #, c-format msgid "Option %s is not a section" msgstr "Opción %s no es una sección" #. Doesn't exist -#: ../src/sys/options.cxx:83 +#: ../src/sys/options.cxx:95 #, c-format msgid "Option %s:%s does not exist" msgstr "Opción %s:%s no existe" -#: ../include/options.hxx:643 +#: ../include/options.hxx:666 #, c-format msgid "" "Options: Setting a value from same source (%s) to new value '%s' - old value " @@ -602,12 +602,12 @@ msgstr "Región Perp registrada %s" msgid "Revision: %s\n" msgstr "Revisión: %s\n" -#: ../src/solver/solver.cxx:492 +#: ../src/solver/solver.cxx:510 msgid "Run time : " msgstr "Tiempo de ejecución : " #. / Run the solver -#: ../src/solver/solver.cxx:453 +#: ../src/solver/solver.cxx:467 msgid "" "Running simulation\n" "\n" @@ -619,7 +619,7 @@ msgstr "" msgid "Signal" msgstr "" -#: ../src/bout++.cxx:709 +#: ../src/bout++.cxx:715 msgid "" "Sim Time | RHS evals | Wall Time | Calc Inv Comm I/O SOLVER\n" "\n" @@ -628,7 +628,7 @@ msgstr "" "SOLVER\n" "\n" -#: ../src/bout++.cxx:712 +#: ../src/bout++.cxx:718 msgid "" "Sim Time | RHS_e evals | RHS_I evals | Wall Time | Calc Inv " "Comm I/O SOLVER\n" @@ -638,19 +638,19 @@ msgstr "" "Com I/O SOLVER\n" "\n" -#: ../src/solver/solver.cxx:444 +#: ../src/solver/solver.cxx:449 #, c-format msgid "Solver running for %d outputs with monitor timestep of %e\n" msgstr "" "Solver corriendo para %d outputs con intervalos de tiempo de monitor de %e\n" -#: ../src/solver/solver.cxx:440 +#: ../src/solver/solver.cxx:445 #, c-format msgid "Solver running for %d outputs with output timestep of %e\n" msgstr "" "Solver corriendo para %d outputs con intervalos de tiempo de output de %e\n" -#: ../src/solver/solver.cxx:589 +#: ../src/solver/solver.cxx:684 #, c-format msgid "" "Solver::addMonitor: Cannot reduce timestep (from %g to %g) after init is " @@ -659,7 +659,7 @@ msgstr "" "Solver::addMonitor: No se puedo reducir el intervalo de tiempo (de %g a %g) " "después de que init fuera llamado!" -#: ../src/solver/solver.cxx:1037 +#: ../src/solver/solver.cxx:1132 #, c-format msgid "" "Time derivative at wrong location - Field is at %s, derivative is at %s for " @@ -668,7 +668,7 @@ msgstr "" "Derivada del tiempo en lugar erróneo - El field se encuentra en %s, la " "derivada se encuentra en %s para el field '%s'\n" -#: ../src/solver/solver.cxx:1259 +#: ../src/solver/solver.cxx:1354 #, c-format msgid "Time derivative for variable '%s' not set" msgstr "Derivada del tiempo para la variable '%s' no fijada" @@ -696,7 +696,7 @@ msgstr "" "Opción paralleltransform desconocida.\n" "Opciones válidas son 'identity', 'shifted', 'fci'" -#: ../src/sys/options.cxx:466 +#: ../src/sys/options.cxx:478 msgid "Unused options:\n" msgstr "Opciones sin usar:\n" @@ -736,43 +736,43 @@ msgstr "" "[VAR=VALUE]\n" #. restart file should be written by physics model -#: ../src/solver/solver.cxx:700 +#: ../src/solver/solver.cxx:795 #, fuzzy msgid "User signalled to quit. 
Returning\n" msgstr "Monitor indicó salir\n" -#: ../src/sys/options.cxx:231 +#: ../src/sys/options.cxx:243 #, c-format msgid "Value for option %s = %e is not an integer" msgstr "Valor para la opción %s = %e no es un entero" -#: ../src/sys/options.cxx:273 +#: ../src/sys/options.cxx:285 #, fuzzy, c-format msgid "Value for option %s cannot be converted to a BoutReal" msgstr "Valor para la opción %s = %e no es un entero" -#: ../src/sys/options.cxx:440 +#: ../src/sys/options.cxx:452 #, fuzzy, c-format msgid "Value for option %s cannot be converted to a Field2D" msgstr "Valor para la opción %s = %e no es un entero" -#: ../src/sys/options.cxx:390 +#: ../src/sys/options.cxx:402 #, fuzzy, c-format msgid "Value for option %s cannot be converted to a Field3D" msgstr "Valor para la opción %s = %e no es un entero" -#: ../src/sys/options.cxx:313 +#: ../src/sys/options.cxx:325 #, fuzzy, c-format msgid "Value for option %s cannot be converted to a bool" msgstr "Valor para la opción %s = %e no es un entero" #. Another type which can't be converted -#: ../src/sys/options.cxx:222 +#: ../src/sys/options.cxx:234 #, fuzzy, c-format msgid "Value for option %s is not an integer" msgstr "Valor para la opción %s = %e no es un entero" -#: ../src/solver/solver.cxx:996 ../src/solver/solver.cxx:1000 +#: ../src/solver/solver.cxx:1091 ../src/solver/solver.cxx:1095 #, c-format msgid "Variable '%s' not initialised" msgstr "Variable '%s' sin inicializar" @@ -806,7 +806,7 @@ msgid "" "option extrapolate_y=false to disable this.\n" msgstr "" -#: ../src/bout++.cxx:686 +#: ../src/bout++.cxx:692 msgid "Wall time limit in hours. By default (< 0), no limit" msgstr "" diff --git a/locale/fr/libbout.po b/locale/fr/libbout.po index 77e116472a..d461359c8b 100644 --- a/locale/fr/libbout.po +++ b/locale/fr/libbout.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: BOUT++ 4.2.0\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2020-09-23 17:43+0100\n" +"POT-Creation-Date: 2021-07-28 14:25+0200\n" "PO-Revision-Date: 2018-10-21 22:46+0100\n" "Last-Translator: \n" "Language-Team: French\n" @@ -163,14 +163,14 @@ msgstr "" #. Mark the option as used #. Option not found -#: ../src/sys/options.cxx:183 ../src/sys/options.cxx:238 -#: ../src/sys/options.cxx:280 ../src/sys/options.cxx:319 +#: ../src/sys/options.cxx:195 ../src/sys/options.cxx:250 +#: ../src/sys/options.cxx:292 ../src/sys/options.cxx:331 #: ../include/options.hxx:406 ../include/options.hxx:438 -#: ../include/options.hxx:460 ../include/options.hxx:635 +#: ../include/options.hxx:460 ../include/options.hxx:658 msgid "\tOption " msgstr "" -#: ../src/sys/options.cxx:309 +#: ../src/sys/options.cxx:321 #, c-format msgid "\tOption '%s': Boolean expected. Got '%s'\n" msgstr "" @@ -246,7 +246,7 @@ msgid "" " -q, --quiet\t\tDecrease verbosity\n" msgstr "" -#: ../src/solver/solver.cxx:491 +#: ../src/solver/solver.cxx:509 #, c-format msgid "" "\n" @@ -255,7 +255,7 @@ msgstr "" "\n" "L'exécution se termine à %s\n" -#: ../src/solver/solver.cxx:456 +#: ../src/solver/solver.cxx:474 #, fuzzy, c-format msgid "" "\n" @@ -280,7 +280,7 @@ msgid "" "model source (e.g. 
%s.cxx)\n" msgstr "" -#: ../include/options.hxx:638 +#: ../include/options.hxx:661 msgid ") overwritten with:" msgstr "" @@ -288,7 +288,7 @@ msgstr "" msgid "4 of 8" msgstr "" -#: ../src/sys/options.cxx:464 +#: ../src/sys/options.cxx:476 msgid "All options used\n" msgstr "" @@ -310,7 +310,7 @@ msgstr "" msgid "Cannot split %d X points equally between %d processors\n" msgstr "" -#: ../src/bout++.cxx:691 +#: ../src/bout++.cxx:697 msgid "Check if a file exists, and exit if it does." msgstr "" @@ -370,7 +370,7 @@ msgstr "" msgid "Could not run bout-log-color. Make sure it is in your PATH\n" msgstr "" -#: ../src/solver/solver.cxx:574 +#: ../src/solver/solver.cxx:669 #, c-format msgid "Couldn't add Monitor: %g is not a multiple of %g!" msgstr "" @@ -390,12 +390,12 @@ msgstr "" msgid "Couldn't find region %s in regionMapPerp" msgstr "" -#: ../src/sys/options.cxx:268 +#: ../src/sys/options.cxx:280 #, c-format msgid "Couldn't get BoutReal from option %s = '%s'" msgstr "" -#: ../src/sys/options.cxx:216 +#: ../src/sys/options.cxx:228 #, c-format msgid "Couldn't get integer from option %s = '%s'" msgstr "" @@ -410,7 +410,7 @@ msgstr "Le répertoire de données \"%s\" n'existe pas ou n'est pas accessible\n msgid "DataDir \"%s\" is not a directory\n" msgstr "\"%s\" n'est pas un répertoire\n" -#: ../src/solver/solver.cxx:524 +#: ../src/solver/solver.cxx:587 msgid "ERROR: Solver is already initialised\n" msgstr "" @@ -428,7 +428,7 @@ msgstr "" msgid "Error: nx must be greater than 2 times MXG (2 * %d)" msgstr "" -#: ../src/solver/solver.cxx:449 +#: ../src/solver/solver.cxx:454 msgid "Failed to initialise solver-> Aborting\n" msgstr "Échec d'initialisation du solutionneur -> Abandonner\n" @@ -437,7 +437,7 @@ msgstr "Échec d'initialisation du solutionneur -> Abandonner\n" msgid "Finding value for NXPE (ideal = %f)\n" msgstr "" -#: ../src/solver/solver.cxx:526 +#: ../src/solver/solver.cxx:589 msgid "Initialising solver\n" msgstr "" @@ -469,15 +469,15 @@ msgstr "" msgid "Missing integer array %s\n" msgstr "" -#: ../src/solver/solver.cxx:678 +#: ../src/solver/solver.cxx:773 msgid "Monitor signalled to quit" msgstr "" -#: ../src/solver/solver.cxx:685 +#: ../src/solver/solver.cxx:780 msgid "Monitor signalled to quit\n" msgstr "" -#: ../src/bout++.cxx:695 +#: ../src/bout++.cxx:701 msgid "Name of file whose existence triggers a stop" msgstr "" @@ -496,29 +496,29 @@ msgid "Number of processors (%d) not divisible by NPs in y direction (%d)\n" msgstr "" #. Less than 2 time-steps left -#: ../src/bout++.cxx:739 +#: ../src/bout++.cxx:745 #, c-format msgid "Only %e seconds (%.2f steps) left. Quitting\n" msgstr "" -#: ../src/sys/options.cxx:175 ../src/sys/options.cxx:195 -#: ../src/sys/options.cxx:250 ../src/sys/options.cxx:292 +#: ../src/sys/options.cxx:187 ../src/sys/options.cxx:207 +#: ../src/sys/options.cxx:262 ../src/sys/options.cxx:304 #, c-format msgid "Option %s has no value" msgstr "" -#: ../src/sys/options.cxx:72 +#: ../src/sys/options.cxx:78 #, fuzzy, c-format msgid "Option %s is not a section" msgstr "\"%s\" n'est pas un répertoire\n" #. Doesn't exist -#: ../src/sys/options.cxx:83 +#: ../src/sys/options.cxx:95 #, c-format msgid "Option %s:%s does not exist" msgstr "" -#: ../include/options.hxx:643 +#: ../include/options.hxx:666 #, c-format msgid "" "Options: Setting a value from same source (%s) to new value '%s' - old value " @@ -552,12 +552,12 @@ msgstr "" msgid "Revision: %s\n" msgstr "" -#: ../src/solver/solver.cxx:492 +#: ../src/solver/solver.cxx:510 msgid "Run time : " msgstr "Temps d'exécution : " #. 
/ Run the solver -#: ../src/solver/solver.cxx:453 +#: ../src/solver/solver.cxx:467 msgid "" "Running simulation\n" "\n" @@ -569,45 +569,45 @@ msgstr "" msgid "Signal" msgstr "" -#: ../src/bout++.cxx:709 +#: ../src/bout++.cxx:715 msgid "" "Sim Time | RHS evals | Wall Time | Calc Inv Comm I/O SOLVER\n" "\n" msgstr "" -#: ../src/bout++.cxx:712 +#: ../src/bout++.cxx:718 msgid "" "Sim Time | RHS_e evals | RHS_I evals | Wall Time | Calc Inv " "Comm I/O SOLVER\n" "\n" msgstr "" -#: ../src/solver/solver.cxx:444 +#: ../src/solver/solver.cxx:449 #, c-format msgid "Solver running for %d outputs with monitor timestep of %e\n" msgstr "" "Le solveur fonctionne pour %d sorties avec un temps de moniteur de %e\n" -#: ../src/solver/solver.cxx:440 +#: ../src/solver/solver.cxx:445 #, c-format msgid "Solver running for %d outputs with output timestep of %e\n" msgstr "Le solveur fonctionne pour %d sorties avec un pas de sortie de %e\n" -#: ../src/solver/solver.cxx:589 +#: ../src/solver/solver.cxx:684 #, c-format msgid "" "Solver::addMonitor: Cannot reduce timestep (from %g to %g) after init is " "called!" msgstr "" -#: ../src/solver/solver.cxx:1037 +#: ../src/solver/solver.cxx:1132 #, c-format msgid "" "Time derivative at wrong location - Field is at %s, derivative is at %s for " "field '%s'\n" msgstr "" -#: ../src/solver/solver.cxx:1259 +#: ../src/solver/solver.cxx:1354 #, c-format msgid "Time derivative for variable '%s' not set" msgstr "" @@ -633,7 +633,7 @@ msgid "" "Valid choices are 'identity', 'shifted', 'fci'" msgstr "" -#: ../src/sys/options.cxx:466 +#: ../src/sys/options.cxx:478 msgid "Unused options:\n" msgstr "" @@ -671,42 +671,42 @@ msgid "" msgstr "" #. restart file should be written by physics model -#: ../src/solver/solver.cxx:700 +#: ../src/solver/solver.cxx:795 msgid "User signalled to quit. Returning\n" msgstr "" -#: ../src/sys/options.cxx:231 +#: ../src/sys/options.cxx:243 #, c-format msgid "Value for option %s = %e is not an integer" msgstr "" -#: ../src/sys/options.cxx:273 +#: ../src/sys/options.cxx:285 #, c-format msgid "Value for option %s cannot be converted to a BoutReal" msgstr "" -#: ../src/sys/options.cxx:440 +#: ../src/sys/options.cxx:452 #, c-format msgid "Value for option %s cannot be converted to a Field2D" msgstr "" -#: ../src/sys/options.cxx:390 +#: ../src/sys/options.cxx:402 #, c-format msgid "Value for option %s cannot be converted to a Field3D" msgstr "" -#: ../src/sys/options.cxx:313 +#: ../src/sys/options.cxx:325 #, c-format msgid "Value for option %s cannot be converted to a bool" msgstr "" #. Another type which can't be converted -#: ../src/sys/options.cxx:222 +#: ../src/sys/options.cxx:234 #, c-format msgid "Value for option %s is not an integer" msgstr "" -#: ../src/solver/solver.cxx:996 ../src/solver/solver.cxx:1000 +#: ../src/solver/solver.cxx:1091 ../src/solver/solver.cxx:1095 #, c-format msgid "Variable '%s' not initialised" msgstr "" @@ -738,7 +738,7 @@ msgid "" "option extrapolate_y=false to disable this.\n" msgstr "" -#: ../src/bout++.cxx:686 +#: ../src/bout++.cxx:692 msgid "Wall time limit in hours. 
By default (< 0), no limit" msgstr "" diff --git a/locale/libbout.pot b/locale/libbout.pot index 8f47324593..0cbd55fcd5 100644 --- a/locale/libbout.pot +++ b/locale/libbout.pot @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2020-09-23 17:43+0100\n" +"POT-Creation-Date: 2021-07-28 14:25+0200\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -161,14 +161,14 @@ msgstr "" #. Mark the option as used #. Option not found -#: ../src/sys/options.cxx:183 ../src/sys/options.cxx:238 -#: ../src/sys/options.cxx:280 ../src/sys/options.cxx:319 +#: ../src/sys/options.cxx:195 ../src/sys/options.cxx:250 +#: ../src/sys/options.cxx:292 ../src/sys/options.cxx:331 #: ../include/options.hxx:406 ../include/options.hxx:438 -#: ../include/options.hxx:460 ../include/options.hxx:635 +#: ../include/options.hxx:460 ../include/options.hxx:658 msgid "\tOption " msgstr "" -#: ../src/sys/options.cxx:309 +#: ../src/sys/options.cxx:321 #, c-format msgid "\tOption '%s': Boolean expected. Got '%s'\n" msgstr "" @@ -244,14 +244,14 @@ msgid "" " -q, --quiet\t\tDecrease verbosity\n" msgstr "" -#: ../src/solver/solver.cxx:491 +#: ../src/solver/solver.cxx:509 #, c-format msgid "" "\n" "Run finished at : %s\n" msgstr "" -#: ../src/solver/solver.cxx:456 +#: ../src/solver/solver.cxx:474 #, c-format msgid "" "\n" @@ -274,7 +274,7 @@ msgid "" "model source (e.g. %s.cxx)\n" msgstr "" -#: ../include/options.hxx:638 +#: ../include/options.hxx:661 msgid ") overwritten with:" msgstr "" @@ -282,7 +282,7 @@ msgstr "" msgid "4 of 8" msgstr "" -#: ../src/sys/options.cxx:464 +#: ../src/sys/options.cxx:476 msgid "All options used\n" msgstr "" @@ -304,7 +304,7 @@ msgstr "" msgid "Cannot split %d X points equally between %d processors\n" msgstr "" -#: ../src/bout++.cxx:691 +#: ../src/bout++.cxx:697 msgid "Check if a file exists, and exit if it does." msgstr "" @@ -362,7 +362,7 @@ msgstr "" msgid "Could not run bout-log-color. Make sure it is in your PATH\n" msgstr "" -#: ../src/solver/solver.cxx:574 +#: ../src/solver/solver.cxx:669 #, c-format msgid "Couldn't add Monitor: %g is not a multiple of %g!" 
msgstr "" @@ -382,12 +382,12 @@ msgstr "" msgid "Couldn't find region %s in regionMapPerp" msgstr "" -#: ../src/sys/options.cxx:268 +#: ../src/sys/options.cxx:280 #, c-format msgid "Couldn't get BoutReal from option %s = '%s'" msgstr "" -#: ../src/sys/options.cxx:216 +#: ../src/sys/options.cxx:228 #, c-format msgid "Couldn't get integer from option %s = '%s'" msgstr "" @@ -402,7 +402,7 @@ msgstr "" msgid "DataDir \"%s\" is not a directory\n" msgstr "" -#: ../src/solver/solver.cxx:524 +#: ../src/solver/solver.cxx:587 msgid "ERROR: Solver is already initialised\n" msgstr "" @@ -420,7 +420,7 @@ msgstr "" msgid "Error: nx must be greater than 2 times MXG (2 * %d)" msgstr "" -#: ../src/solver/solver.cxx:449 +#: ../src/solver/solver.cxx:454 msgid "Failed to initialise solver-> Aborting\n" msgstr "" @@ -429,7 +429,7 @@ msgstr "" msgid "Finding value for NXPE (ideal = %f)\n" msgstr "" -#: ../src/solver/solver.cxx:526 +#: ../src/solver/solver.cxx:589 msgid "Initialising solver\n" msgstr "" @@ -461,15 +461,15 @@ msgstr "" msgid "Missing integer array %s\n" msgstr "" -#: ../src/solver/solver.cxx:678 +#: ../src/solver/solver.cxx:773 msgid "Monitor signalled to quit" msgstr "" -#: ../src/solver/solver.cxx:685 +#: ../src/solver/solver.cxx:780 msgid "Monitor signalled to quit\n" msgstr "" -#: ../src/bout++.cxx:695 +#: ../src/bout++.cxx:701 msgid "Name of file whose existence triggers a stop" msgstr "" @@ -488,29 +488,29 @@ msgid "Number of processors (%d) not divisible by NPs in y direction (%d)\n" msgstr "" #. Less than 2 time-steps left -#: ../src/bout++.cxx:739 +#: ../src/bout++.cxx:745 #, c-format msgid "Only %e seconds (%.2f steps) left. Quitting\n" msgstr "" -#: ../src/sys/options.cxx:175 ../src/sys/options.cxx:195 -#: ../src/sys/options.cxx:250 ../src/sys/options.cxx:292 +#: ../src/sys/options.cxx:187 ../src/sys/options.cxx:207 +#: ../src/sys/options.cxx:262 ../src/sys/options.cxx:304 #, c-format msgid "Option %s has no value" msgstr "" -#: ../src/sys/options.cxx:72 +#: ../src/sys/options.cxx:78 #, c-format msgid "Option %s is not a section" msgstr "" #. Doesn't exist -#: ../src/sys/options.cxx:83 +#: ../src/sys/options.cxx:95 #, c-format msgid "Option %s:%s does not exist" msgstr "" -#: ../include/options.hxx:643 +#: ../include/options.hxx:666 #, c-format msgid "" "Options: Setting a value from same source (%s) to new value '%s' - old value " @@ -544,12 +544,12 @@ msgstr "" msgid "Revision: %s\n" msgstr "" -#: ../src/solver/solver.cxx:492 +#: ../src/solver/solver.cxx:510 msgid "Run time : " msgstr "" #. / Run the solver -#: ../src/solver/solver.cxx:453 +#: ../src/solver/solver.cxx:467 msgid "" "Running simulation\n" "\n" @@ -559,44 +559,44 @@ msgstr "" msgid "Signal" msgstr "" -#: ../src/bout++.cxx:709 +#: ../src/bout++.cxx:715 msgid "" "Sim Time | RHS evals | Wall Time | Calc Inv Comm I/O SOLVER\n" "\n" msgstr "" -#: ../src/bout++.cxx:712 +#: ../src/bout++.cxx:718 msgid "" "Sim Time | RHS_e evals | RHS_I evals | Wall Time | Calc Inv " "Comm I/O SOLVER\n" "\n" msgstr "" -#: ../src/solver/solver.cxx:444 +#: ../src/solver/solver.cxx:449 #, c-format msgid "Solver running for %d outputs with monitor timestep of %e\n" msgstr "" -#: ../src/solver/solver.cxx:440 +#: ../src/solver/solver.cxx:445 #, c-format msgid "Solver running for %d outputs with output timestep of %e\n" msgstr "" -#: ../src/solver/solver.cxx:589 +#: ../src/solver/solver.cxx:684 #, c-format msgid "" "Solver::addMonitor: Cannot reduce timestep (from %g to %g) after init is " "called!" 
msgstr "" -#: ../src/solver/solver.cxx:1037 +#: ../src/solver/solver.cxx:1132 #, c-format msgid "" "Time derivative at wrong location - Field is at %s, derivative is at %s for " "field '%s'\n" msgstr "" -#: ../src/solver/solver.cxx:1259 +#: ../src/solver/solver.cxx:1354 #, c-format msgid "Time derivative for variable '%s' not set" msgstr "" @@ -622,7 +622,7 @@ msgid "" "Valid choices are 'identity', 'shifted', 'fci'" msgstr "" -#: ../src/sys/options.cxx:466 +#: ../src/sys/options.cxx:478 msgid "Unused options:\n" msgstr "" @@ -660,42 +660,42 @@ msgid "" msgstr "" #. restart file should be written by physics model -#: ../src/solver/solver.cxx:700 +#: ../src/solver/solver.cxx:795 msgid "User signalled to quit. Returning\n" msgstr "" -#: ../src/sys/options.cxx:231 +#: ../src/sys/options.cxx:243 #, c-format msgid "Value for option %s = %e is not an integer" msgstr "" -#: ../src/sys/options.cxx:273 +#: ../src/sys/options.cxx:285 #, c-format msgid "Value for option %s cannot be converted to a BoutReal" msgstr "" -#: ../src/sys/options.cxx:440 +#: ../src/sys/options.cxx:452 #, c-format msgid "Value for option %s cannot be converted to a Field2D" msgstr "" -#: ../src/sys/options.cxx:390 +#: ../src/sys/options.cxx:402 #, c-format msgid "Value for option %s cannot be converted to a Field3D" msgstr "" -#: ../src/sys/options.cxx:313 +#: ../src/sys/options.cxx:325 #, c-format msgid "Value for option %s cannot be converted to a bool" msgstr "" #. Another type which can't be converted -#: ../src/sys/options.cxx:222 +#: ../src/sys/options.cxx:234 #, c-format msgid "Value for option %s is not an integer" msgstr "" -#: ../src/solver/solver.cxx:996 ../src/solver/solver.cxx:1000 +#: ../src/solver/solver.cxx:1091 ../src/solver/solver.cxx:1095 #, c-format msgid "Variable '%s' not initialised" msgstr "" @@ -727,7 +727,7 @@ msgid "" "option extrapolate_y=false to disable this.\n" msgstr "" -#: ../src/bout++.cxx:686 +#: ../src/bout++.cxx:692 msgid "Wall time limit in hours. By default (< 0), no limit" msgstr "" diff --git a/locale/zh_CN/libbout.po b/locale/zh_CN/libbout.po index 5ba45f343c..f27021fad4 100644 --- a/locale/zh_CN/libbout.po +++ b/locale/zh_CN/libbout.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: BOUT++ 4.2.0\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2020-09-23 17:43+0100\n" +"POT-Creation-Date: 2021-07-28 14:25+0200\n" "PO-Revision-Date: 2018-10-22 22:56+0100\n" "Last-Translator: \n" "Language-Team: Chinese (simplified)\n" @@ -162,14 +162,14 @@ msgstr "" #. Mark the option as used #. Option not found -#: ../src/sys/options.cxx:183 ../src/sys/options.cxx:238 -#: ../src/sys/options.cxx:280 ../src/sys/options.cxx:319 +#: ../src/sys/options.cxx:195 ../src/sys/options.cxx:250 +#: ../src/sys/options.cxx:292 ../src/sys/options.cxx:331 #: ../include/options.hxx:406 ../include/options.hxx:438 -#: ../include/options.hxx:460 ../include/options.hxx:635 +#: ../include/options.hxx:460 ../include/options.hxx:658 msgid "\tOption " msgstr "\t选项 " -#: ../src/sys/options.cxx:309 +#: ../src/sys/options.cxx:321 #, c-format msgid "\tOption '%s': Boolean expected. Got '%s'\n" msgstr "" @@ -246,7 +246,7 @@ msgid "" " -q, --quiet\t\tDecrease verbosity\n" msgstr "" -#: ../src/solver/solver.cxx:491 +#: ../src/solver/solver.cxx:509 #, c-format msgid "" "\n" @@ -255,7 +255,7 @@ msgstr "" "\n" "计算结束于 %s\n" -#: ../src/solver/solver.cxx:456 +#: ../src/solver/solver.cxx:474 #, c-format msgid "" "\n" @@ -280,7 +280,7 @@ msgid "" "model source (e.g. 
%s.cxx)\n" msgstr "" -#: ../include/options.hxx:638 +#: ../include/options.hxx:661 msgid ") overwritten with:" msgstr "" @@ -288,7 +288,7 @@ msgstr "" msgid "4 of 8" msgstr "" -#: ../src/sys/options.cxx:464 +#: ../src/sys/options.cxx:476 msgid "All options used\n" msgstr "" @@ -310,7 +310,7 @@ msgstr "" msgid "Cannot split %d X points equally between %d processors\n" msgstr "" -#: ../src/bout++.cxx:691 +#: ../src/bout++.cxx:697 msgid "Check if a file exists, and exit if it does." msgstr "" @@ -370,7 +370,7 @@ msgstr "" msgid "Could not run bout-log-color. Make sure it is in your PATH\n" msgstr "" -#: ../src/solver/solver.cxx:574 +#: ../src/solver/solver.cxx:669 #, c-format msgid "Couldn't add Monitor: %g is not a multiple of %g!" msgstr "" @@ -390,12 +390,12 @@ msgstr "" msgid "Couldn't find region %s in regionMapPerp" msgstr "" -#: ../src/sys/options.cxx:268 +#: ../src/sys/options.cxx:280 #, c-format msgid "Couldn't get BoutReal from option %s = '%s'" msgstr "" -#: ../src/sys/options.cxx:216 +#: ../src/sys/options.cxx:228 #, c-format msgid "Couldn't get integer from option %s = '%s'" msgstr "" @@ -410,7 +410,7 @@ msgstr "\"%s\" 不存在或不可访问\n" msgid "DataDir \"%s\" is not a directory\n" msgstr "\"%s\" 不是目录\n" -#: ../src/solver/solver.cxx:524 +#: ../src/solver/solver.cxx:587 msgid "ERROR: Solver is already initialised\n" msgstr "" @@ -428,7 +428,7 @@ msgstr "" msgid "Error: nx must be greater than 2 times MXG (2 * %d)" msgstr "" -#: ../src/solver/solver.cxx:449 +#: ../src/solver/solver.cxx:454 msgid "Failed to initialise solver-> Aborting\n" msgstr "" @@ -437,7 +437,7 @@ msgstr "" msgid "Finding value for NXPE (ideal = %f)\n" msgstr "" -#: ../src/solver/solver.cxx:526 +#: ../src/solver/solver.cxx:589 msgid "Initialising solver\n" msgstr "" @@ -469,15 +469,15 @@ msgstr "" msgid "Missing integer array %s\n" msgstr "" -#: ../src/solver/solver.cxx:678 +#: ../src/solver/solver.cxx:773 msgid "Monitor signalled to quit" msgstr "" -#: ../src/solver/solver.cxx:685 +#: ../src/solver/solver.cxx:780 msgid "Monitor signalled to quit\n" msgstr "" -#: ../src/bout++.cxx:695 +#: ../src/bout++.cxx:701 msgid "Name of file whose existence triggers a stop" msgstr "" @@ -496,29 +496,29 @@ msgid "Number of processors (%d) not divisible by NPs in y direction (%d)\n" msgstr "" #. Less than 2 time-steps left -#: ../src/bout++.cxx:739 +#: ../src/bout++.cxx:745 #, c-format msgid "Only %e seconds (%.2f steps) left. Quitting\n" msgstr "" -#: ../src/sys/options.cxx:175 ../src/sys/options.cxx:195 -#: ../src/sys/options.cxx:250 ../src/sys/options.cxx:292 +#: ../src/sys/options.cxx:187 ../src/sys/options.cxx:207 +#: ../src/sys/options.cxx:262 ../src/sys/options.cxx:304 #, c-format msgid "Option %s has no value" msgstr "" -#: ../src/sys/options.cxx:72 +#: ../src/sys/options.cxx:78 #, fuzzy, c-format msgid "Option %s is not a section" msgstr "\"%s\" 不是目录\n" #. Doesn't exist -#: ../src/sys/options.cxx:83 +#: ../src/sys/options.cxx:95 #, c-format msgid "Option %s:%s does not exist" msgstr "" -#: ../include/options.hxx:643 +#: ../include/options.hxx:666 #, c-format msgid "" "Options: Setting a value from same source (%s) to new value '%s' - old value " @@ -552,12 +552,12 @@ msgstr "" msgid "Revision: %s\n" msgstr "" -#: ../src/solver/solver.cxx:492 +#: ../src/solver/solver.cxx:510 msgid "Run time : " msgstr "计算时间" #. 
/ Run the solver -#: ../src/solver/solver.cxx:453 +#: ../src/solver/solver.cxx:467 msgid "" "Running simulation\n" "\n" @@ -569,44 +569,44 @@ msgstr "" msgid "Signal" msgstr "" -#: ../src/bout++.cxx:709 +#: ../src/bout++.cxx:715 msgid "" "Sim Time | RHS evals | Wall Time | Calc Inv Comm I/O SOLVER\n" "\n" msgstr "" -#: ../src/bout++.cxx:712 +#: ../src/bout++.cxx:718 msgid "" "Sim Time | RHS_e evals | RHS_I evals | Wall Time | Calc Inv " "Comm I/O SOLVER\n" "\n" msgstr "" -#: ../src/solver/solver.cxx:444 +#: ../src/solver/solver.cxx:449 #, c-format msgid "Solver running for %d outputs with monitor timestep of %e\n" msgstr "" -#: ../src/solver/solver.cxx:440 +#: ../src/solver/solver.cxx:445 #, c-format msgid "Solver running for %d outputs with output timestep of %e\n" msgstr "" -#: ../src/solver/solver.cxx:589 +#: ../src/solver/solver.cxx:684 #, c-format msgid "" "Solver::addMonitor: Cannot reduce timestep (from %g to %g) after init is " "called!" msgstr "" -#: ../src/solver/solver.cxx:1037 +#: ../src/solver/solver.cxx:1132 #, c-format msgid "" "Time derivative at wrong location - Field is at %s, derivative is at %s for " "field '%s'\n" msgstr "" -#: ../src/solver/solver.cxx:1259 +#: ../src/solver/solver.cxx:1354 #, c-format msgid "Time derivative for variable '%s' not set" msgstr "" @@ -632,7 +632,7 @@ msgid "" "Valid choices are 'identity', 'shifted', 'fci'" msgstr "" -#: ../src/sys/options.cxx:466 +#: ../src/sys/options.cxx:478 msgid "Unused options:\n" msgstr "" @@ -670,42 +670,42 @@ msgid "" msgstr "" #. restart file should be written by physics model -#: ../src/solver/solver.cxx:700 +#: ../src/solver/solver.cxx:795 msgid "User signalled to quit. Returning\n" msgstr "" -#: ../src/sys/options.cxx:231 +#: ../src/sys/options.cxx:243 #, c-format msgid "Value for option %s = %e is not an integer" msgstr "" -#: ../src/sys/options.cxx:273 +#: ../src/sys/options.cxx:285 #, c-format msgid "Value for option %s cannot be converted to a BoutReal" msgstr "" -#: ../src/sys/options.cxx:440 +#: ../src/sys/options.cxx:452 #, c-format msgid "Value for option %s cannot be converted to a Field2D" msgstr "" -#: ../src/sys/options.cxx:390 +#: ../src/sys/options.cxx:402 #, c-format msgid "Value for option %s cannot be converted to a Field3D" msgstr "" -#: ../src/sys/options.cxx:313 +#: ../src/sys/options.cxx:325 #, c-format msgid "Value for option %s cannot be converted to a bool" msgstr "" #. Another type which can't be converted -#: ../src/sys/options.cxx:222 +#: ../src/sys/options.cxx:234 #, c-format msgid "Value for option %s is not an integer" msgstr "" -#: ../src/solver/solver.cxx:996 ../src/solver/solver.cxx:1000 +#: ../src/solver/solver.cxx:1091 ../src/solver/solver.cxx:1095 #, c-format msgid "Variable '%s' not initialised" msgstr "" @@ -737,7 +737,7 @@ msgid "" "option extrapolate_y=false to disable this.\n" msgstr "" -#: ../src/bout++.cxx:686 +#: ../src/bout++.cxx:692 msgid "Wall time limit in hours. By default (< 0), no limit" msgstr "" diff --git a/locale/zh_TW/libbout.po b/locale/zh_TW/libbout.po index 8667fbedee..8ab9ac0c28 100644 --- a/locale/zh_TW/libbout.po +++ b/locale/zh_TW/libbout.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: BOUT++ 4.2.0\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2020-09-23 17:43+0100\n" +"POT-Creation-Date: 2021-07-28 14:25+0200\n" "PO-Revision-Date: 2018-10-22 22:56+0100\n" "Last-Translator: \n" "Language-Team: Chinese (traditional)\n" @@ -160,14 +160,14 @@ msgstr "\t啟用OpenMP並行化。 使用%d個線程\n" #. Mark the option as used #. 
Option not found -#: ../src/sys/options.cxx:183 ../src/sys/options.cxx:238 -#: ../src/sys/options.cxx:280 ../src/sys/options.cxx:319 +#: ../src/sys/options.cxx:195 ../src/sys/options.cxx:250 +#: ../src/sys/options.cxx:292 ../src/sys/options.cxx:331 #: ../include/options.hxx:406 ../include/options.hxx:438 -#: ../include/options.hxx:460 ../include/options.hxx:635 +#: ../include/options.hxx:460 ../include/options.hxx:658 msgid "\tOption " msgstr "\t選項 " -#: ../src/sys/options.cxx:309 +#: ../src/sys/options.cxx:321 #, c-format msgid "\tOption '%s': Boolean expected. Got '%s'\n" msgstr "\t選項 '%s': 布爾預期. 拿到 '%s'\n" @@ -245,7 +245,7 @@ msgid "" " -q, --quiet\t\tDecrease verbosity\n" msgstr "" -#: ../src/solver/solver.cxx:491 +#: ../src/solver/solver.cxx:509 #, c-format msgid "" "\n" @@ -254,7 +254,7 @@ msgstr "" "\n" "计算结束于 %s\n" -#: ../src/solver/solver.cxx:456 +#: ../src/solver/solver.cxx:474 #, c-format msgid "" "\n" @@ -279,7 +279,7 @@ msgid "" "model source (e.g. %s.cxx)\n" msgstr "" -#: ../include/options.hxx:638 +#: ../include/options.hxx:661 msgid ") overwritten with:" msgstr "" @@ -287,7 +287,7 @@ msgstr "" msgid "4 of 8" msgstr "" -#: ../src/sys/options.cxx:464 +#: ../src/sys/options.cxx:476 msgid "All options used\n" msgstr "" @@ -309,7 +309,7 @@ msgstr "" msgid "Cannot split %d X points equally between %d processors\n" msgstr "" -#: ../src/bout++.cxx:691 +#: ../src/bout++.cxx:697 msgid "Check if a file exists, and exit if it does." msgstr "" @@ -370,7 +370,7 @@ msgstr "無法打開輸出文件 '%s'\n" msgid "Could not run bout-log-color. Make sure it is in your PATH\n" msgstr "" -#: ../src/solver/solver.cxx:574 +#: ../src/solver/solver.cxx:669 #, c-format msgid "Couldn't add Monitor: %g is not a multiple of %g!" msgstr "" @@ -390,12 +390,12 @@ msgstr "" msgid "Couldn't find region %s in regionMapPerp" msgstr "" -#: ../src/sys/options.cxx:268 +#: ../src/sys/options.cxx:280 #, c-format msgid "Couldn't get BoutReal from option %s = '%s'" msgstr "" -#: ../src/sys/options.cxx:216 +#: ../src/sys/options.cxx:228 #, c-format msgid "Couldn't get integer from option %s = '%s'" msgstr "" @@ -410,7 +410,7 @@ msgstr "\"%s\" 不存在或不可訪問\n" msgid "DataDir \"%s\" is not a directory\n" msgstr "\"%s\" 不是目錄\n" -#: ../src/solver/solver.cxx:524 +#: ../src/solver/solver.cxx:587 msgid "ERROR: Solver is already initialised\n" msgstr "" @@ -428,7 +428,7 @@ msgstr "" msgid "Error: nx must be greater than 2 times MXG (2 * %d)" msgstr "" -#: ../src/solver/solver.cxx:449 +#: ../src/solver/solver.cxx:454 msgid "Failed to initialise solver-> Aborting\n" msgstr "" @@ -437,7 +437,7 @@ msgstr "" msgid "Finding value for NXPE (ideal = %f)\n" msgstr "" -#: ../src/solver/solver.cxx:526 +#: ../src/solver/solver.cxx:589 msgid "Initialising solver\n" msgstr "初始化求解器\n" @@ -469,15 +469,15 @@ msgstr "" msgid "Missing integer array %s\n" msgstr "" -#: ../src/solver/solver.cxx:678 +#: ../src/solver/solver.cxx:773 msgid "Monitor signalled to quit" msgstr "" -#: ../src/solver/solver.cxx:685 +#: ../src/solver/solver.cxx:780 msgid "Monitor signalled to quit\n" msgstr "" -#: ../src/bout++.cxx:695 +#: ../src/bout++.cxx:701 msgid "Name of file whose existence triggers a stop" msgstr "" @@ -496,29 +496,29 @@ msgid "Number of processors (%d) not divisible by NPs in y direction (%d)\n" msgstr "" #. Less than 2 time-steps left -#: ../src/bout++.cxx:739 +#: ../src/bout++.cxx:745 #, c-format msgid "Only %e seconds (%.2f steps) left. 
Quitting\n" msgstr "" -#: ../src/sys/options.cxx:175 ../src/sys/options.cxx:195 -#: ../src/sys/options.cxx:250 ../src/sys/options.cxx:292 +#: ../src/sys/options.cxx:187 ../src/sys/options.cxx:207 +#: ../src/sys/options.cxx:262 ../src/sys/options.cxx:304 #, fuzzy, c-format msgid "Option %s has no value" msgstr "\"%s\" 不是目錄\n" -#: ../src/sys/options.cxx:72 +#: ../src/sys/options.cxx:78 #, fuzzy, c-format msgid "Option %s is not a section" msgstr "\"%s\" 不是目錄\n" #. Doesn't exist -#: ../src/sys/options.cxx:83 +#: ../src/sys/options.cxx:95 #, c-format msgid "Option %s:%s does not exist" msgstr "選項%s:%s不存在" -#: ../include/options.hxx:643 +#: ../include/options.hxx:666 #, c-format msgid "" "Options: Setting a value from same source (%s) to new value '%s' - old value " @@ -552,12 +552,12 @@ msgstr "" msgid "Revision: %s\n" msgstr "版: %s\n" -#: ../src/solver/solver.cxx:492 +#: ../src/solver/solver.cxx:510 msgid "Run time : " msgstr "計算時間" #. / Run the solver -#: ../src/solver/solver.cxx:453 +#: ../src/solver/solver.cxx:467 msgid "" "Running simulation\n" "\n" @@ -569,7 +569,7 @@ msgstr "" msgid "Signal" msgstr "" -#: ../src/bout++.cxx:709 +#: ../src/bout++.cxx:715 msgid "" "Sim Time | RHS evals | Wall Time | Calc Inv Comm I/O SOLVER\n" "\n" @@ -578,7 +578,7 @@ msgstr "" "間整合\n" "\n" -#: ../src/bout++.cxx:712 +#: ../src/bout++.cxx:718 msgid "" "Sim Time | RHS_e evals | RHS_I evals | Wall Time | Calc Inv " "Comm I/O SOLVER\n" @@ -588,31 +588,31 @@ msgstr "" "通訊 輸入輸出 時間整合\n" "\n" -#: ../src/solver/solver.cxx:444 +#: ../src/solver/solver.cxx:449 #, c-format msgid "Solver running for %d outputs with monitor timestep of %e\n" msgstr "" -#: ../src/solver/solver.cxx:440 +#: ../src/solver/solver.cxx:445 #, c-format msgid "Solver running for %d outputs with output timestep of %e\n" msgstr "" -#: ../src/solver/solver.cxx:589 +#: ../src/solver/solver.cxx:684 #, c-format msgid "" "Solver::addMonitor: Cannot reduce timestep (from %g to %g) after init is " "called!" msgstr "" -#: ../src/solver/solver.cxx:1037 +#: ../src/solver/solver.cxx:1132 #, c-format msgid "" "Time derivative at wrong location - Field is at %s, derivative is at %s for " "field '%s'\n" msgstr "" -#: ../src/solver/solver.cxx:1259 +#: ../src/solver/solver.cxx:1354 #, c-format msgid "Time derivative for variable '%s' not set" msgstr "" @@ -638,7 +638,7 @@ msgid "" "Valid choices are 'identity', 'shifted', 'fci'" msgstr "" -#: ../src/sys/options.cxx:466 +#: ../src/sys/options.cxx:478 msgid "Unused options:\n" msgstr "" @@ -676,42 +676,42 @@ msgid "" msgstr "" #. restart file should be written by physics model -#: ../src/solver/solver.cxx:700 +#: ../src/solver/solver.cxx:795 msgid "User signalled to quit. Returning\n" msgstr "" -#: ../src/sys/options.cxx:231 +#: ../src/sys/options.cxx:243 #, c-format msgid "Value for option %s = %e is not an integer" msgstr "" -#: ../src/sys/options.cxx:273 +#: ../src/sys/options.cxx:285 #, c-format msgid "Value for option %s cannot be converted to a BoutReal" msgstr "" -#: ../src/sys/options.cxx:440 +#: ../src/sys/options.cxx:452 #, c-format msgid "Value for option %s cannot be converted to a Field2D" msgstr "" -#: ../src/sys/options.cxx:390 +#: ../src/sys/options.cxx:402 #, c-format msgid "Value for option %s cannot be converted to a Field3D" msgstr "" -#: ../src/sys/options.cxx:313 +#: ../src/sys/options.cxx:325 #, c-format msgid "Value for option %s cannot be converted to a bool" msgstr "" #. 
Another type which can't be converted -#: ../src/sys/options.cxx:222 +#: ../src/sys/options.cxx:234 #, fuzzy, c-format msgid "Value for option %s is not an integer" msgstr "\"%s\" 不是目錄\n" -#: ../src/solver/solver.cxx:996 ../src/solver/solver.cxx:1000 +#: ../src/solver/solver.cxx:1091 ../src/solver/solver.cxx:1095 #, c-format msgid "Variable '%s' not initialised" msgstr "" @@ -743,7 +743,7 @@ msgid "" "option extrapolate_y=false to disable this.\n" msgstr "" -#: ../src/bout++.cxx:686 +#: ../src/bout++.cxx:692 msgid "Wall time limit in hours. By default (< 0), no limit" msgstr "" From 3ce0f3df304b417149b2e524a2bce031528a8c59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Wed, 28 Jul 2021 14:23:04 +0200 Subject: [PATCH 181/428] Fix RTD [skip ci] --- manual/sphinx/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manual/sphinx/requirements.txt b/manual/sphinx/requirements.txt index 6d5c75fb9e..7bff022dff 100644 --- a/manual/sphinx/requirements.txt +++ b/manual/sphinx/requirements.txt @@ -1,2 +1,2 @@ breathe~=4.12 -future~=0.16 +sphinx==4.0.1 From 3e11d57e5ea0fbcf49db555f843eb67601822617 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Wed, 28 Jul 2021 15:09:08 +0200 Subject: [PATCH 182/428] Add new contributors --- CITATION.cff | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/CITATION.cff b/CITATION.cff index 02aab4eed8..a32be2c063 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -37,6 +37,7 @@ authors: - family-names: Shanahan given-names: Brendan + affiliation: Max Planck Institute for Plasma Physics, Greifswald - family-names: Friedman given-names: Brett @@ -44,8 +45,10 @@ authors: - family-names: Ma given-names: Chenhao - - family-names: Schwörer + - family-names: Bold given-names: David + affiliation: Max Planck Institute for Plasma Physics, Greifswald + orcid: https://orcid.org/0000-0003-0911-8606 - family-names: Meyerson given-names: Dmitry @@ -154,6 +157,13 @@ authors: - family-names: Wang given-names: Zhanhui + + - family-names: Ahmed + given-names: Sajidah + + - family-names: James + given-names: Toby + version: 4.3.2 date-released: 2020-10-19 repository-code: https://github.com/boutproject/BOUT-dev From 796f299db3a992ef99ac6cb317da64ec8a644538 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 28 Jul 2021 13:11:14 +0100 Subject: [PATCH 183/428] Add missing import to test-squash --- tests/integrated/test-squash/runtest | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integrated/test-squash/runtest b/tests/integrated/test-squash/runtest index d5b309c372..f01e4be1ec 100755 --- a/tests/integrated/test-squash/runtest +++ b/tests/integrated/test-squash/runtest @@ -5,6 +5,7 @@ import itertools import time import numpy as np from boututils.run_wrapper import launch_safe, shell_safe +import re # requires: all_tests # requires: netcdf From ef25de7ffa9c7e8411127e5403aeb87350781784 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 28 Jul 2021 13:53:49 +0100 Subject: [PATCH 184/428] Fix laplacexy/alfven-wave example Missing namespace for `mesh` --- examples/laplacexy/alfven-wave/alfven.cxx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/laplacexy/alfven-wave/alfven.cxx b/examples/laplacexy/alfven-wave/alfven.cxx index 1fac46b83c..7054e53f5e 100644 --- a/examples/laplacexy/alfven-wave/alfven.cxx +++ b/examples/laplacexy/alfven-wave/alfven.cxx @@ -13,6 +13,8 @@ const BoutReal qe = 1.602e-19; // Electron charge const BoutReal Me = 9.109e-31; // Electron mass const 
BoutReal Mp = 1.67262158e-27; // Proton mass +using bout::globals::mesh; + class Alfven : public PhysicsModel { private: Field3D Vort, Apar; // Evolving fields From 9e616795adfd647a0c8216cd0787c85f68a3955b Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 28 Jul 2021 13:55:49 +0100 Subject: [PATCH 185/428] Update clang-tidy-review from version in next --- .github/workflows/clang-tidy-review.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/clang-tidy-review.yml b/.github/workflows/clang-tidy-review.yml index 072902d340..bd2c04c877 100644 --- a/.github/workflows/clang-tidy-review.yml +++ b/.github/workflows/clang-tidy-review.yml @@ -45,10 +45,12 @@ jobs: -DCMAKE_EXPORT_COMPILE_COMMANDS=On - name: Run clang-tidy - uses: ZedThree/clang-tidy-review@v0.3.0 + uses: ZedThree/clang-tidy-review@v0.6.1 id: review with: build_dir: build - apt_packages: "libfftw3-dev,libnetcdf-c++4-dev,libhdf5-mpi-dev,libopenmpi-dev,petsc-dev,slepc-dev,liblapack-dev,libparpack2-dev,libsundials-dev,uuid-dev" - clang_tidy_checks: '-*,performance-*,readability-*,bugprone-*,clang-analyzer-*,cppcoreguidelines-*,mpi-*,misc-*,-readability-magic-numbers,-cppcoreguidelines-avoid-magic-numbers' - + apt_packages: "libfftw3-dev,libnetcdf-c++4-dev,libopenmpi-dev,petsc-dev,slepc-dev,liblapack-dev,libparpack2-dev,libsundials-dev,uuid-dev" + clang_tidy_checks: '-*,performance-*,readability-*,bugprone-*,clang-analyzer-*,cppcoreguidelines-*,mpi-*,misc-*,-readability-magic-numbers,-cppcoreguidelines-avoid-magic-numbers,-misc-non-private-member-variables-in-classes,-cppcoreguidelines-pro-bounds-array-to-pointer-decay,-cppcoreguidelines-pro-type-vararg,-clang-analyzer-optin.mpi*' + # Googletest triggers a _lot_ of clang-tidy warnings, so ignore all + # the unit tests until they're fixed or ignored upstream + exclude: "tests/unit/*cxx" From 5aeaa4c8ceee844e1b521ec3bfe531feae91883b Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 28 Jul 2021 17:18:59 +0100 Subject: [PATCH 186/428] Bump version number, drop `-alpha` --- CITATION.cff | 2 +- configure | 455 +++++++++++++++++++++++++++- configure.ac | 2 +- manual/doxygen/Doxyfile | 2 +- manual/doxygen/Doxyfile_readthedocs | 2 +- manual/sphinx/conf.py | 2 +- 6 files changed, 445 insertions(+), 20 deletions(-) diff --git a/CITATION.cff b/CITATION.cff index a32be2c063..48ddcc834b 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -164,7 +164,7 @@ authors: - family-names: James given-names: Toby -version: 4.3.2 +version: 4.4.0 date-released: 2020-10-19 repository-code: https://github.com/boutproject/BOUT-dev url: http://boutproject.github.io/ diff --git a/configure b/configure index 0e456cfce0..363d7cad7b 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for BOUT++ 4.4.0-alpha. +# Generated by GNU Autoconf 2.69 for BOUT++ 4.4.0. # # Report bugs to . # @@ -580,8 +580,8 @@ MAKEFLAGS= # Identity of this package. 
PACKAGE_NAME='BOUT++' PACKAGE_TARNAME='bout--' -PACKAGE_VERSION='4.4.0-alpha' -PACKAGE_STRING='BOUT++ 4.4.0-alpha' +PACKAGE_VERSION='4.4.0' +PACKAGE_STRING='BOUT++ 4.4.0' PACKAGE_BUGREPORT='bd512@york.ac.uk' PACKAGE_URL='' @@ -685,6 +685,11 @@ GETTEXT_MACRO_VERSION USE_NLS SCOREPPATH sundials_config +PETSC_LIBS +PETSC_CFLAGS +PKG_CONFIG_LIBDIR +PKG_CONFIG_PATH +PKG_CONFIG PARALLELHDF5_TYPE PARALLELHDF5_LIBS PARALLELHDF5_LDFLAGS @@ -846,7 +851,12 @@ CCC CXXCPP CC CFLAGS -CPP' +CPP +PKG_CONFIG +PKG_CONFIG_PATH +PKG_CONFIG_LIBDIR +PETSC_CFLAGS +PETSC_LIBS' # Initialize some variables set by options. @@ -1387,7 +1397,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures BOUT++ 4.4.0-alpha to adapt to many kinds of systems. +\`configure' configures BOUT++ 4.4.0 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1452,7 +1462,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of BOUT++ 4.4.0-alpha:";; + short | recursive ) echo "Configuration of BOUT++ 4.4.0:";; esac cat <<\_ACEOF @@ -1524,6 +1534,14 @@ Some influential environment variables: CC C compiler command CFLAGS C compiler flags CPP C preprocessor + PKG_CONFIG path to pkg-config utility + PKG_CONFIG_PATH + directories to add to pkg-config's search path + PKG_CONFIG_LIBDIR + path overriding pkg-config's built-in search path + PETSC_CFLAGS + C compiler flags for PETSC, overriding pkg-config + PETSC_LIBS linker flags for PETSC, overriding pkg-config Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. @@ -1591,7 +1609,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -BOUT++ configure 4.4.0-alpha +BOUT++ configure 4.4.0 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -2172,7 +2190,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by BOUT++ $as_me 4.4.0-alpha, which was +It was created by BOUT++ $as_me 4.4.0, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -2655,6 +2673,7 @@ else fi + # Check whether --enable-warnings was given. if test "${enable_warnings+set}" = set; then : enableval=$enable_warnings; @@ -5933,8 +5952,10 @@ if test "$enable_debug" != ""; then : { $as_echo "$as_me:${as_lineno-$LINENO}: Enabling all debug options" >&5 $as_echo "$as_me: Enabling all debug options" >&6;} enable_checks="3" - # use -Og with available, otherwise fall back to -O0 - OPT_FLAGS="-g -O0 -Og -fno-inline -hipa1" + OPT_FLAGS="-g -O0 -fno-inline -hipa1" + # Only for not gcc add -Og due to + # https://github.com/boutproject/BOUT-dev/issues/1879 + $MPICXX --version | grep g++ -q || OPT_FLAGS="$OPT_FLAGS -Og" else @@ -9367,6 +9388,126 @@ fi # PETSc library ############################################################# + + + + + + + +if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. +set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_path_PKG_CONFIG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $PKG_CONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +PKG_CONFIG=$ac_cv_path_PKG_CONFIG +if test -n "$PKG_CONFIG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5 +$as_echo "$PKG_CONFIG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_path_PKG_CONFIG"; then + ac_pt_PKG_CONFIG=$PKG_CONFIG + # Extract the first word of "pkg-config", so it can be a program name with args. +set dummy pkg-config; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_ac_pt_PKG_CONFIG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $ac_pt_PKG_CONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG +if test -n "$ac_pt_PKG_CONFIG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_PKG_CONFIG" >&5 +$as_echo "$ac_pt_PKG_CONFIG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_pt_PKG_CONFIG" = x; then + PKG_CONFIG="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + PKG_CONFIG=$ac_pt_PKG_CONFIG + fi +else + PKG_CONFIG="$ac_cv_path_PKG_CONFIG" +fi + +fi +if test -n "$PKG_CONFIG"; then + _pkg_min_version=0.9.0 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking pkg-config is at least version $_pkg_min_version" >&5 +$as_echo_n "checking pkg-config is at least version $_pkg_min_version... 
" >&6; } + if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + PKG_CONFIG="" + fi +fi if test "x$with_petsc" != "x" && test "$with_petsc" != "no"; then : @@ -9392,6 +9533,8 @@ $as_echo "$as_me: Using PETSC_DIR=$PETSC_DIR, PETSC_ARCH=$PETSC_ARCH" >&6;} # PETSc changed the location of the conf directory in 3.5, so we # need to check both locations +# If we find nether, try to fall back to pkg-conf + PETSC_PKGCONF=no as_ac_File=`$as_echo "ac_cv_file_$PETSC_DIR/$PETSC_ARCH/conf" | $as_tr_sh` { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $PETSC_DIR/$PETSC_ARCH/conf" >&5 $as_echo_n "checking for $PETSC_DIR/$PETSC_ARCH/conf... " >&6; } @@ -9438,18 +9581,257 @@ if eval test \"x\$"$as_ac_File"\" = x"yes"; then : else - as_fn_error $? "--with-petsc was specified but could not find PETSc distribution. + +pkg_failed=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for PETSC" >&5 +$as_echo_n "checking for PETSC... " >&6; } + +if test -n "$PETSC_CFLAGS"; then + pkg_cv_PETSC_CFLAGS="$PETSC_CFLAGS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"PETSc >= 3.4.0 \""; } >&5 + ($PKG_CONFIG --exists --print-errors "PETSc >= 3.4.0 ") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_PETSC_CFLAGS=`$PKG_CONFIG --cflags "PETSc >= 3.4.0 " 2>/dev/null` + test "x$?" != "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi +if test -n "$PETSC_LIBS"; then + pkg_cv_PETSC_LIBS="$PETSC_LIBS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"PETSc >= 3.4.0 \""; } >&5 + ($PKG_CONFIG --exists --print-errors "PETSc >= 3.4.0 ") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_PETSC_LIBS=`$PKG_CONFIG --libs "PETSc >= 3.4.0 " 2>/dev/null` + test "x$?" != "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi + + + +if test $pkg_failed = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi + if test $_pkg_short_errors_supported = yes; then + PETSC_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "PETSc >= 3.4.0 " 2>&1` + else + PETSC_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "PETSc >= 3.4.0 " 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$PETSC_PKG_ERRORS" >&5 + + + +pkg_failed=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for PETSC" >&5 +$as_echo_n "checking for PETSC... " >&6; } + +if test -n "$PETSC_CFLAGS"; then + pkg_cv_PETSC_CFLAGS="$PETSC_CFLAGS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"petsc >= 3.4.0 \""; } >&5 + ($PKG_CONFIG --exists --print-errors "petsc >= 3.4.0 ") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_PETSC_CFLAGS=`$PKG_CONFIG --cflags "petsc >= 3.4.0 " 2>/dev/null` + test "x$?" 
!= "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi +if test -n "$PETSC_LIBS"; then + pkg_cv_PETSC_LIBS="$PETSC_LIBS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"petsc >= 3.4.0 \""; } >&5 + ($PKG_CONFIG --exists --print-errors "petsc >= 3.4.0 ") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_PETSC_LIBS=`$PKG_CONFIG --libs "petsc >= 3.4.0 " 2>/dev/null` + test "x$?" != "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi + + + +if test $pkg_failed = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi + if test $_pkg_short_errors_supported = yes; then + PETSC_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "petsc >= 3.4.0 " 2>&1` + else + PETSC_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "petsc >= 3.4.0 " 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$PETSC_PKG_ERRORS" >&5 + + + as_fn_error $? "--with-petsc was specified but could not find PETSc distribution. + You may need to specify PETSC_DIR and PETSC_ARCH like so: + --with-petsc PETSC_DIR=\$PETSC_DIR PETSC_ARCH=\$PETSC_ARCH + Also see the help online: + http://bout-dev.readthedocs.io/en/latest/user_docs/advanced_install.html#petsc + " "$LINENO" 5 + +elif test $pkg_failed = untried; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + + as_fn_error $? "--with-petsc was specified but could not find PETSc distribution. You may need to specify PETSC_DIR and PETSC_ARCH like so: --with-petsc PETSC_DIR=\$PETSC_DIR PETSC_ARCH=\$PETSC_ARCH Also see the help online: http://bout-dev.readthedocs.io/en/latest/user_docs/advanced_install.html#petsc " "$LINENO" 5 +else + PETSC_CFLAGS=$pkg_cv_PETSC_CFLAGS + PETSC_LIBS=$pkg_cv_PETSC_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + PETSC_PKGCONF=yes +fi + +elif test $pkg_failed = untried; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + + +pkg_failed=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for PETSC" >&5 +$as_echo_n "checking for PETSC... " >&6; } + +if test -n "$PETSC_CFLAGS"; then + pkg_cv_PETSC_CFLAGS="$PETSC_CFLAGS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"petsc >= 3.4.0 \""; } >&5 + ($PKG_CONFIG --exists --print-errors "petsc >= 3.4.0 ") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_PETSC_CFLAGS=`$PKG_CONFIG --cflags "petsc >= 3.4.0 " 2>/dev/null` + test "x$?" != "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi +if test -n "$PETSC_LIBS"; then + pkg_cv_PETSC_LIBS="$PETSC_LIBS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"petsc >= 3.4.0 \""; } >&5 + ($PKG_CONFIG --exists --print-errors "petsc >= 3.4.0 ") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_PETSC_LIBS=`$PKG_CONFIG --libs "petsc >= 3.4.0 " 2>/dev/null` + test "x$?" != "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried fi + +if test $pkg_failed = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no fi + if test $_pkg_short_errors_supported = yes; then + PETSC_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "petsc >= 3.4.0 " 2>&1` + else + PETSC_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "petsc >= 3.4.0 " 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$PETSC_PKG_ERRORS" >&5 + + + as_fn_error $? "--with-petsc was specified but could not find PETSc distribution. + You may need to specify PETSC_DIR and PETSC_ARCH like so: + --with-petsc PETSC_DIR=\$PETSC_DIR PETSC_ARCH=\$PETSC_ARCH + Also see the help online: + http://bout-dev.readthedocs.io/en/latest/user_docs/advanced_install.html#petsc + " "$LINENO" 5 +elif test $pkg_failed = untried; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + + as_fn_error $? "--with-petsc was specified but could not find PETSc distribution. + You may need to specify PETSC_DIR and PETSC_ARCH like so: + --with-petsc PETSC_DIR=\$PETSC_DIR PETSC_ARCH=\$PETSC_ARCH + Also see the help online: + http://bout-dev.readthedocs.io/en/latest/user_docs/advanced_install.html#petsc + " "$LINENO" 5 + +else + PETSC_CFLAGS=$pkg_cv_PETSC_CFLAGS + PETSC_LIBS=$pkg_cv_PETSC_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + PETSC_PKGCONF=yes +fi + +else + PETSC_CFLAGS=$pkg_cv_PETSC_CFLAGS + PETSC_LIBS=$pkg_cv_PETSC_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + PETSC_PKGCONF=yes +fi + +fi + + +fi + + + if test $PETSC_PKGCONF = no ; then : # We've found an installation, need to check we can use it. First we # need to be able to check the version number, for which we need @@ -9603,6 +9985,48 @@ fi EXTRA_INCS="$EXTRA_INCS \$(PETSC_CC_INCLUDES)" EXTRA_LIBS="$EXTRA_LIBS \$(PETSC_LIB)" +else + + # Check if PETSc was compiled with SUNDIALS + save_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $PETSC_CFLAGS" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking PETSc has SUNDIALS support" >&5 +$as_echo_n "checking PETSc has SUNDIALS support... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + #ifdef PETSC_HAVE_SUNDIALS + yes + #endif + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "yes" >/dev/null 2>&1; then : + PETSC_HAS_SUNDIALS="yes" +else + PETSC_HAS_SUNDIALS="no" +fi +rm -f conftest* + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PETSC_HAS_SUNDIALS" >&5 +$as_echo "$PETSC_HAS_SUNDIALS" >&6; } + CPPFLAGS="$save_CPPFLAGS" + + if test "$PETSC_HAS_SUNDIALS" = "yes"; then : + + CXXFLAGS="$CXXFLAGS -DPETSC_HAS_SUNDIALS " + +fi + PETSC_MAKE_INCLUDE= + HAS_PETSC="yes" + + CXXFLAGS="$CXXFLAGS -DBOUT_HAS_PETSC" + + EXTRA_INCS="$EXTRA_INCS $PETSC_CFLAGS" + EXTRA_LIBS="$EXTRA_LIBS $PETSC_LIBS" + +fi else @@ -15777,7 +16201,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. 
ac_log=" -This file was extended by BOUT++ $as_me 4.4.0-alpha, which was +This file was extended by BOUT++ $as_me 4.4.0, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -15834,7 +16258,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -BOUT++ config.status 4.4.0-alpha +BOUT++ config.status 4.4.0 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" @@ -16593,6 +17017,7 @@ BOUT_INCLUDE_PATH=$PWD/include + ac_config_files="$ac_config_files bin/bout-config" @@ -17143,7 +17568,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by BOUT++ $as_me 4.4.0-alpha, which was +This file was extended by BOUT++ $as_me 4.4.0, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -17200,7 +17625,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -BOUT++ config.status 4.4.0-alpha +BOUT++ config.status 4.4.0 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index 5a780f0a46..bd1db73019 100644 --- a/configure.ac +++ b/configure.ac @@ -32,7 +32,7 @@ # AC_PREREQ([2.69]) -AC_INIT([BOUT++],[4.4.0-alpha],[bd512@york.ac.uk]) +AC_INIT([BOUT++],[4.4.0],[bd512@york.ac.uk]) AC_CONFIG_AUX_DIR([build-aux]) AC_CONFIG_MACRO_DIR([m4]) diff --git a/manual/doxygen/Doxyfile b/manual/doxygen/Doxyfile index 1fe9e8b923..f6384f177c 100644 --- a/manual/doxygen/Doxyfile +++ b/manual/doxygen/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = BOUT++ # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 4.4.0-alpha +PROJECT_NUMBER = 4.4.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/manual/doxygen/Doxyfile_readthedocs b/manual/doxygen/Doxyfile_readthedocs index 5ec9606d3e..63e9797f4d 100644 --- a/manual/doxygen/Doxyfile_readthedocs +++ b/manual/doxygen/Doxyfile_readthedocs @@ -38,7 +38,7 @@ PROJECT_NAME = BOUT++ # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 4.4.0-alpha +PROJECT_NUMBER = 4.4.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/manual/sphinx/conf.py b/manual/sphinx/conf.py index dc83eac5bf..4b480cce0a 100755 --- a/manual/sphinx/conf.py +++ b/manual/sphinx/conf.py @@ -134,7 +134,7 @@ def __getattr__(cls, name): # The short X.Y version. version = '4.4' # The full version, including alpha/beta/rc tags. -release = '4.4.0-alpha' +release = '4.4.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
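A note on the PETSc pkg-config fallback introduced in the configure changes above: when the PETSc conf directory cannot be found from PETSC_DIR/PETSC_ARCH, configure now asks pkg-config for a "PETSc" (then "petsc") module of at least version 3.4.0, and, if that succeeds, probes PETSC_HAVE_SUNDIALS through the preprocessor rather than through PETSc's makefile includes. The sketch below is illustrative only -- the install prefix is an assumption, not something taken from the patch:

    # Point pkg-config at wherever PETSc's .pc file (PETSc.pc or petsc.pc) lives.
    # The prefix below is a placeholder for your own installation path.
    export PKG_CONFIG_PATH="$HOME/local/petsc/lib/pkgconfig:$PKG_CONFIG_PATH"

    # The same checks configure performs: the "PETSc" module name first, then "petsc".
    pkg-config --exists --print-errors "PETSc >= 3.4.0" \
        || pkg-config --exists --print-errors "petsc >= 3.4.0"

    # With the .pc file visible, PETSC_DIR/PETSC_ARCH should not be needed:
    ./configure --with-petsc

If pkg-config cannot find PETSc either, configure still aborts with the existing error message pointing at PETSC_DIR/PETSC_ARCH and the advanced installation documentation.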
From bbcc62372672a3f082b14d0a2bd00febdf1374e4 Mon Sep 17 00:00:00 2001
From: Peter Hill
Date: Wed, 28 Jul 2021 17:22:57 +0100
Subject: [PATCH 187/428] Update changelog for v4.4.0 [skip ci]

---
 CHANGELOG.md | 42 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a58d66b0e9..d2f60b29ce 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,48 @@
 See [\#2154](https://github.com/boutproject/BOUT-dev/pull/2154).
 
+## [v4.4.0](https://github.com/boutproject/BOUT-dev/tree/v4.4.0) (2021-07-30)
+
+[Full Changelog](https://github.com/boutproject/BOUT-dev/compare/v4.3.3...v4.4.0)
+
+**Merged pull requests:**
+
+- Add new contributors [\#2386](https://github.com/boutproject/BOUT-dev/pulls/2386) ([dschwoerer](https://github.com/users/dschwoerer))
+- Update locale [\#2385](https://github.com/boutproject/BOUT-dev/pulls/2385) ([dschwoerer](https://github.com/users/dschwoerer))
+- Fix RTD [\#2384](https://github.com/boutproject/BOUT-dev/pulls/2384) ([dschwoerer](https://github.com/users/dschwoerer))
+- Rename `max_nonlinear_it` to `max_nonlinear_iterations` [\#2339](https://github.com/boutproject/BOUT-dev/pulls/2339) ([johnomotani](https://github.com/users/johnomotani))
+- CVODE constraints and max_nonlinear_iterations options (4.4) [\#2304](https://github.com/boutproject/BOUT-dev/pulls/2304) ([johnomotani](https://github.com/users/johnomotani))
+- Fix some Solvers not always using user preconditioner/Jacobian (v4.4) [\#2284](https://github.com/boutproject/BOUT-dev/pulls/2284) ([ZedThree](https://github.com/users/ZedThree))
+- Fix formatting strings for pre-fmt in beuler solver [\#2278](https://github.com/boutproject/BOUT-dev/pulls/2278) ([bendudson](https://github.com/users/bendudson))
+- Backport of Backward Euler solver to v4.4 [\#2265](https://github.com/boutproject/BOUT-dev/pulls/2265) ([bendudson](https://github.com/users/bendudson))
+- Fix use of uninitialised value in Delp2 (4.4) [\#2263](https://github.com/boutproject/BOUT-dev/pulls/2263) ([johnomotani](https://github.com/users/johnomotani))
+- Save provenance tracking info from grid file (4.4) [\#2231](https://github.com/boutproject/BOUT-dev/pulls/2231) ([johnomotani](https://github.com/users/johnomotani))
+- Generate random run ID, track restarts (4.4) [\#2224](https://github.com/boutproject/BOUT-dev/pulls/2224) ([johnomotani](https://github.com/users/johnomotani))
+- Generate report for Timers (4.4) [\#2216](https://github.com/boutproject/BOUT-dev/pulls/2216) ([johnomotani](https://github.com/users/johnomotani))
+- Replace boutdata and boututils directories with submodules (v4.4) [\#2198](https://github.com/boutproject/BOUT-dev/pulls/2198) ([johnomotani](https://github.com/users/johnomotani))
+- Use bout_type="string" for strings in H5Format (4.4) [\#2194](https://github.com/boutproject/BOUT-dev/pulls/2194) ([johnomotani](https://github.com/users/johnomotani))
+- Write descriptions for std::vector and std::string variables [\#2191](https://github.com/boutproject/BOUT-dev/pulls/2191) ([johnomotani](https://github.com/users/johnomotani))
+- Fix reading of char* in Ncxx4 (4.4) [\#2189](https://github.com/boutproject/BOUT-dev/pulls/2189) ([johnomotani](https://github.com/users/johnomotani))
+- Merge master into v4.4.0-alpha [\#2174](https://github.com/boutproject/BOUT-dev/pulls/2174) ([ZedThree](https://github.com/users/ZedThree))
+- I/O for std::vector and std::string (4.4) [\#2155](https://github.com/boutproject/BOUT-dev/pulls/2155)
([johnomotani](https://github.com/users/johnomotani)) +- Check DataFile grid sizes match those in existing mesh (v4.4) [\#2148](https://github.com/boutproject/BOUT-dev/pulls/2148) ([johnomotani](https://github.com/users/johnomotani)) +- Call checkData() before returning result in Laplace inversions (v4.4) [\#2134](https://github.com/boutproject/BOUT-dev/pulls/2134) ([johnomotani](https://github.com/users/johnomotani)) +- Allow setting FFTW_EXHAUSTIVE (v4.4) [\#2132](https://github.com/boutproject/BOUT-dev/pulls/2132) ([johnomotani](https://github.com/users/johnomotani)) +- Make example relocatable [\#2127](https://github.com/boutproject/BOUT-dev/pulls/2127) ([dschwoerer](https://github.com/users/dschwoerer)) +- Merge master into v4.4.0-alpha [\#2121](https://github.com/boutproject/BOUT-dev/pulls/2121) ([ZedThree](https://github.com/users/ZedThree)) +- Handle FieldPerps in Datafile::varAdded() and Datafile::varPtr() (v4.4.0) [\#2094](https://github.com/boutproject/BOUT-dev/pulls/2094) ([johnomotani](https://github.com/users/johnomotani)) +- Staggered grids in InvertPar (v4.4.0) [\#2088](https://github.com/boutproject/BOUT-dev/pulls/2088) ([johnomotani](https://github.com/users/johnomotani)) +- Allow descriptions of output variables; save some diagnostics for solvers (v4.4) [\#2086](https://github.com/boutproject/BOUT-dev/pulls/2086) ([johnomotani](https://github.com/users/johnomotani)) +- Correct Grad2_par2 implementation in InvertParCR (v4.4.0) [\#2077](https://github.com/boutproject/BOUT-dev/pulls/2077) ([johnomotani](https://github.com/users/johnomotani)) +- Enable staggered versions of SplitFluxDerivativeType (4.4) [\#2059](https://github.com/boutproject/BOUT-dev/pulls/2059) ([johnomotani](https://github.com/users/johnomotani)) +- Merge master into v4.4.0-alpha [\#1998](https://github.com/boutproject/BOUT-dev/pulls/1998) ([ZedThree](https://github.com/users/ZedThree)) +- LaplaceXY: finite difference option (v4.4) [\#1924](https://github.com/boutproject/BOUT-dev/pulls/1924) ([johnomotani](https://github.com/users/johnomotani)) +- Backport of Laplace performance test [\#1910](https://github.com/boutproject/BOUT-dev/pulls/1910) ([JosephThomasParker](https://github.com/users/JosephThomasParker)) +- Macro for creating enum classes (v4.4) [\#1895](https://github.com/boutproject/BOUT-dev/pulls/1895) ([johnomotani](https://github.com/users/johnomotani)) +- Implement toFieldAligned and fromFieldAligned for Vector3D (v4.4) [\#1878](https://github.com/boutproject/BOUT-dev/pulls/1878) ([johnomotani](https://github.com/users/johnomotani)) +- Remove 3-element list indexers for collect() [\#1862](https://github.com/boutproject/BOUT-dev/pulls/1862) ([johnomotani](https://github.com/users/johnomotani)) +- Allow user to override library option defaults (v4.4) [\#1849](https://github.com/boutproject/BOUT-dev/pulls/1849) ([johnomotani](https://github.com/users/johnomotani)) + ## [v4.3.2](https://github.com/boutproject/BOUT-dev/tree/v4.3.2) (2020-10-19) [Full Changelog](https://github.com/boutproject/BOUT-dev/compare/v4.3.1...v4.3.2) From 562e6c57dec4dc51b73012fd66db30cdf95ff88f Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 28 Jul 2021 18:42:18 +0100 Subject: [PATCH 188/428] Bump googletest version --- externalpackages/googletest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/externalpackages/googletest b/externalpackages/googletest index 8b6d3f9c4a..96f4ce02a3 160000 --- a/externalpackages/googletest +++ b/externalpackages/googletest @@ -1 +1 @@ -Subproject commit 
8b6d3f9c4a774bef3081195d422993323b6bb2e0 +Subproject commit 96f4ce02a3a78d63981c67acbd368945d11d7d70 From 299a7ed7c1c4f404093149ff25140739547f9790 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 28 Jul 2021 18:49:12 +0100 Subject: [PATCH 189/428] Use sor as default preconditioner for test-laplacexy --- tests/integrated/test-laplacexy/data/BOUT.inp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integrated/test-laplacexy/data/BOUT.inp b/tests/integrated/test-laplacexy/data/BOUT.inp index 66bf4b785a..9f7c33fd3c 100644 --- a/tests/integrated/test-laplacexy/data/BOUT.inp +++ b/tests/integrated/test-laplacexy/data/BOUT.inp @@ -24,7 +24,7 @@ ixseps1 = 64 ixseps2 = 64 [laplacexy] -pctype = shell # Supply a second solver as a preconditioner +pctype = sor # Supply a second solver as a preconditioner #pctype = hypre # Algebraic multigrid preconditioner using hypre library finite_volume = false rtol = 1.e-14 From a4f868ab650ef63cf5c150d437c93128b932cc20 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 30 Sep 2019 13:53:26 +0100 Subject: [PATCH 190/428] Replace test-datadir with test-command-args subtests --- tests/integrated/test-command-args/runtest | 16 ++++++++ tests/integrated/test-datadir/README.md | 5 --- tests/integrated/test-datadir/makefile | 9 ----- tests/integrated/test-datadir/runtest | 40 ------------------- tests/integrated/test-datadir/test.cxx | 7 ---- .../integrated/test-datadir/test_dir/BOUT.inp | 24 ----------- 6 files changed, 16 insertions(+), 85 deletions(-) delete mode 100644 tests/integrated/test-datadir/README.md delete mode 100644 tests/integrated/test-datadir/makefile delete mode 100755 tests/integrated/test-datadir/runtest delete mode 100644 tests/integrated/test-datadir/test.cxx delete mode 100644 tests/integrated/test-datadir/test_dir/BOUT.inp diff --git a/tests/integrated/test-command-args/runtest b/tests/integrated/test-command-args/runtest index b4494dc257..6220bee777 100755 --- a/tests/integrated/test-command-args/runtest +++ b/tests/integrated/test-command-args/runtest @@ -72,6 +72,22 @@ class TestCommandLineArgs(unittest.TestCase): self.assertTrue(os.path.exists("test/BOUT.log.0"), msg="FAIL: No BOUT.log.0 file in data directory") + def testDirectoryArgumentNonExistentDirectory(self): + with self.assertRaises(RuntimeError): + shell_safe(self.command + " -d non_existent", pipe=True) + with open("stderr.log") as f: + contents = f.read() + self.assertIn('"non_existent" does not exist', contents, + msg="FAIL: Error message not printed when missing input directory") + + def testDirectoryArgumentNonDirectory(self): + with self.assertRaises(RuntimeError): + shell_safe(self.command + " -d runtest", pipe=True) + with open("stderr.log") as f: + contents = f.read() + self.assertIn('"runtest" is not a directory', contents, + msg="FAIL: Error message not printed when missing input directory") + def testDirectoryArgumentOldSettingsFile(self): self.makeDirAndCopyInput("test") shell_safe(self.command + " -d test", pipe=True) diff --git a/tests/integrated/test-datadir/README.md b/tests/integrated/test-datadir/README.md deleted file mode 100644 index 6c7f86c53a..0000000000 --- a/tests/integrated/test-datadir/README.md +++ /dev/null @@ -1,5 +0,0 @@ -Test Datadir -============ - -A simple test to make sure that if the datadir is not found, it throws -a somewhat useable error ... 
\ No newline at end of file diff --git a/tests/integrated/test-datadir/makefile b/tests/integrated/test-datadir/makefile deleted file mode 100644 index 22cd8a557c..0000000000 --- a/tests/integrated/test-datadir/makefile +++ /dev/null @@ -1,9 +0,0 @@ -BOUT_TOP = ../../.. -SOURCEC = test.cxx -include $(BOUT_TOP)/make.config - - -clean:: clean_log - -clean_log: - @rm run.log* -f diff --git a/tests/integrated/test-datadir/runtest b/tests/integrated/test-datadir/runtest deleted file mode 100755 index d1b4fabb81..0000000000 --- a/tests/integrated/test-datadir/runtest +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env sh -make || exit - -#requires: all_tests - -of=run.log.1 -if ! ./test -d test_dir > $of 2>&1 -then - cat $of - exit 1 -fi - -of=run.log.2 -if ./test -d not_here > $of 2>&1 -then - echo "Expected error as not_here is not here" - exit 1 -else - if ! grep -i not $of|grep -i dir | grep not_here > /dev/null 2>&1 - then - echo "Expected error message containing that not_here is not a directory" - exit 1 - fi -fi - -of=run.log.3 -if ./test -d runtest > $of 2>&1 -then - echo "Expected error as runtest is not a directory" - exit 1 -else - if ! grep -i not $of|grep -i dir | grep runtest > /dev/null 2>&1 - then - echo "Expected error message containing that runtest is not a directory" - exit 1 - fi -fi - - -echo "Test Succesfull" diff --git a/tests/integrated/test-datadir/test.cxx b/tests/integrated/test-datadir/test.cxx deleted file mode 100644 index 2916f84dba..0000000000 --- a/tests/integrated/test-datadir/test.cxx +++ /dev/null @@ -1,7 +0,0 @@ -#include - -int main(int argc, char **argv) { - auto ret = BoutInitialise(argc, argv); - BoutFinalise(false); - return ret; -} diff --git a/tests/integrated/test-datadir/test_dir/BOUT.inp b/tests/integrated/test-datadir/test_dir/BOUT.inp deleted file mode 100644 index ebe59db937..0000000000 --- a/tests/integrated/test-datadir/test_dir/BOUT.inp +++ /dev/null @@ -1,24 +0,0 @@ -MZ=1 - -nout=1 -timestep=1 - -periodicY=true -MXG=1 -MYG=1 - -zmax=1/2/pi - -[MESH] -dy=1 -dx=1 -nx=5 -ny=5 - -StaggerGrids=true - - -[All] -bndry_all = none -function=x#*y*y*z -scale=1 From fab85bbb8d8be3d6d82a9aaf3daf280fa0337148 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 30 Sep 2019 13:53:52 +0100 Subject: [PATCH 191/428] CMake: Change bout_test_copy_file to update dest if source changes --- CMakeLists.txt | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5486e98b4a..e6d2543c92 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -579,12 +579,11 @@ if (HAS_PRETTY_FUNCTION) endif() # Copy FILENAME from source directory to build directory -# and add dependency on TARGET -macro(bout_test_copy_file TARGET FILENAME) - add_custom_command(TARGET "${TARGET}" POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy - ${CMAKE_CURRENT_SOURCE_DIR}/"${FILENAME}" - ${CMAKE_CURRENT_BINARY_DIR}/"${FILENAME}") +macro(bout_test_copy_file FILENAME) + configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/${FILENAME} + ${CMAKE_CURRENT_BINARY_DIR}/${FILENAME} + COPYONLY) endmacro() # Add a new integrated test @@ -624,16 +623,16 @@ function(bout_add_integrated_test TESTNAME) if (BOUT_TEST_OPTIONS_USE_RUNTEST) add_test(NAME ${TESTNAME} COMMAND ./runtest) - bout_test_copy_file("${TESTNAME}" runtest) + bout_test_copy_file(runtest) else() add_test(NAME ${TESTNAME} COMMAND ${TESTNAME}) endif() if (BOUT_TEST_OPTIONS_USE_DATA_BOUT_INP) - bout_test_copy_file("${TESTNAME}" data/BOUT.inp) + bout_test_copy_file(data/BOUT.inp) endif() if 
(BOUT_TEST_OPTIONS_EXTRA_FILES) foreach (FILE ${BOUT_TEST_OPTIONS_EXTRA_FILES}) - bout_test_copy_file("${TESTNAME}" "${FILE}") + bout_test_copy_file("${FILE}") endforeach() endif() set_target_properties(${TESTNAME} PROPERTIES FOLDER tests/integrated) From 8020a95a6e87b2482652de4a1ff9a0c0c7769421 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 30 Sep 2019 16:03:40 +0100 Subject: [PATCH 192/428] CMake: Fix wrong name for REVISION macro definition --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e6d2543c92..8cc8bd0b88 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -428,7 +428,7 @@ include(GetGitRevisionDescription) get_git_head_revision(GIT_REFSPEC GIT_SHA1) message(STATUS "Git revision: ${GIT_SHA1}") target_compile_definitions(bout++ - PUBLIC "GIT_REVISION=${GIT_SHA1}") + PUBLIC "REVISION=${GIT_SHA1}") set(BOUT_GIT_REVISION ${GIT_SHA1}) # Optional dependencies From 2196345d5f9458bfebd5141170790be6c06d4c5c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 30 Sep 2019 16:55:53 +0100 Subject: [PATCH 193/428] CMake: Allow integrated tests to set executable name --- CMakeLists.txt | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8cc8bd0b88..3245562ba1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -604,11 +604,14 @@ endmacro() # - EXTRA_FILES: any extra files that are required to run the test # # - REQUIRES: list of variables that must be truthy to enable test +# +# - EXECUTABLE_NAME: name of the executable, if different from the test name function(bout_add_integrated_test TESTNAME) set(options USE_RUNTEST USE_DATA_BOUT_INP) + set(oneValueArgs EXECUTABLE_NAME) set(multiValueArgs SOURCES EXTRA_FILES REQUIRES) - cmake_parse_arguments(BOUT_TEST_OPTIONS "${options}" "" "${multiValueArgs}" ${ARGN}) + cmake_parse_arguments(BOUT_TEST_OPTIONS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) foreach (REQUIREMENT IN LISTS BOUT_TEST_OPTIONS_REQUIRES) if (NOT ${REQUIREMENT}) @@ -621,6 +624,10 @@ function(bout_add_integrated_test TESTNAME) target_link_libraries(${TESTNAME} bout++) target_include_directories(${TESTNAME} PRIVATE $) + if (BOUT_TEST_OPTIONS_EXECUTABLE_NAME) + set_target_properties(${TESTNAME} PROPERTIES OUTPUT_NAME ${BOUT_TEST_OPTIONS_EXECUTABLE_NAME}) + endif() + if (BOUT_TEST_OPTIONS_USE_RUNTEST) add_test(NAME ${TESTNAME} COMMAND ./runtest) bout_test_copy_file(runtest) From afad1c6ad7382e2d84fd861e25f93b4326e5d7dc Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 1 Oct 2019 11:37:50 +0100 Subject: [PATCH 194/428] CMake: Add BOUT_FLAGS_STRING define --- CMakeLists.txt | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3245562ba1..d0886b114b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -578,6 +578,47 @@ if (HAS_PRETTY_FUNCTION) PUBLIC "BOUT_HAS_PRETTY_FUNCTION") endif() +# We want to compile the actual flags used into the library so we can +# see them at runtime. This needs a few steps: + +# 1. Get the macro definitions. They come as a ;-separated list and +# without the -D. We also need to also stick a -D on the front of +# the first item +get_property(BOUT_COMPILE_DEFINITIONS + TARGET bout++ + PROPERTY COMPILE_DEFINITIONS) +string(REPLACE ";" " -D" BOUT_COMPILE_DEFINITIONS "${BOUT_COMPILE_DEFINITIONS}") +string(CONCAT BOUT_COMPILE_DEFINITIONS " -D" "${BOUT_COMPILE_DEFINITIONS}") + +# 2. Get the compiler options. 
Again, they come as a ;-separated +# list. Note that they don't include optimisation or debug flags: +# they're in the CMAKE_CXX_FLAGS* variables +get_property(BOUT_COMPILE_OPTIONS + TARGET bout++ + PROPERTY COMPILE_OPTIONS) +string(REPLACE ";" " " BOUT_COMPILE_OPTIONS "${BOUT_COMPILE_OPTIONS}") + +# 3. The optimisation and/or debug flags are in the CMAKE_CXX_FLAGS* +# variables. We need both the common flags as well as those for the +# build type actually being used. Note: this might behave weirdly +# on Windows. Might need to expand CMAKE_CONFIGURATION_TYPES +# instead? +string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER) +string(CONCAT BOUT_COMPILE_BUILD_FLAGS + " " + "${CMAKE_CXX_FLAGS}" + "${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") + +# 4. Now we join all the flags from the first three steps together +string(CONCAT BOUT_FLAGS_STRING + "${BOUT_COMPILE_OPTIONS}" + "${BOUT_COMPILE_DEFINITIONS}" + "${BOUT_COMPILE_BUILD_FLAGS}") + +# 5. Finally actually add the flags as a define +target_compile_definitions(bout++ + PRIVATE BOUT_FLAGS_STRING=${BOUT_FLAGS_STRING}) + # Copy FILENAME from source directory to build directory macro(bout_test_copy_file FILENAME) configure_file( From 9dfe950e59afddc5c1979f2df382f7dc10a96084 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 1 Oct 2019 11:39:20 +0100 Subject: [PATCH 195/428] Use command line to change parameter rather than sed-ing BOUT.inp --- tests/integrated/test-drift-instability/runtest.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/integrated/test-drift-instability/runtest.py b/tests/integrated/test-drift-instability/runtest.py index d632e4ddab..71001c9bf6 100755 --- a/tests/integrated/test-drift-instability/runtest.py +++ b/tests/integrated/test-drift-instability/runtest.py @@ -66,9 +66,6 @@ code = 0 # Return code for zeff in zlist: # Create the input file, setting Zeff - # If we get passed Staggered or something like this, use staggered config file - inp='BOUT_stag.inp' if 'stag' in [i.lower()[:4] for i in argv] else 'BOUT.inp' - shell_safe("sed 's/Zeff = 128.0/Zeff = "+str(zeff)+"/g' "+inp+" > data/BOUT.inp") timestep = 5e3 if zeff < 128: # reduce time-step. 
At large times these cases produce noise @@ -80,7 +77,9 @@ print("Running drift instability test, zeff = ", zeff) # Run the case - s, out = launch_safe("./2fluid timestep="+str(timestep), nproc=nproc, mthread=nthreads, pipe=True) + s, out = launch_safe("./2fluid 2fluid:Zeff={} timestep={}" + .format(zeff, timestep), + nproc=nproc, mthread=nthreads, pipe=True) f = open("run.log."+str(zeff), "w") f.write(out) f.close() From e2f5bdb239fbc10d3879ee790c7d4dea4baa89d1 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 8 Oct 2019 16:29:28 +0100 Subject: [PATCH 196/428] CMake: Add build scripts for more integrated tests --- tests/integrated/CMakeLists.txt | 23 +++++++++++++++++++ .../CMakeLists.txt | 7 ++++++ .../test-drift-instability/CMakeLists.txt | 7 ++++++ .../test-fieldgroupComm/CMakeLists.txt | 7 ++++++ .../CMakeLists.txt | 12 ++++++++++ tests/integrated/test-gyro/CMakeLists.txt | 7 ++++++ .../CMakeLists.txt | 6 +++++ .../test-interpolate/CMakeLists.txt | 5 ++++ tests/integrated/test-invpar/CMakeLists.txt | 7 ++++++ .../test-multigrid_laplace/CMakeLists.txt | 6 +++++ .../test-naulin-laplace/CMakeLists.txt | 6 +++++ .../test-options-netcdf/CMakeLists.txt | 6 +++++ .../test-petsc_laplace/CMakeLists.txt | 7 ++++++ .../CMakeLists.txt | 13 +++++++++++ .../integrated/test-restart-io/CMakeLists.txt | 6 +++++ .../test-restart-io_hdf5/CMakeLists.txt | 7 ++++++ .../integrated/test-restarting/CMakeLists.txt | 6 +++++ tests/integrated/test-smooth/CMakeLists.txt | 7 ++++++ tests/integrated/test-snb/CMakeLists.txt | 6 +++++ .../test-stopCheck-file/CMakeLists.txt | 10 ++++++++ .../test-twistshift-staggered/CMakeLists.txt | 6 +++++ .../integrated/test-twistshift/CMakeLists.txt | 5 ++++ tests/integrated/test-vec/CMakeLists.txt | 6 +++++ tests/integrated/test-yupdown/CMakeLists.txt | 6 +++++ 24 files changed, 184 insertions(+) create mode 100644 tests/integrated/test-drift-instability-staggered/CMakeLists.txt create mode 100644 tests/integrated/test-drift-instability/CMakeLists.txt create mode 100644 tests/integrated/test-fieldgroupComm/CMakeLists.txt create mode 100644 tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt create mode 100644 tests/integrated/test-gyro/CMakeLists.txt create mode 100644 tests/integrated/test-interchange-instability/CMakeLists.txt create mode 100644 tests/integrated/test-interpolate/CMakeLists.txt create mode 100644 tests/integrated/test-invpar/CMakeLists.txt create mode 100644 tests/integrated/test-multigrid_laplace/CMakeLists.txt create mode 100644 tests/integrated/test-naulin-laplace/CMakeLists.txt create mode 100644 tests/integrated/test-options-netcdf/CMakeLists.txt create mode 100644 tests/integrated/test-petsc_laplace/CMakeLists.txt create mode 100644 tests/integrated/test-petsc_laplace_MAST-grid/CMakeLists.txt create mode 100644 tests/integrated/test-restart-io/CMakeLists.txt create mode 100644 tests/integrated/test-restart-io_hdf5/CMakeLists.txt create mode 100644 tests/integrated/test-restarting/CMakeLists.txt create mode 100644 tests/integrated/test-smooth/CMakeLists.txt create mode 100644 tests/integrated/test-snb/CMakeLists.txt create mode 100644 tests/integrated/test-stopCheck-file/CMakeLists.txt create mode 100644 tests/integrated/test-twistshift-staggered/CMakeLists.txt create mode 100644 tests/integrated/test-twistshift/CMakeLists.txt create mode 100644 tests/integrated/test-vec/CMakeLists.txt create mode 100644 tests/integrated/test-yupdown/CMakeLists.txt diff --git a/tests/integrated/CMakeLists.txt b/tests/integrated/CMakeLists.txt index 
cd67d1270b..085625bb7c 100644 --- a/tests/integrated/CMakeLists.txt +++ b/tests/integrated/CMakeLists.txt @@ -13,3 +13,26 @@ add_subdirectory(test-laplacexy-short) add_subdirectory(test-slepc-solver) add_subdirectory(test-solver) add_subdirectory(test-stopCheck) +add_subdirectory(test-stopCheck-file) +add_subdirectory(test-drift-instability) +add_subdirectory(test-drift-instability-staggered) +add_subdirectory(test-fieldgroupComm) +add_subdirectory(test-griddata-yboundary-guards) +add_subdirectory(test-gyro) +add_subdirectory(test-interchange-instability) +add_subdirectory(test-interpolate) +add_subdirectory(test-invpar) +add_subdirectory(test-multigrid_laplace) +add_subdirectory(test-naulin-laplace) +add_subdirectory(test-options-netcdf) +add_subdirectory(test-petsc_laplace_MAST-grid) +add_subdirectory(test-petsc_laplace) +add_subdirectory(test-restarting) +add_subdirectory(test-restart-io) +add_subdirectory(test-restart-io_hdf5) +add_subdirectory(test-smooth) +add_subdirectory(test-snb) +add_subdirectory(test-twistshift) +add_subdirectory(test-twistshift-staggered) +add_subdirectory(test-vec) +add_subdirectory(test-yupdown) diff --git a/tests/integrated/test-drift-instability-staggered/CMakeLists.txt b/tests/integrated/test-drift-instability-staggered/CMakeLists.txt new file mode 100644 index 0000000000..a616c91250 --- /dev/null +++ b/tests/integrated/test-drift-instability-staggered/CMakeLists.txt @@ -0,0 +1,7 @@ +bout_add_integrated_test(drift-instability-staggered + SOURCES 2fluid.cxx + EXECUTABLE_NAME 2fluid + USE_RUNTEST + USE_DATA_BOUT_INP + EXTRA_FILES runtest.py uedge.grd_std.cdl + ) diff --git a/tests/integrated/test-drift-instability/CMakeLists.txt b/tests/integrated/test-drift-instability/CMakeLists.txt new file mode 100644 index 0000000000..df94ab2394 --- /dev/null +++ b/tests/integrated/test-drift-instability/CMakeLists.txt @@ -0,0 +1,7 @@ +bout_add_integrated_test(drift-instability + SOURCES 2fluid.cxx + EXECUTABLE_NAME 2fluid + USE_RUNTEST + USE_DATA_BOUT_INP + EXTRA_FILES runtest.py uedge.grd_std.cdl + ) diff --git a/tests/integrated/test-fieldgroupComm/CMakeLists.txt b/tests/integrated/test-fieldgroupComm/CMakeLists.txt new file mode 100644 index 0000000000..ff2326e27b --- /dev/null +++ b/tests/integrated/test-fieldgroupComm/CMakeLists.txt @@ -0,0 +1,7 @@ +bout_add_integrated_test(fieldgroupComm + SOURCES test_fieldgroupcomm.cxx + EXECUTABLE_NAME test + USE_RUNTEST + USE_DATA_BOUT_INP + EXTRA_FILES cyclone_68x32.nc + ) diff --git a/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt b/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt new file mode 100644 index 0000000000..3f7446cad4 --- /dev/null +++ b/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt @@ -0,0 +1,12 @@ +bout_add_integrated_test(griddata-yboundary-guards + SOURCES test_griddata.cxx + EXECUTABLE_NAME test_griddata + USE_RUNTEST + EXTRA_FILES + data-doublenull-0/BOUT.inp + data-doublenull-1/BOUT.inp + data-doublenull-2/BOUT.inp + data-singlenull-0/BOUT.inp + data-singlenull-1/BOUT.inp + data-singlenull-2/BOUT.inp + ) diff --git a/tests/integrated/test-gyro/CMakeLists.txt b/tests/integrated/test-gyro/CMakeLists.txt new file mode 100644 index 0000000000..3db23c9ee9 --- /dev/null +++ b/tests/integrated/test-gyro/CMakeLists.txt @@ -0,0 +1,7 @@ +bout_add_integrated_test(test-gyro + SOURCES test_gyro.cxx + EXECUTABLE_NAME test_gyro + USE_RUNTEST + USE_DATA_BOUT_INP + EXTRA_FILES cyclone_68x32.nc data/benchmark.0.nc + ) diff --git 
a/tests/integrated/test-interchange-instability/CMakeLists.txt b/tests/integrated/test-interchange-instability/CMakeLists.txt new file mode 100644 index 0000000000..975172ed33 --- /dev/null +++ b/tests/integrated/test-interchange-instability/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_integrated_test(interchange-instability + SOURCES 2fluid.cxx + EXECUTABLE_NAME 2fluid + USE_RUNTEST + EXTRA_FILES slab.6b5.r1.cdl slab.6b5.r10.cdl data_1/BOUT.inp data_10/BOUT.inp + ) diff --git a/tests/integrated/test-interpolate/CMakeLists.txt b/tests/integrated/test-interpolate/CMakeLists.txt new file mode 100644 index 0000000000..d8a3845849 --- /dev/null +++ b/tests/integrated/test-interpolate/CMakeLists.txt @@ -0,0 +1,5 @@ +bout_add_integrated_test(test_interpolate + SOURCES test_interpolate.cxx + USE_RUNTEST + USE_DATA_BOUT_INP + ) diff --git a/tests/integrated/test-invpar/CMakeLists.txt b/tests/integrated/test-invpar/CMakeLists.txt new file mode 100644 index 0000000000..9f8a3b3c54 --- /dev/null +++ b/tests/integrated/test-invpar/CMakeLists.txt @@ -0,0 +1,7 @@ +bout_add_integrated_test(test-invpar + SOURCES test_invpar.cxx + EXECUTABLE_NAME test_invpar + USE_RUNTEST + USE_DATA_BOUT_INP + EXTRA_FILES test_io.grd.nc + ) diff --git a/tests/integrated/test-multigrid_laplace/CMakeLists.txt b/tests/integrated/test-multigrid_laplace/CMakeLists.txt new file mode 100644 index 0000000000..e3a30d569e --- /dev/null +++ b/tests/integrated/test-multigrid_laplace/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_integrated_test(test-multigrid-laplace + SOURCES test_multigrid_laplace.cxx + EXECUTABLE_NAME test_multigrid_laplace + USE_RUNTEST + USE_DATA_BOUT_INP + ) diff --git a/tests/integrated/test-naulin-laplace/CMakeLists.txt b/tests/integrated/test-naulin-laplace/CMakeLists.txt new file mode 100644 index 0000000000..22f75bf0b9 --- /dev/null +++ b/tests/integrated/test-naulin-laplace/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_integrated_test(test-naulin-laplace + SOURCES test_naulin_laplace.cxx + EXECUTABLE_NAME test_naulin_laplace + USE_RUNTEST + USE_DATA_BOUT_INP + ) diff --git a/tests/integrated/test-options-netcdf/CMakeLists.txt b/tests/integrated/test-options-netcdf/CMakeLists.txt new file mode 100644 index 0000000000..cfb7a3c016 --- /dev/null +++ b/tests/integrated/test-options-netcdf/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_integrated_test(test-options-netcdf + SOURCES test-options-netcdf.cxx + USE_RUNTEST + USE_DATA_BOUT_INP + REQUIRES BOUT_HAS_NETCDF + ) diff --git a/tests/integrated/test-petsc_laplace/CMakeLists.txt b/tests/integrated/test-petsc_laplace/CMakeLists.txt new file mode 100644 index 0000000000..1a7934628b --- /dev/null +++ b/tests/integrated/test-petsc_laplace/CMakeLists.txt @@ -0,0 +1,7 @@ +bout_add_integrated_test(test-petsc-laplace + SOURCES test_petsc_laplace.cxx + EXECUTABLE_NAME test_petsc_laplace + REQUIRES BOUT_HAS_PETSC + USE_RUNTEST + USE_DATA_BOUT_INP + ) diff --git a/tests/integrated/test-petsc_laplace_MAST-grid/CMakeLists.txt b/tests/integrated/test-petsc_laplace_MAST-grid/CMakeLists.txt new file mode 100644 index 0000000000..9287892580 --- /dev/null +++ b/tests/integrated/test-petsc_laplace_MAST-grid/CMakeLists.txt @@ -0,0 +1,13 @@ +bout_add_integrated_test(test-petsc-laplace-MAST-grid + SOURCES test_petsc_laplace_MAST_grid.cxx + EXECUTABLE_NAME test_petsc_laplace_MAST_grid + REQUIRES BOUT_HAS_PETSC + USE_RUNTEST + USE_DATA_BOUT_INP + EXTRA_FILES + grids/grid_MAST_SOL_jyis2.nc + grids/grid_MAST_SOL_jyis34.nc + grids/grid_MAST_SOL_jyis65.nc + grids/grid_MAST_SOL_jyis81.nc + 
grids/grid_MAST_SOL_jyis113.nc + ) diff --git a/tests/integrated/test-restart-io/CMakeLists.txt b/tests/integrated/test-restart-io/CMakeLists.txt new file mode 100644 index 0000000000..86d389ea10 --- /dev/null +++ b/tests/integrated/test-restart-io/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_integrated_test(test-restart-io + SOURCES test-restart-io.cxx + USE_RUNTEST + USE_DATA_BOUT_INP + REQUIRES BOUT_HAS_NETCDF + ) diff --git a/tests/integrated/test-restart-io_hdf5/CMakeLists.txt b/tests/integrated/test-restart-io_hdf5/CMakeLists.txt new file mode 100644 index 0000000000..2f1ab901b3 --- /dev/null +++ b/tests/integrated/test-restart-io_hdf5/CMakeLists.txt @@ -0,0 +1,7 @@ +bout_add_integrated_test(test-restart-io-hdf5 + SOURCES test-restart-io.cxx + EXECUTABLE_NAME test-restart-io + USE_RUNTEST + USE_DATA_BOUT_INP + REQUIRES BOUT_HAS_HDF5 + ) diff --git a/tests/integrated/test-restarting/CMakeLists.txt b/tests/integrated/test-restarting/CMakeLists.txt new file mode 100644 index 0000000000..a9046f4daa --- /dev/null +++ b/tests/integrated/test-restarting/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_integrated_test(test-restarting + SOURCES test_restarting.cxx + EXECUTABLE_NAME test_restarting + USE_RUNTEST + USE_DATA_BOUT_INP + ) diff --git a/tests/integrated/test-smooth/CMakeLists.txt b/tests/integrated/test-smooth/CMakeLists.txt new file mode 100644 index 0000000000..98f01fdb07 --- /dev/null +++ b/tests/integrated/test-smooth/CMakeLists.txt @@ -0,0 +1,7 @@ +bout_add_integrated_test(test-smooth + SOURCES test_smooth.cxx + EXECUTABLE_NAME test_smooth + USE_RUNTEST + USE_DATA_BOUT_INP + EXTRA_FILES test_smooth.nc data/benchmark.0.nc + ) diff --git a/tests/integrated/test-snb/CMakeLists.txt b/tests/integrated/test-snb/CMakeLists.txt new file mode 100644 index 0000000000..51393e554a --- /dev/null +++ b/tests/integrated/test-snb/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_integrated_test(test-snb + SOURCES test_snb.cxx + EXECUTABLE_NAME test_snb + USE_RUNTEST + USE_DATA_BOUT_INP + ) diff --git a/tests/integrated/test-stopCheck-file/CMakeLists.txt b/tests/integrated/test-stopCheck-file/CMakeLists.txt new file mode 100644 index 0000000000..4c95afa447 --- /dev/null +++ b/tests/integrated/test-stopCheck-file/CMakeLists.txt @@ -0,0 +1,10 @@ +bout_add_integrated_test(test_stopCheck-file + SOURCES test_stopCheck.cxx + EXECUTABLE_NAME test_stopCheck + USE_RUNTEST + USE_DATA_BOUT_INP + EXTRA_FILES + data/BOUT.stop + dataSecond/BOUT.inp + dataSecond/otherStop.check + ) diff --git a/tests/integrated/test-twistshift-staggered/CMakeLists.txt b/tests/integrated/test-twistshift-staggered/CMakeLists.txt new file mode 100644 index 0000000000..f84c2eb9e7 --- /dev/null +++ b/tests/integrated/test-twistshift-staggered/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_integrated_test(test-twistshift-staggered + SOURCES test-twistshift.cxx + EXECUTABLE_NAME test-twistshift + USE_RUNTEST + USE_DATA_BOUT_INP + ) diff --git a/tests/integrated/test-twistshift/CMakeLists.txt b/tests/integrated/test-twistshift/CMakeLists.txt new file mode 100644 index 0000000000..e10116fda4 --- /dev/null +++ b/tests/integrated/test-twistshift/CMakeLists.txt @@ -0,0 +1,5 @@ +bout_add_integrated_test(test-twistshift + SOURCES test-twistshift.cxx + USE_RUNTEST + USE_DATA_BOUT_INP + ) diff --git a/tests/integrated/test-vec/CMakeLists.txt b/tests/integrated/test-vec/CMakeLists.txt new file mode 100644 index 0000000000..719ff2dfe4 --- /dev/null +++ b/tests/integrated/test-vec/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_integrated_test(test-vec + SOURCES testVec.cxx + 
EXECUTABLE_NAME testVec + USE_RUNTEST + USE_DATA_BOUT_INP + ) diff --git a/tests/integrated/test-yupdown/CMakeLists.txt b/tests/integrated/test-yupdown/CMakeLists.txt new file mode 100644 index 0000000000..6f0c03b083 --- /dev/null +++ b/tests/integrated/test-yupdown/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_integrated_test(test-yupdown + SOURCES test_yupdown.cxx + EXECUTABLE_NAME test_yupdown + USE_RUNTEST + USE_DATA_BOUT_INP + ) From 729d51239be37cec04008c51ddbfefcd1eda579e Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 8 Oct 2019 16:31:06 +0100 Subject: [PATCH 197/428] Replace test-fieldgroup with unit test Test checks that FieldGroup(int) doesn't compile: this can now be done via std::is_constructable --- tests/integrated/test-fieldgroup/README.md | 15 ---------- tests/integrated/test-fieldgroup/makefile | 6 ---- tests/integrated/test-fieldgroup/runtest | 30 ------------------- .../integrated/test-fieldgroup/test_fail.cxx | 17 ----------- tests/unit/field/test_fieldgroup.cxx | 10 +++++++ 5 files changed, 10 insertions(+), 68 deletions(-) delete mode 100644 tests/integrated/test-fieldgroup/README.md delete mode 100644 tests/integrated/test-fieldgroup/makefile delete mode 100755 tests/integrated/test-fieldgroup/runtest delete mode 100644 tests/integrated/test-fieldgroup/test_fail.cxx diff --git a/tests/integrated/test-fieldgroup/README.md b/tests/integrated/test-fieldgroup/README.md deleted file mode 100644 index 6d8a2b4c2f..0000000000 --- a/tests/integrated/test-fieldgroup/README.md +++ /dev/null @@ -1,15 +0,0 @@ -Test FieldGroup class -===================== - -The FieldGroup class stores a collection of fields, and is used in communications. -Variables stored in FieldGroup should be derived from FieldData. - -This test consists of two parts - -1. Compile a short code (`test_fail.cxx`) which should fail to compile. - This code tries to construct a FieldGroup by passing an int. - -2. A test which adds some fields to a FieldGroup then checks that - the correct number of fields are present. - - diff --git a/tests/integrated/test-fieldgroup/makefile b/tests/integrated/test-fieldgroup/makefile deleted file mode 100644 index ba8c3e8724..0000000000 --- a/tests/integrated/test-fieldgroup/makefile +++ /dev/null @@ -1,6 +0,0 @@ - -BOUT_TOP = ../../.. - -SOURCEC = test_fail.cxx - -include $(BOUT_TOP)/make.config diff --git a/tests/integrated/test-fieldgroup/runtest b/tests/integrated/test-fieldgroup/runtest deleted file mode 100755 index c247f97c8c..0000000000 --- a/tests/integrated/test-fieldgroup/runtest +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# -# This scipt is intended to be quite quiet when it succeeds, -# printing only the test name and "Passed". -# If the test fails then the whole log is printed, hopefully -# helping diagnose where the problem lies. -# - -#requires not make - -echo "" -echo "Running FieldGroup test" - -# Ensure we clean old run, in case it succeeded last time -make clean - -# This test should fail to compile -make > run.log 2>&1 - -if [ $? -eq 0 ]; then - # Compiled ok, something wrong - # Send the run.log file to stdout so it's stored in Travis logs - echo "Fail. 
Log follows:" - echo "" - cat run.log - exit 1; -fi - -echo "Passed" -exit 0 diff --git a/tests/integrated/test-fieldgroup/test_fail.cxx b/tests/integrated/test-fieldgroup/test_fail.cxx deleted file mode 100644 index e9c6a9d16f..0000000000 --- a/tests/integrated/test-fieldgroup/test_fail.cxx +++ /dev/null @@ -1,17 +0,0 @@ -/// This is a test of FieldGroup which should fail to compile -/// -/// - -#include - -int main(int argc, char **argv) { - - // Construct a FieldGroup with an integer - // Should fail to compile - // (hopefully with a useful error message) - - int i; - FieldGroup g(i); - - return 0; -} diff --git a/tests/unit/field/test_fieldgroup.cxx b/tests/unit/field/test_fieldgroup.cxx index c6484d42f0..167b1c3387 100644 --- a/tests/unit/field/test_fieldgroup.cxx +++ b/tests/unit/field/test_fieldgroup.cxx @@ -7,6 +7,8 @@ #include "vector2d.hxx" #include "vector3d.hxx" +#include + TEST(FieldGroupTest, CreateWithField2D) { Field2D a; FieldGroup group(a); @@ -279,3 +281,11 @@ TEST(FieldGroupTest, ConstIterator) { EXPECT_EQ(count, 2); } + +TEST(FieldGroupTest, NotConstructableFromInt) { + EXPECT_FALSE((std::is_constructible::value)); +} + +TEST(FieldGroupTest, NotConstructableFromBoutReal) { + EXPECT_FALSE((std::is_constructible::value)); +} From 10fb6feb3296062be6747486c41f2db4f8b1f9e3 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 9 Oct 2019 14:58:34 +0100 Subject: [PATCH 198/428] Fix PETSc Laplace MAST test: was always using same grid file --- tests/integrated/test-petsc_laplace_MAST-grid/runtest | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/integrated/test-petsc_laplace_MAST-grid/runtest b/tests/integrated/test-petsc_laplace_MAST-grid/runtest index 9b2fa2c73b..d5853648e6 100755 --- a/tests/integrated/test-petsc_laplace_MAST-grid/runtest +++ b/tests/integrated/test-petsc_laplace_MAST-grid/runtest @@ -38,12 +38,11 @@ for nproc in [1,2,4]: # nxpe = 2 for jy in [2,34,65,81,113]: - cmd = "./test_petsc_laplace_MAST_grid " - "grid=grids/grid_MAST_SOL_jyis"+str(jy)+".nc" + cmd = "./test_petsc_laplace_MAST_grid grid=grids/grid_MAST_SOL_jyis{}.nc".format(jy) shell("rm data/BOUT.dmp.*.nc") - print(" %d processors...." 
% nproc) + print(" {} processors, grid_MAST_SOL_jyis{}".format(nproc, jy)) s, out = launch_safe(cmd, nproc=nproc, pipe=True) f = open("run.log."+str(nproc), "w") f.write(out) From 324fe70050c5a14ad1c72ca47969fc88aca6cd99 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 9 Oct 2019 14:59:03 +0100 Subject: [PATCH 199/428] Fix PETSc Laplace MAST test: sometimes too many iterations _very_ difficult to reproduce, probably rounding error --- tests/integrated/test-petsc_laplace_MAST-grid/data/BOUT.inp | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integrated/test-petsc_laplace_MAST-grid/data/BOUT.inp b/tests/integrated/test-petsc_laplace_MAST-grid/data/BOUT.inp index ff286e8fcd..c730332f80 100644 --- a/tests/integrated/test-petsc_laplace_MAST-grid/data/BOUT.inp +++ b/tests/integrated/test-petsc_laplace_MAST-grid/data/BOUT.inp @@ -31,6 +31,7 @@ rtol=1.e-10 include_yguards=false maxits=1000 fourth_order=true +gmres_max_steps=32 [SPT] #type=spt From 68234802190bec1128e8a8fef908e1305017d16c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 9 Oct 2019 17:18:06 +0100 Subject: [PATCH 200/428] CMake: add missing source files for SNB --- CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index d0886b114b..c199613ba8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -82,6 +82,7 @@ set(BOUT_SOURCES ./include/bout/rvec.hxx ./include/bout/scorepwrapper.hxx ./include/bout/slepclib.hxx + ./include/bout/snb.hxx ./include/bout/solver.hxx ./include/bout/solverfactory.hxx ./include/bout/surfaceiter.hxx @@ -233,6 +234,7 @@ set(BOUT_SOURCES ./src/physics/gyro_average.cxx ./src/physics/physicsmodel.cxx ./src/physics/smoothing.cxx + ./src/physics/snb.cxx ./src/physics/sourcex.cxx ./src/solver/impls/arkode/arkode.cxx ./src/solver/impls/arkode/arkode.hxx From 981420811c58c21e8882f14656808ec1653d8f99 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 9 Oct 2019 17:18:21 +0100 Subject: [PATCH 201/428] CMake: fix some typos --- CMakeLists.txt | 4 ++-- cmake/FindSLEPc.cmake | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c199613ba8..9473ec3594 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -339,7 +339,7 @@ endif() # Compile time features set(CHECK_LEVELS 0 1 2 3 4) -set(CHECK 3 CACHE STRINGS "Set run-time checking level") +set(CHECK 3 CACHE STRING "Set run-time checking level") set_property(CACHE CHECK PROPERTY STRINGS ${CHECK_LEVELS}) if (NOT CHECK IN_LIST CHECK_LEVELS) message(FATAL_ERROR "CHECK must be one of ${CHECK_LEVELS}") @@ -414,7 +414,7 @@ if (ENABLE_OPENMP) find_package(OpenMP REQUIRED) target_link_libraries(bout++ PUBLIC OpenMP::OpenMP_CXX) set(possible_openmp_schedules static dynamic guided auto) - set(OPENMP_SCHEDULE static CACHE STRINGS "Set OpenMP schedule") + set(OPENMP_SCHEDULE static CACHE STRING "Set OpenMP schedule") if (NOT OPENMP_SCHEDULE IN_LIST possible_openmp_schedules) message(FATAL_ERROR "OPENMP_SCHEDULE must be one of ${possible_openmp_schedules}") endif() diff --git a/cmake/FindSLEPc.cmake b/cmake/FindSLEPc.cmake index 3c0e660282..10deb80e4e 100644 --- a/cmake/FindSLEPc.cmake +++ b/cmake/FindSLEPc.cmake @@ -173,7 +173,7 @@ int main() { ) if (SLEPC_CONFIG_TEST_VERSION_EXITCODE EQUAL 0) - set(SLEPC_VERSION ${OUTPUT} CACHE TYPE STRING) + set(SLEPC_VERSION "${OUTPUT}" CACHE STRING "SLEPC version number") string(REPLACE "." 
";" SLEPC_VERSION_LIST ${SLEPC_VERSION}) list(GET SLEPC_VERSION_LIST 0 SLEPC_VERSION_MAJOR) list(GET SLEPC_VERSION_LIST 1 SLEPC_VERSION_MINOR) From 323f1f010857c1aea9a762c1ba6f360029ec36c7 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Oct 2019 10:09:07 +0100 Subject: [PATCH 202/428] CMake: Reorder tests --- tests/integrated/CMakeLists.txt | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/tests/integrated/CMakeLists.txt b/tests/integrated/CMakeLists.txt index 085625bb7c..a136ba41ed 100644 --- a/tests/integrated/CMakeLists.txt +++ b/tests/integrated/CMakeLists.txt @@ -3,35 +3,34 @@ add_subdirectory(test-command-args) add_subdirectory(test-coordinates-initialization) add_subdirectory(test-cyclic) add_subdirectory(test-delp2) -add_subdirectory(test-griddata) -add_subdirectory(test-initial) -add_subdirectory(test-invertable-operator) -add_subdirectory(test-io) -add_subdirectory(test-io_hdf5) -add_subdirectory(test-laplace) -add_subdirectory(test-laplacexy-short) -add_subdirectory(test-slepc-solver) -add_subdirectory(test-solver) -add_subdirectory(test-stopCheck) -add_subdirectory(test-stopCheck-file) add_subdirectory(test-drift-instability) add_subdirectory(test-drift-instability-staggered) add_subdirectory(test-fieldgroupComm) +add_subdirectory(test-griddata) add_subdirectory(test-griddata-yboundary-guards) add_subdirectory(test-gyro) +add_subdirectory(test-initial) add_subdirectory(test-interchange-instability) add_subdirectory(test-interpolate) +add_subdirectory(test-invertable-operator) add_subdirectory(test-invpar) +add_subdirectory(test-io) +add_subdirectory(test-io_hdf5) +add_subdirectory(test-laplace) add_subdirectory(test-multigrid_laplace) add_subdirectory(test-naulin-laplace) add_subdirectory(test-options-netcdf) -add_subdirectory(test-petsc_laplace_MAST-grid) add_subdirectory(test-petsc_laplace) -add_subdirectory(test-restarting) +add_subdirectory(test-petsc_laplace_MAST-grid) add_subdirectory(test-restart-io) add_subdirectory(test-restart-io_hdf5) +add_subdirectory(test-restarting) +add_subdirectory(test-slepc-solver) add_subdirectory(test-smooth) add_subdirectory(test-snb) +add_subdirectory(test-solver) +add_subdirectory(test-stopCheck) +add_subdirectory(test-stopCheck-file) add_subdirectory(test-twistshift) add_subdirectory(test-twistshift-staggered) add_subdirectory(test-vec) From c3a90474b91b38a1aef0cc713967ba0130e010df Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Oct 2019 10:18:22 +0100 Subject: [PATCH 203/428] CMake: Consistent integrated test target names --- tests/integrated/test-command-args/CMakeLists.txt | 3 ++- tests/integrated/test-cyclic/CMakeLists.txt | 3 ++- tests/integrated/test-delp2/CMakeLists.txt | 3 ++- .../test-drift-instability-staggered/CMakeLists.txt | 2 +- tests/integrated/test-drift-instability/CMakeLists.txt | 2 +- tests/integrated/test-fieldgroupComm/CMakeLists.txt | 2 +- .../integrated/test-griddata-yboundary-guards/CMakeLists.txt | 2 +- tests/integrated/test-griddata/CMakeLists.txt | 3 ++- tests/integrated/test-initial/CMakeLists.txt | 3 ++- tests/integrated/test-interchange-instability/CMakeLists.txt | 2 +- tests/integrated/test-interpolate/CMakeLists.txt | 3 ++- tests/integrated/test-invertable-operator/CMakeLists.txt | 3 ++- tests/integrated/test-io/CMakeLists.txt | 3 ++- tests/integrated/test-io_hdf5/CMakeLists.txt | 3 ++- tests/integrated/test-laplace/CMakeLists.txt | 3 ++- tests/integrated/test-solver/CMakeLists.txt | 5 ++++- tests/integrated/test-stopCheck-file/CMakeLists.txt | 2 
+- tests/integrated/test-stopCheck/CMakeLists.txt | 3 ++- 18 files changed, 32 insertions(+), 18 deletions(-) diff --git a/tests/integrated/test-command-args/CMakeLists.txt b/tests/integrated/test-command-args/CMakeLists.txt index bd77cad4cc..ea3392a277 100644 --- a/tests/integrated/test-command-args/CMakeLists.txt +++ b/tests/integrated/test-command-args/CMakeLists.txt @@ -1,5 +1,6 @@ -bout_add_integrated_test(command-args +bout_add_integrated_test(test-command-args SOURCES command-args.cxx + EXECUTABLE_NAME command-args USE_RUNTEST EXTRA_FILES BOUT.inp ) diff --git a/tests/integrated/test-cyclic/CMakeLists.txt b/tests/integrated/test-cyclic/CMakeLists.txt index 81d24d7aa6..9083bb4f47 100644 --- a/tests/integrated/test-cyclic/CMakeLists.txt +++ b/tests/integrated/test-cyclic/CMakeLists.txt @@ -1,5 +1,6 @@ -bout_add_integrated_test(test_cyclic +bout_add_integrated_test(test-cyclic SOURCES test_cyclic.cxx + EXECUTABLE_NAME test_cylic USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES test_io.grd.nc diff --git a/tests/integrated/test-delp2/CMakeLists.txt b/tests/integrated/test-delp2/CMakeLists.txt index 8665387872..6c3b98851f 100644 --- a/tests/integrated/test-delp2/CMakeLists.txt +++ b/tests/integrated/test-delp2/CMakeLists.txt @@ -1,5 +1,6 @@ -bout_add_integrated_test(test_delp2 +bout_add_integrated_test(test-delp2 SOURCES test_delp2.cxx + EXECUTABLE_NAME test_delp2 USE_RUNTEST USE_DATA_BOUT_INP ) diff --git a/tests/integrated/test-drift-instability-staggered/CMakeLists.txt b/tests/integrated/test-drift-instability-staggered/CMakeLists.txt index a616c91250..c0ca56eb8e 100644 --- a/tests/integrated/test-drift-instability-staggered/CMakeLists.txt +++ b/tests/integrated/test-drift-instability-staggered/CMakeLists.txt @@ -1,4 +1,4 @@ -bout_add_integrated_test(drift-instability-staggered +bout_add_integrated_test(test-drift-instability-staggered SOURCES 2fluid.cxx EXECUTABLE_NAME 2fluid USE_RUNTEST diff --git a/tests/integrated/test-drift-instability/CMakeLists.txt b/tests/integrated/test-drift-instability/CMakeLists.txt index df94ab2394..21020d9eac 100644 --- a/tests/integrated/test-drift-instability/CMakeLists.txt +++ b/tests/integrated/test-drift-instability/CMakeLists.txt @@ -1,4 +1,4 @@ -bout_add_integrated_test(drift-instability +bout_add_integrated_test(test-drift-instability SOURCES 2fluid.cxx EXECUTABLE_NAME 2fluid USE_RUNTEST diff --git a/tests/integrated/test-fieldgroupComm/CMakeLists.txt b/tests/integrated/test-fieldgroupComm/CMakeLists.txt index ff2326e27b..4517eb210d 100644 --- a/tests/integrated/test-fieldgroupComm/CMakeLists.txt +++ b/tests/integrated/test-fieldgroupComm/CMakeLists.txt @@ -1,4 +1,4 @@ -bout_add_integrated_test(fieldgroupComm +bout_add_integrated_test(test-fieldgroupComm SOURCES test_fieldgroupcomm.cxx EXECUTABLE_NAME test USE_RUNTEST diff --git a/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt b/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt index 3f7446cad4..a142b273fa 100644 --- a/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt +++ b/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt @@ -1,4 +1,4 @@ -bout_add_integrated_test(griddata-yboundary-guards +bout_add_integrated_test(test-griddata-yboundary-guards SOURCES test_griddata.cxx EXECUTABLE_NAME test_griddata USE_RUNTEST diff --git a/tests/integrated/test-griddata/CMakeLists.txt b/tests/integrated/test-griddata/CMakeLists.txt index 2701ed41cc..f1759c5d72 100644 --- a/tests/integrated/test-griddata/CMakeLists.txt +++ 
b/tests/integrated/test-griddata/CMakeLists.txt @@ -1,5 +1,6 @@ -bout_add_integrated_test(test_griddata +bout_add_integrated_test(test-griddata SOURCES test_griddata.cxx + EXECUTABLE_NAME test_griddata USE_RUNTEST EXTRA_FILES screw/BOUT.inp ) diff --git a/tests/integrated/test-initial/CMakeLists.txt b/tests/integrated/test-initial/CMakeLists.txt index 486f07bd18..2f192f6305 100644 --- a/tests/integrated/test-initial/CMakeLists.txt +++ b/tests/integrated/test-initial/CMakeLists.txt @@ -1,5 +1,6 @@ -bout_add_integrated_test(test_initial +bout_add_integrated_test(test-initial SOURCES test_initial.cxx + EXECUTABLE_NAME test_initial USE_RUNTEST USE_DATA_BOUT_INP ) diff --git a/tests/integrated/test-interchange-instability/CMakeLists.txt b/tests/integrated/test-interchange-instability/CMakeLists.txt index 975172ed33..5e3c41fcb2 100644 --- a/tests/integrated/test-interchange-instability/CMakeLists.txt +++ b/tests/integrated/test-interchange-instability/CMakeLists.txt @@ -1,4 +1,4 @@ -bout_add_integrated_test(interchange-instability +bout_add_integrated_test(test-interchange-instability SOURCES 2fluid.cxx EXECUTABLE_NAME 2fluid USE_RUNTEST diff --git a/tests/integrated/test-interpolate/CMakeLists.txt b/tests/integrated/test-interpolate/CMakeLists.txt index d8a3845849..482db5996a 100644 --- a/tests/integrated/test-interpolate/CMakeLists.txt +++ b/tests/integrated/test-interpolate/CMakeLists.txt @@ -1,5 +1,6 @@ -bout_add_integrated_test(test_interpolate +bout_add_integrated_test(test-interpolate SOURCES test_interpolate.cxx + EXECUTABLE_NAME test_interpolate USE_RUNTEST USE_DATA_BOUT_INP ) diff --git a/tests/integrated/test-invertable-operator/CMakeLists.txt b/tests/integrated/test-invertable-operator/CMakeLists.txt index 2275005dfc..2f4a5d2551 100644 --- a/tests/integrated/test-invertable-operator/CMakeLists.txt +++ b/tests/integrated/test-invertable-operator/CMakeLists.txt @@ -1,5 +1,6 @@ -bout_add_integrated_test(invertable_operator +bout_add_integrated_test(test-invertable-operator SOURCES invertable_operator.cxx + EXECUTABLE_NAME invertable_operator USE_RUNTEST USE_DATA_BOUT_INP REQUIRES BOUT_HAS_PETSC diff --git a/tests/integrated/test-io/CMakeLists.txt b/tests/integrated/test-io/CMakeLists.txt index b218c85fa4..828ca4c934 100644 --- a/tests/integrated/test-io/CMakeLists.txt +++ b/tests/integrated/test-io/CMakeLists.txt @@ -1,5 +1,6 @@ -bout_add_integrated_test(test_io +bout_add_integrated_test(test-io SOURCES test_io.cxx + EXECUTABLE_NAME test_io USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES test_io.grd.nc data/benchmark.out.0.nc diff --git a/tests/integrated/test-io_hdf5/CMakeLists.txt b/tests/integrated/test-io_hdf5/CMakeLists.txt index cc08b46e2c..ed5c31fb5b 100644 --- a/tests/integrated/test-io_hdf5/CMakeLists.txt +++ b/tests/integrated/test-io_hdf5/CMakeLists.txt @@ -1,5 +1,6 @@ -bout_add_integrated_test(test_io_hdf5 +bout_add_integrated_test(test-io-hdf5 SOURCES test_io_hdf5.cxx + EXECUTABLE_NAME test_io_hdf5 USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES test_io.grd.hdf5 data/benchmark.out.0.hdf5 diff --git a/tests/integrated/test-laplace/CMakeLists.txt b/tests/integrated/test-laplace/CMakeLists.txt index 91640e8159..fa2ec7dd3d 100644 --- a/tests/integrated/test-laplace/CMakeLists.txt +++ b/tests/integrated/test-laplace/CMakeLists.txt @@ -1,5 +1,6 @@ -bout_add_integrated_test(test_laplace +bout_add_integrated_test(test-laplace SOURCES test_laplace.cxx + EXECUTABLE_NAME test_laplace EXTRA_FILES test_laplace.grd.nc data/benchmark.0.nc USE_RUNTEST USE_DATA_BOUT_INP diff --git 
a/tests/integrated/test-solver/CMakeLists.txt b/tests/integrated/test-solver/CMakeLists.txt index d686b4be47..3cad7a5b13 100644 --- a/tests/integrated/test-solver/CMakeLists.txt +++ b/tests/integrated/test-solver/CMakeLists.txt @@ -1 +1,4 @@ -bout_add_integrated_test(test_solver SOURCES test_solver.cxx) +bout_add_integrated_test(test-solver + SOURCES test_solver.cxx + EXECUTABLE_NAME test_solver + ) diff --git a/tests/integrated/test-stopCheck-file/CMakeLists.txt b/tests/integrated/test-stopCheck-file/CMakeLists.txt index 4c95afa447..93c9089260 100644 --- a/tests/integrated/test-stopCheck-file/CMakeLists.txt +++ b/tests/integrated/test-stopCheck-file/CMakeLists.txt @@ -1,4 +1,4 @@ -bout_add_integrated_test(test_stopCheck-file +bout_add_integrated_test(test-stopCheck-file SOURCES test_stopCheck.cxx EXECUTABLE_NAME test_stopCheck USE_RUNTEST diff --git a/tests/integrated/test-stopCheck/CMakeLists.txt b/tests/integrated/test-stopCheck/CMakeLists.txt index fc2b1c1f70..987b0be489 100644 --- a/tests/integrated/test-stopCheck/CMakeLists.txt +++ b/tests/integrated/test-stopCheck/CMakeLists.txt @@ -1,5 +1,6 @@ -bout_add_integrated_test(test_stopCheck +bout_add_integrated_test(test-stopCheck SOURCES test_stopCheck.cxx + EXECUTABLE_NAME test_stopCheck USE_RUNTEST USE_DATA_BOUT_INP ) From 514eed11d46d0294f9cddaaff90c9f5048a3c623 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Oct 2019 11:20:59 +0100 Subject: [PATCH 204/428] CMake: Name the default test executable like the source file This is consistent with the existing Makefile approach and removes the need for EXECUTABLE_NAME --- CMakeLists.txt | 17 +++++++++++++++-- .../integrated/test-command-args/CMakeLists.txt | 1 - tests/integrated/test-cyclic/CMakeLists.txt | 1 - tests/integrated/test-delp2/CMakeLists.txt | 1 - .../CMakeLists.txt | 1 - .../test-drift-instability/CMakeLists.txt | 1 - .../test-fieldgroupComm/CMakeLists.txt | 1 - .../CMakeLists.txt | 1 - tests/integrated/test-griddata/CMakeLists.txt | 1 - tests/integrated/test-gyro/CMakeLists.txt | 1 - tests/integrated/test-initial/CMakeLists.txt | 1 - .../test-interchange-instability/CMakeLists.txt | 1 - .../integrated/test-interpolate/CMakeLists.txt | 1 - .../test-invertable-operator/CMakeLists.txt | 1 - tests/integrated/test-invpar/CMakeLists.txt | 1 - tests/integrated/test-io/CMakeLists.txt | 1 - tests/integrated/test-io_hdf5/CMakeLists.txt | 1 - tests/integrated/test-laplace/CMakeLists.txt | 1 - .../test-multigrid_laplace/CMakeLists.txt | 1 - .../test-naulin-laplace/CMakeLists.txt | 1 - .../test-petsc_laplace/CMakeLists.txt | 1 - .../test-petsc_laplace_MAST-grid/CMakeLists.txt | 1 - .../test-restart-io_hdf5/CMakeLists.txt | 1 - tests/integrated/test-restarting/CMakeLists.txt | 1 - tests/integrated/test-smooth/CMakeLists.txt | 1 - tests/integrated/test-snb/CMakeLists.txt | 1 - tests/integrated/test-solver/CMakeLists.txt | 1 - .../test-stopCheck-file/CMakeLists.txt | 1 - tests/integrated/test-stopCheck/CMakeLists.txt | 1 - .../test-twistshift-staggered/CMakeLists.txt | 1 - tests/integrated/test-vec/CMakeLists.txt | 1 - tests/integrated/test-yupdown/CMakeLists.txt | 1 - 32 files changed, 15 insertions(+), 33 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9473ec3594..01df06c1be 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -629,7 +629,8 @@ macro(bout_test_copy_file FILENAME) COPYONLY) endmacro() -# Add a new integrated test +# Add a new integrated test. By default, the executable is named like +# the first source, stripped of its file extension. 
# # Required arguments: # @@ -648,7 +649,8 @@ endmacro() # # - REQUIRES: list of variables that must be truthy to enable test # -# - EXECUTABLE_NAME: name of the executable, if different from the test name +# - EXECUTABLE_NAME: name of the executable, if different from the +# first source name function(bout_add_integrated_test TESTNAME) set(options USE_RUNTEST USE_DATA_BOUT_INP) @@ -669,6 +671,17 @@ function(bout_add_integrated_test TESTNAME) if (BOUT_TEST_OPTIONS_EXECUTABLE_NAME) set_target_properties(${TESTNAME} PROPERTIES OUTPUT_NAME ${BOUT_TEST_OPTIONS_EXECUTABLE_NAME}) + else() + # If more than one source file, just get the first one + list(LENGTH ${BOUT_TEST_OPTIONS_SOURCES} BOUT_SOURCES_LENGTH) + if (BOUT_SOURCES_LENGTH GREATER 0) + list(GET ${BOUT_TEST_OPTIONS_SOURCES} 0 BOUT_TEST_FIRST_SOURCE) + else() + set(BOUT_TEST_FIRST_SOURCE ${BOUT_TEST_OPTIONS_SOURCES}) + endif() + # Strip the directory and file extension from the source file + get_filename_component(BOUT_TEST_EXECUTABLE_NAME ${BOUT_TEST_FIRST_SOURCE} NAME_WE) + set_target_properties(${TESTNAME} PROPERTIES OUTPUT_NAME ${BOUT_TEST_EXECUTABLE_NAME}) endif() if (BOUT_TEST_OPTIONS_USE_RUNTEST) diff --git a/tests/integrated/test-command-args/CMakeLists.txt b/tests/integrated/test-command-args/CMakeLists.txt index ea3392a277..1a2cf88a5e 100644 --- a/tests/integrated/test-command-args/CMakeLists.txt +++ b/tests/integrated/test-command-args/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-command-args SOURCES command-args.cxx - EXECUTABLE_NAME command-args USE_RUNTEST EXTRA_FILES BOUT.inp ) diff --git a/tests/integrated/test-cyclic/CMakeLists.txt b/tests/integrated/test-cyclic/CMakeLists.txt index 9083bb4f47..38c1825b45 100644 --- a/tests/integrated/test-cyclic/CMakeLists.txt +++ b/tests/integrated/test-cyclic/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-cyclic SOURCES test_cyclic.cxx - EXECUTABLE_NAME test_cylic USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES test_io.grd.nc diff --git a/tests/integrated/test-delp2/CMakeLists.txt b/tests/integrated/test-delp2/CMakeLists.txt index 6c3b98851f..46f10c9b0e 100644 --- a/tests/integrated/test-delp2/CMakeLists.txt +++ b/tests/integrated/test-delp2/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-delp2 SOURCES test_delp2.cxx - EXECUTABLE_NAME test_delp2 USE_RUNTEST USE_DATA_BOUT_INP ) diff --git a/tests/integrated/test-drift-instability-staggered/CMakeLists.txt b/tests/integrated/test-drift-instability-staggered/CMakeLists.txt index c0ca56eb8e..340a5429e2 100644 --- a/tests/integrated/test-drift-instability-staggered/CMakeLists.txt +++ b/tests/integrated/test-drift-instability-staggered/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-drift-instability-staggered SOURCES 2fluid.cxx - EXECUTABLE_NAME 2fluid USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES runtest.py uedge.grd_std.cdl diff --git a/tests/integrated/test-drift-instability/CMakeLists.txt b/tests/integrated/test-drift-instability/CMakeLists.txt index 21020d9eac..31037226a6 100644 --- a/tests/integrated/test-drift-instability/CMakeLists.txt +++ b/tests/integrated/test-drift-instability/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-drift-instability SOURCES 2fluid.cxx - EXECUTABLE_NAME 2fluid USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES runtest.py uedge.grd_std.cdl diff --git a/tests/integrated/test-fieldgroupComm/CMakeLists.txt b/tests/integrated/test-fieldgroupComm/CMakeLists.txt index 4517eb210d..24b0924d30 100644 --- a/tests/integrated/test-fieldgroupComm/CMakeLists.txt +++ 
b/tests/integrated/test-fieldgroupComm/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-fieldgroupComm SOURCES test_fieldgroupcomm.cxx - EXECUTABLE_NAME test USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES cyclone_68x32.nc diff --git a/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt b/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt index a142b273fa..47d6f5dea4 100644 --- a/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt +++ b/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-griddata-yboundary-guards SOURCES test_griddata.cxx - EXECUTABLE_NAME test_griddata USE_RUNTEST EXTRA_FILES data-doublenull-0/BOUT.inp diff --git a/tests/integrated/test-griddata/CMakeLists.txt b/tests/integrated/test-griddata/CMakeLists.txt index f1759c5d72..8e03b5de0d 100644 --- a/tests/integrated/test-griddata/CMakeLists.txt +++ b/tests/integrated/test-griddata/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-griddata SOURCES test_griddata.cxx - EXECUTABLE_NAME test_griddata USE_RUNTEST EXTRA_FILES screw/BOUT.inp ) diff --git a/tests/integrated/test-gyro/CMakeLists.txt b/tests/integrated/test-gyro/CMakeLists.txt index 3db23c9ee9..94f74c5776 100644 --- a/tests/integrated/test-gyro/CMakeLists.txt +++ b/tests/integrated/test-gyro/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-gyro SOURCES test_gyro.cxx - EXECUTABLE_NAME test_gyro USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES cyclone_68x32.nc data/benchmark.0.nc diff --git a/tests/integrated/test-initial/CMakeLists.txt b/tests/integrated/test-initial/CMakeLists.txt index 2f192f6305..cf413d2d32 100644 --- a/tests/integrated/test-initial/CMakeLists.txt +++ b/tests/integrated/test-initial/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-initial SOURCES test_initial.cxx - EXECUTABLE_NAME test_initial USE_RUNTEST USE_DATA_BOUT_INP ) diff --git a/tests/integrated/test-interchange-instability/CMakeLists.txt b/tests/integrated/test-interchange-instability/CMakeLists.txt index 5e3c41fcb2..971a70805a 100644 --- a/tests/integrated/test-interchange-instability/CMakeLists.txt +++ b/tests/integrated/test-interchange-instability/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-interchange-instability SOURCES 2fluid.cxx - EXECUTABLE_NAME 2fluid USE_RUNTEST EXTRA_FILES slab.6b5.r1.cdl slab.6b5.r10.cdl data_1/BOUT.inp data_10/BOUT.inp ) diff --git a/tests/integrated/test-interpolate/CMakeLists.txt b/tests/integrated/test-interpolate/CMakeLists.txt index 482db5996a..eb544d9c97 100644 --- a/tests/integrated/test-interpolate/CMakeLists.txt +++ b/tests/integrated/test-interpolate/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-interpolate SOURCES test_interpolate.cxx - EXECUTABLE_NAME test_interpolate USE_RUNTEST USE_DATA_BOUT_INP ) diff --git a/tests/integrated/test-invertable-operator/CMakeLists.txt b/tests/integrated/test-invertable-operator/CMakeLists.txt index 2f4a5d2551..9931b2a9b5 100644 --- a/tests/integrated/test-invertable-operator/CMakeLists.txt +++ b/tests/integrated/test-invertable-operator/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-invertable-operator SOURCES invertable_operator.cxx - EXECUTABLE_NAME invertable_operator USE_RUNTEST USE_DATA_BOUT_INP REQUIRES BOUT_HAS_PETSC diff --git a/tests/integrated/test-invpar/CMakeLists.txt b/tests/integrated/test-invpar/CMakeLists.txt index 9f8a3b3c54..418709e7df 100644 --- a/tests/integrated/test-invpar/CMakeLists.txt +++ 
b/tests/integrated/test-invpar/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-invpar SOURCES test_invpar.cxx - EXECUTABLE_NAME test_invpar USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES test_io.grd.nc diff --git a/tests/integrated/test-io/CMakeLists.txt b/tests/integrated/test-io/CMakeLists.txt index 828ca4c934..dea5c7ee1b 100644 --- a/tests/integrated/test-io/CMakeLists.txt +++ b/tests/integrated/test-io/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-io SOURCES test_io.cxx - EXECUTABLE_NAME test_io USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES test_io.grd.nc data/benchmark.out.0.nc diff --git a/tests/integrated/test-io_hdf5/CMakeLists.txt b/tests/integrated/test-io_hdf5/CMakeLists.txt index ed5c31fb5b..25eb3a4e6f 100644 --- a/tests/integrated/test-io_hdf5/CMakeLists.txt +++ b/tests/integrated/test-io_hdf5/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-io-hdf5 SOURCES test_io_hdf5.cxx - EXECUTABLE_NAME test_io_hdf5 USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES test_io.grd.hdf5 data/benchmark.out.0.hdf5 diff --git a/tests/integrated/test-laplace/CMakeLists.txt b/tests/integrated/test-laplace/CMakeLists.txt index fa2ec7dd3d..488b0b7314 100644 --- a/tests/integrated/test-laplace/CMakeLists.txt +++ b/tests/integrated/test-laplace/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-laplace SOURCES test_laplace.cxx - EXECUTABLE_NAME test_laplace EXTRA_FILES test_laplace.grd.nc data/benchmark.0.nc USE_RUNTEST USE_DATA_BOUT_INP diff --git a/tests/integrated/test-multigrid_laplace/CMakeLists.txt b/tests/integrated/test-multigrid_laplace/CMakeLists.txt index e3a30d569e..482d91f38f 100644 --- a/tests/integrated/test-multigrid_laplace/CMakeLists.txt +++ b/tests/integrated/test-multigrid_laplace/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-multigrid-laplace SOURCES test_multigrid_laplace.cxx - EXECUTABLE_NAME test_multigrid_laplace USE_RUNTEST USE_DATA_BOUT_INP ) diff --git a/tests/integrated/test-naulin-laplace/CMakeLists.txt b/tests/integrated/test-naulin-laplace/CMakeLists.txt index 22f75bf0b9..62e723e727 100644 --- a/tests/integrated/test-naulin-laplace/CMakeLists.txt +++ b/tests/integrated/test-naulin-laplace/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-naulin-laplace SOURCES test_naulin_laplace.cxx - EXECUTABLE_NAME test_naulin_laplace USE_RUNTEST USE_DATA_BOUT_INP ) diff --git a/tests/integrated/test-petsc_laplace/CMakeLists.txt b/tests/integrated/test-petsc_laplace/CMakeLists.txt index 1a7934628b..6a4a670098 100644 --- a/tests/integrated/test-petsc_laplace/CMakeLists.txt +++ b/tests/integrated/test-petsc_laplace/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-petsc-laplace SOURCES test_petsc_laplace.cxx - EXECUTABLE_NAME test_petsc_laplace REQUIRES BOUT_HAS_PETSC USE_RUNTEST USE_DATA_BOUT_INP diff --git a/tests/integrated/test-petsc_laplace_MAST-grid/CMakeLists.txt b/tests/integrated/test-petsc_laplace_MAST-grid/CMakeLists.txt index 9287892580..625365d846 100644 --- a/tests/integrated/test-petsc_laplace_MAST-grid/CMakeLists.txt +++ b/tests/integrated/test-petsc_laplace_MAST-grid/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-petsc-laplace-MAST-grid SOURCES test_petsc_laplace_MAST_grid.cxx - EXECUTABLE_NAME test_petsc_laplace_MAST_grid REQUIRES BOUT_HAS_PETSC USE_RUNTEST USE_DATA_BOUT_INP diff --git a/tests/integrated/test-restart-io_hdf5/CMakeLists.txt b/tests/integrated/test-restart-io_hdf5/CMakeLists.txt index 2f1ab901b3..09cdf40f1d 100644 --- a/tests/integrated/test-restart-io_hdf5/CMakeLists.txt 
+++ b/tests/integrated/test-restart-io_hdf5/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-restart-io-hdf5 SOURCES test-restart-io.cxx - EXECUTABLE_NAME test-restart-io USE_RUNTEST USE_DATA_BOUT_INP REQUIRES BOUT_HAS_HDF5 diff --git a/tests/integrated/test-restarting/CMakeLists.txt b/tests/integrated/test-restarting/CMakeLists.txt index a9046f4daa..83cb9c808f 100644 --- a/tests/integrated/test-restarting/CMakeLists.txt +++ b/tests/integrated/test-restarting/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-restarting SOURCES test_restarting.cxx - EXECUTABLE_NAME test_restarting USE_RUNTEST USE_DATA_BOUT_INP ) diff --git a/tests/integrated/test-smooth/CMakeLists.txt b/tests/integrated/test-smooth/CMakeLists.txt index 98f01fdb07..b7063beb38 100644 --- a/tests/integrated/test-smooth/CMakeLists.txt +++ b/tests/integrated/test-smooth/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-smooth SOURCES test_smooth.cxx - EXECUTABLE_NAME test_smooth USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES test_smooth.nc data/benchmark.0.nc diff --git a/tests/integrated/test-snb/CMakeLists.txt b/tests/integrated/test-snb/CMakeLists.txt index 51393e554a..be85526e05 100644 --- a/tests/integrated/test-snb/CMakeLists.txt +++ b/tests/integrated/test-snb/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-snb SOURCES test_snb.cxx - EXECUTABLE_NAME test_snb USE_RUNTEST USE_DATA_BOUT_INP ) diff --git a/tests/integrated/test-solver/CMakeLists.txt b/tests/integrated/test-solver/CMakeLists.txt index 3cad7a5b13..f20e11f1eb 100644 --- a/tests/integrated/test-solver/CMakeLists.txt +++ b/tests/integrated/test-solver/CMakeLists.txt @@ -1,4 +1,3 @@ bout_add_integrated_test(test-solver SOURCES test_solver.cxx - EXECUTABLE_NAME test_solver ) diff --git a/tests/integrated/test-stopCheck-file/CMakeLists.txt b/tests/integrated/test-stopCheck-file/CMakeLists.txt index 93c9089260..60b9b23489 100644 --- a/tests/integrated/test-stopCheck-file/CMakeLists.txt +++ b/tests/integrated/test-stopCheck-file/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-stopCheck-file SOURCES test_stopCheck.cxx - EXECUTABLE_NAME test_stopCheck USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES diff --git a/tests/integrated/test-stopCheck/CMakeLists.txt b/tests/integrated/test-stopCheck/CMakeLists.txt index 987b0be489..93cf5fb67b 100644 --- a/tests/integrated/test-stopCheck/CMakeLists.txt +++ b/tests/integrated/test-stopCheck/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-stopCheck SOURCES test_stopCheck.cxx - EXECUTABLE_NAME test_stopCheck USE_RUNTEST USE_DATA_BOUT_INP ) diff --git a/tests/integrated/test-twistshift-staggered/CMakeLists.txt b/tests/integrated/test-twistshift-staggered/CMakeLists.txt index f84c2eb9e7..9222f03a62 100644 --- a/tests/integrated/test-twistshift-staggered/CMakeLists.txt +++ b/tests/integrated/test-twistshift-staggered/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-twistshift-staggered SOURCES test-twistshift.cxx - EXECUTABLE_NAME test-twistshift USE_RUNTEST USE_DATA_BOUT_INP ) diff --git a/tests/integrated/test-vec/CMakeLists.txt b/tests/integrated/test-vec/CMakeLists.txt index 719ff2dfe4..6e11dd2019 100644 --- a/tests/integrated/test-vec/CMakeLists.txt +++ b/tests/integrated/test-vec/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-vec SOURCES testVec.cxx - EXECUTABLE_NAME testVec USE_RUNTEST USE_DATA_BOUT_INP ) diff --git a/tests/integrated/test-yupdown/CMakeLists.txt b/tests/integrated/test-yupdown/CMakeLists.txt index 6f0c03b083..dbb7d14611 100644 
--- a/tests/integrated/test-yupdown/CMakeLists.txt +++ b/tests/integrated/test-yupdown/CMakeLists.txt @@ -1,6 +1,5 @@ bout_add_integrated_test(test-yupdown SOURCES test_yupdown.cxx - EXECUTABLE_NAME test_yupdown USE_RUNTEST USE_DATA_BOUT_INP ) From 4ec0882ccf09f51cb0e4847d7806e418f38f48d1 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 5 Nov 2019 14:18:08 +0000 Subject: [PATCH 205/428] Don't delete test executables --- tests/integrated/test-initial/runtest | 1 - tests/integrated/test-laplace/runtest | 1 - tests/integrated/test-multigrid_laplace/runtest | 1 - tests/integrated/test-multigrid_laplace/runtest_multiple_grids | 1 - tests/integrated/test-multigrid_laplace/runtest_unsheared | 1 - 5 files changed, 5 deletions(-) diff --git a/tests/integrated/test-initial/runtest b/tests/integrated/test-initial/runtest index 47256944b3..4ccec13c8c 100755 --- a/tests/integrated/test-initial/runtest +++ b/tests/integrated/test-initial/runtest @@ -171,7 +171,6 @@ for coord in ["var_x", "var_y", "var_z"]: varlist.remove(coord) print("Making initial conditions test") -shell_safe("make clean") shell_safe("make > make.log") nprocs = [1, 2, 3, 4] diff --git a/tests/integrated/test-laplace/runtest b/tests/integrated/test-laplace/runtest index 8b641ac4e2..88095a1e1c 100755 --- a/tests/integrated/test-laplace/runtest +++ b/tests/integrated/test-laplace/runtest @@ -25,7 +25,6 @@ from sys import stdout, exit print("Making Laplacian inversion test") -shell("rm test_laplace") shell_safe("make > make.log") # Read benchmark values diff --git a/tests/integrated/test-multigrid_laplace/runtest b/tests/integrated/test-multigrid_laplace/runtest index 3c0b0e19f6..8c2949a7a8 100755 --- a/tests/integrated/test-multigrid_laplace/runtest +++ b/tests/integrated/test-multigrid_laplace/runtest @@ -20,7 +20,6 @@ from sys import exit print("Making multigrid Laplacian inversion test") -shell("rm test_multigrid_laplace") shell_safe("make > make.log") print("Running multigrid Laplacian inversion test") diff --git a/tests/integrated/test-multigrid_laplace/runtest_multiple_grids b/tests/integrated/test-multigrid_laplace/runtest_multiple_grids index 0fa04ac26a..f778718bbb 100755 --- a/tests/integrated/test-multigrid_laplace/runtest_multiple_grids +++ b/tests/integrated/test-multigrid_laplace/runtest_multiple_grids @@ -20,7 +20,6 @@ from sys import exit print("Making multigrid Laplacian inversion test") -shell("rm test_multigrid_laplace") shell_safe("make > make.log") print("Running multigrid Laplacian inversion test") diff --git a/tests/integrated/test-multigrid_laplace/runtest_unsheared b/tests/integrated/test-multigrid_laplace/runtest_unsheared index cd99d6d9d9..2c1c10553b 100755 --- a/tests/integrated/test-multigrid_laplace/runtest_unsheared +++ b/tests/integrated/test-multigrid_laplace/runtest_unsheared @@ -20,7 +20,6 @@ from sys import exit print("Making multigrid Laplacian inversion test") -shell("rm test_multigrid_laplace") shell_safe("make > make.log") print("Running multigrid Laplacian inversion test") From 643b99479b97b3f745fb3daa75cd00ad5cde4f31 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 5 Nov 2019 14:18:36 +0000 Subject: [PATCH 206/428] Use default executable name for fieldgroupcomm test --- tests/integrated/test-fieldgroupComm/makefile | 2 -- tests/integrated/test-fieldgroupComm/runtest | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/integrated/test-fieldgroupComm/makefile b/tests/integrated/test-fieldgroupComm/makefile index be91057e94..f3215eccde 100644 --- 
a/tests/integrated/test-fieldgroupComm/makefile +++ b/tests/integrated/test-fieldgroupComm/makefile @@ -1,5 +1,3 @@ BOUT_TOP = ../../../ SOURCEC = test_fieldgroupcomm.cxx -TARGET = test include $(BOUT_TOP)/make.config -EXTRA_INCS += -I${CURDIR} diff --git a/tests/integrated/test-fieldgroupComm/runtest b/tests/integrated/test-fieldgroupComm/runtest index b1e75767eb..389474afae 100755 --- a/tests/integrated/test-fieldgroupComm/runtest +++ b/tests/integrated/test-fieldgroupComm/runtest @@ -25,7 +25,7 @@ seterr(divide='ignore', invalid='ignore') varCorrect="fld1" varsComp = ["fld2", "fld3"] name = "FieldGroup comm" -exeName = "test" +exeName = "test_fieldgroupcomm" tol = 1e-10 # Relative tolerance From 358071b15106cb0c2858dca98227bc7600a6b9a8 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 5 Nov 2019 14:19:10 +0000 Subject: [PATCH 207/428] CMake: Add all "check" targets, remove tests from `make all` --- CMakeLists.txt | 41 +++++++++++++++++++++++++++++++++++++-- tests/unit/CMakeLists.txt | 1 + 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 01df06c1be..2b1d779a1a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -621,6 +621,9 @@ string(CONCAT BOUT_FLAGS_STRING target_compile_definitions(bout++ PRIVATE BOUT_FLAGS_STRING=${BOUT_FLAGS_STRING}) +################################################## +# Tests + # Copy FILENAME from source directory to build directory macro(bout_test_copy_file FILENAME) configure_file( @@ -669,6 +672,8 @@ function(bout_add_integrated_test TESTNAME) target_link_libraries(${TESTNAME} bout++) target_include_directories(${TESTNAME} PRIVATE $) + # Set the name of the executable. We either take it as an option, + # or use the first source file, stripping the file suffix if (BOUT_TEST_OPTIONS_EXECUTABLE_NAME) set_target_properties(${TESTNAME} PROPERTIES OUTPUT_NAME ${BOUT_TEST_OPTIONS_EXECUTABLE_NAME}) else() @@ -684,28 +689,60 @@ function(bout_add_integrated_test TESTNAME) set_target_properties(${TESTNAME} PROPERTIES OUTPUT_NAME ${BOUT_TEST_EXECUTABLE_NAME}) endif() + # Set the actual test command if (BOUT_TEST_OPTIONS_USE_RUNTEST) add_test(NAME ${TESTNAME} COMMAND ./runtest) bout_test_copy_file(runtest) else() add_test(NAME ${TESTNAME} COMMAND ${TESTNAME}) endif() + + # Copy the input file if needed if (BOUT_TEST_OPTIONS_USE_DATA_BOUT_INP) bout_test_copy_file(data/BOUT.inp) endif() + + # Copy any other needed files if (BOUT_TEST_OPTIONS_EXTRA_FILES) foreach (FILE ${BOUT_TEST_OPTIONS_EXTRA_FILES}) bout_test_copy_file("${FILE}") endforeach() endif() + set_target_properties(${TESTNAME} PROPERTIES FOLDER tests/integrated) + + # Add the test to the build-check-integrated-tests target + add_dependencies(build-check-integrated-tests ${TESTNAME}) endfunction() option(PACKAGE_TESTS "Build the tests" ON) if(PACKAGE_TESTS) enable_testing() - add_subdirectory(tests/unit) - add_subdirectory(tests/integrated) + + # Targets for just building the tests + # Tests need to add themselves as dependencies to these targets + add_custom_target(build-check-unit-tests) + add_custom_target(build-check-integrated-tests) + + # Build all the tests + add_custom_target(build-check) + add_dependencies(build-check build-check-unit-tests build-check-integrated-tests) + + add_subdirectory(tests/unit EXCLUDE_FROM_ALL) + add_subdirectory(tests/integrated EXCLUDE_FROM_ALL) + + # Targets for running the tests + add_custom_target(check-unit-tests + COMMAND ctest -R serial_tests --output-on-failure) + add_dependencies(check-unit-tests 
build-check-unit-tests) + + add_custom_target(check-integrated-tests + COMMAND ctest -R "test-" --output-on-failure) + add_dependencies(check-integrated-tests build-check-integrated-tests) + + # Run all the tests + add_custom_target(check) + add_dependencies(check check-unit-tests check-integrated-tests) endif() ################################################## diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index fce0a1a124..2bee9a2f53 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -66,3 +66,4 @@ target_include_directories(serial_tests PUBLIC .) target_link_libraries(serial_tests gtest gmock bout++::bout++) add_test(NAME serial_tests COMMAND serial_tests) set_target_properties(serial_tests PROPERTIES FOLDER tests/unit) +add_dependencies(build-check-unit-tests serial_tests) From 9b59cdc20a0ee4d4197e3dd5edf9133e96610fe1 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 5 Nov 2019 17:24:32 +0000 Subject: [PATCH 208/428] Move test input file into data directory Previously changed test in fab73286 to not copy+sed input file, forgot that the input file in data/ wasn't in repo --- tests/integrated/test-drift-instability/{ => data}/BOUT.inp | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/integrated/test-drift-instability/{ => data}/BOUT.inp (100%) diff --git a/tests/integrated/test-drift-instability/BOUT.inp b/tests/integrated/test-drift-instability/data/BOUT.inp similarity index 100% rename from tests/integrated/test-drift-instability/BOUT.inp rename to tests/integrated/test-drift-instability/data/BOUT.inp From 3f2afde452057a154dcd33f25e0ede89284ada65 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 6 Nov 2019 10:54:43 +0000 Subject: [PATCH 209/428] CMake: Handle empty CMAKE_BUILD_TYPE --- CMakeLists.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2b1d779a1a..5818ccc541 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -605,7 +605,11 @@ string(REPLACE ";" " " BOUT_COMPILE_OPTIONS "${BOUT_COMPILE_OPTIONS}") # build type actually being used. Note: this might behave weirdly # on Windows. Might need to expand CMAKE_CONFIGURATION_TYPES # instead? 
-string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER) +if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) + set(CMAKE_BUILD_TYPE_UPPER "DEBUG") +else() + string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_UPPER) +endif() string(CONCAT BOUT_COMPILE_BUILD_FLAGS " " "${CMAKE_CXX_FLAGS}" From 22dda77885cde59f56806a0be2de4293776c089c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 5 Jul 2019 17:42:42 +0100 Subject: [PATCH 210/428] Remove duplicated DEPRECATED --- include/difops.hxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/difops.hxx b/include/difops.hxx index 86959f773f..9692cc6f3d 100644 --- a/include/difops.hxx +++ b/include/difops.hxx @@ -73,8 +73,8 @@ inline const DEPRECATED(Field3D Grad_par(const Field3D& var, CELL_LOC outloc, DIFF_METHOD method)) { return Grad_par(var, outloc, toString(method)); }; -DEPRECATED(inline const DEPRECATED( - Field3D Grad_par(const Field3D& var, DIFF_METHOD method, CELL_LOC outloc))) { +DEPRECATED(inline const + Field3D Grad_par(const Field3D& var, DIFF_METHOD method, CELL_LOC outloc)) { return Grad_par(var, outloc, toString(method)); }; From 9c394f3370cb79a680f0052f024613c7ffa16f73 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 2 Oct 2019 13:38:27 +0100 Subject: [PATCH 211/428] Workaround for bug in MSVC: explicitly name type of std::bind return --- include/bout/index_derivs.hxx | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/include/bout/index_derivs.hxx b/include/bout/index_derivs.hxx index 612c5b3b48..2ce2d40a29 100644 --- a/include/bout/index_derivs.hxx +++ b/include/bout/index_derivs.hxx @@ -176,7 +176,9 @@ struct registerMethod { case (DERIV::Upwind): case (DERIV::Flux): { if (nGuards == 1) { - const auto theFunc = std::bind( + const std::function + theFunc = std::bind( // Method to store in function &Method::template upwindOrFlux, @@ -185,7 +187,9 @@ struct registerMethod { method, _1, _2, _3, _4); derivativeRegister.registerDerivative(theFunc, Direction{}, Stagger{}, method); } else { - const auto theFunc = std::bind( + const std::function + theFunc = std::bind( // Method to store in function &Method::template upwindOrFlux, From f6e9d5f759d5de44c79ab9e1ccef7a24ebeaaadf Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 2 Oct 2019 14:13:01 +0100 Subject: [PATCH 212/428] Fix for undefined behaviour with isspace Argument must be representable as `unsigned char`, so must `static_cast` argument --- src/sys/expressionparser.cxx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/sys/expressionparser.cxx b/src/sys/expressionparser.cxx index f76e1df0c0..e3bcdf8279 100644 --- a/src/sys/expressionparser.cxx +++ b/src/sys/expressionparser.cxx @@ -304,7 +304,7 @@ ExpressionParser::LexInfo::LexInfo(const std::string& input, std::string reserve } char ExpressionParser::LexInfo::nextToken() { - while (isspace(LastChar)) + while (isspace(static_cast(LastChar))) LastChar = static_cast(ss.get()); if (!ss.good()) { @@ -313,7 +313,7 @@ char ExpressionParser::LexInfo::nextToken() { } // Handle numbers - if (isdigit(LastChar) || (LastChar == '.')) { // Number: [0-9.]+ + if (isdigit(static_cast(LastChar)) || (LastChar == '.')) { // Number: [0-9.]+ bool gotdecimal = false, gotexponent = false; std::string NumStr; @@ -332,11 +332,11 @@ char ExpressionParser::LexInfo::nextToken() { // Next character should be a '+' or '-' or digit NumStr += 'e'; LastChar = static_cast(ss.get()); - if ((LastChar != '+') && (LastChar != '-') && !isdigit(LastChar)) { + if 
((LastChar != '+') && (LastChar != '-') && !isdigit(static_cast(LastChar))) { throw ParseException( "ExpressionParser error: Expecting '+', '-' or number after 'e'"); } - } else if (!isdigit(LastChar)) + } else if (!isdigit(static_cast(LastChar))) break; NumStr += LastChar; @@ -388,7 +388,7 @@ char ExpressionParser::LexInfo::nextToken() { curident += LastChar; } LastChar = static_cast(ss.get()); - } while ((LastChar != EOF && !isspace(LastChar) + } while ((LastChar != EOF && !isspace(static_cast(LastChar)) && (reserved_chars.find(LastChar) == std::string::npos)) || (LastChar == '\\') || (LastChar == '`')); curtok = -2; From 6fff65bf3e673f5dcd67ddffa394a804526a6df0 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 2 Oct 2019 14:45:48 +0100 Subject: [PATCH 213/428] Add definitions of UNUSED and MAYBE_UNUSED for MSVC --- include/unused.hxx | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/unused.hxx b/include/unused.hxx index c3d6f63328..eab3562552 100644 --- a/include/unused.hxx +++ b/include/unused.hxx @@ -27,6 +27,8 @@ /// void someFunction(int UNUSED(x)) {}; #if defined(__GNUC__) # define UNUSED(x) UNUSED_ ## x __attribute__((unused)) +#elif defined(_MSC_VER) +#define UNUSED(x) __pragma(warning(suppress : 4100)) UNUSED_ ## x #elif defined(__LCLINT__) # define UNUSED(x) /*@unused@*/ x #elif defined(__cplusplus) @@ -48,6 +50,8 @@ #ifndef MAYBE_UNUSED #if defined(__GNUC__) # define MAYBE_UNUSED(x) [[gnu::unused]] x +#elif defined(_MSC_VER) +# define MAYBE_UNUSED(x) __pragma(warning(suppress : 4100)) x #else # define MAYBE_UNUSED(x) x #endif From b6965c279eae553ca42e0119eb3b615f03eb1909 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 2 Oct 2019 14:49:22 +0100 Subject: [PATCH 214/428] Use fully resolved name for fft functions Workaround for name resolution bug in MSVC --- src/invert/fft_fftw.cxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/invert/fft_fftw.cxx b/src/invert/fft_fftw.cxx index f0ecefa8ea..e60eacc15f 100644 --- a/src/invert/fft_fftw.cxx +++ b/src/invert/fft_fftw.cxx @@ -493,7 +493,7 @@ Array rfft(const Array& in) { int size{in.size()}; Array out{(size / 2) + 1}; - rfft(in.begin(), size, out.begin()); + bout::fft::rfft(in.begin(), size, out.begin()); return out; } @@ -503,7 +503,7 @@ Array irfft(const Array& in, int length) { Array out{length}; - irfft(in.begin(), length, out.begin()); + bout::fft::irfft(in.begin(), length, out.begin()); return out; } From 894b275901d412fb9e69e4f752f0a44f880db97a Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 2 Oct 2019 14:50:19 +0100 Subject: [PATCH 215/428] Ignore various Windows/Visual Studio files --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index e23b756987..e6a6914573 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,6 @@ src/.libfast .BOUT.pid.* build/ build*/ +/.vs +/CMakeSettings.json +/CppProperties.json From 5eee2fd145a7e862332b547973cc92b09a81b1f0 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 2 Oct 2019 14:51:01 +0100 Subject: [PATCH 216/428] Fix non-guarded find_package for NLS --- CMakeLists.txt | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5818ccc541..a89b8d4cfe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -531,19 +531,21 @@ message(STATUS "SUNDIALS support: ${USE_SUNDIALS}") set(BOUT_HAS_SUNDIALS ${USE_SUNDIALS}) option(USE_NLS "Enable Native Language Support" ON) -find_package(Gettext) -if (GETTEXT_FOUND) - 
target_compile_definitions(bout++ - PUBLIC "BOUT_HAS_GETTEXT") - find_package(Intl) - if (Intl_FOUND) - target_link_libraries(bout++ - PUBLIC ${Intl_LIBRARIES}) - target_include_directories(bout++ - PUBLIC ${Intl_INCLUDE_DIRS}) +if (USE_NLS) + find_package(Gettext) + if (GETTEXT_FOUND) + target_compile_definitions(bout++ + PUBLIC "BOUT_HAS_GETTEXT") + find_package(Intl) + if (Intl_FOUND) + target_link_libraries(bout++ + PUBLIC ${Intl_LIBRARIES}) + target_include_directories(bout++ + PUBLIC ${Intl_INCLUDE_DIRS}) + endif() endif() endif() -set(BOUT_HAS_GETTEXT ${GETTEXT_FOUND}) +set(BOUT_HAS_GETTEXT ${USE_NLS}) option(USE_SCOREP "Enable support for Score-P based instrumentation" OFF) if (USE_SCOREP) From da2e1a49313c9783e4eee263aebe6ce4ad48ebab Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 2 Oct 2019 15:30:21 +0100 Subject: [PATCH 217/428] Add windows headers and shims for BOUT++ initialisation --- src/bout++.cxx | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/bout++.cxx b/src/bout++.cxx index ff117907fd..3dfad83924 100644 --- a/src/bout++.cxx +++ b/src/bout++.cxx @@ -61,9 +61,21 @@ const char DEFAULT_DIR[] = "data"; #include #include -// POSIX headers #include + +// Define S_ISDIR if not defined by system headers (that is, MSVC) +// Taken from https://github.com/curl/curl/blob/e59540139a398dc70fde6aec487b19c5085105af/lib/curl_setup.h#L748-L751 +#if !defined(S_ISDIR) && defined(S_IFMT) && defined(S_IFDIR) +#define S_ISDIR(m) (((m)&S_IFMT) == S_IFDIR) +#endif + +#ifdef _MSC_VER +#include +inline auto getpid() -> int { return GetCurrentProcessId(); } +#else +// POSIX headers #include +#endif #ifdef BOUT_FPE #include @@ -181,8 +193,10 @@ void setupSignalHandler(SignalHandler signal_handler) { feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW); #endif +#ifndef _MSC_VER /// Trap SIGUSR1 to allow a clean exit after next write std::signal(SIGUSR1, signal_handler); +#endif } // This is currently just an alias to the existing handler @@ -785,12 +799,14 @@ void bout_signal_handler(int sig) { case SIGINT: throw BoutException("\n****** SigInt caught ******\n\n"); break; +#ifndef _MSC_VER case SIGKILL: throw BoutException("\n****** SigKill caught ******\n\n"); break; case SIGUSR1: user_requested_exit = true; break; +#endif default: throw BoutException("\n****** Signal %d caught ******\n\n", sig); break; From b234f2ecc7fab9f49c9ae4fad733beaf27fb8de7 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 2 Oct 2019 16:18:59 +0100 Subject: [PATCH 218/428] Provide shim for non-standard finite function This is a BSD extension in GNU's libc --- include/utils.hxx | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/utils.hxx b/include/utils.hxx index 735f422ca3..451a1faf11 100644 --- a/include/utils.hxx +++ b/include/utils.hxx @@ -45,6 +45,11 @@ #include #include +#ifdef _MSC_VER +// finite is not actually standard C++, it's a BSD extention for C +inline auto finite(BoutReal x) -> bool { return std::isfinite(x); } +#endif + namespace bout { namespace utils { #ifndef __cpp_lib_make_unique From 31d3e98c9d4a922fe647978e56731c559820797d Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 2 Oct 2019 16:42:41 +0100 Subject: [PATCH 219/428] Add shim for POSIX strcasecmp on MSVC Also conditionally include strings.h --- include/utils.hxx | 1 + 1 file changed, 1 insertion(+) diff --git a/include/utils.hxx b/include/utils.hxx index 451a1faf11..fab1f668b4 100644 --- a/include/utils.hxx +++ b/include/utils.hxx @@ -48,6 +48,7 @@ #ifdef _MSC_VER // finite is not 
actually standard C++, it's a BSD extention for C inline auto finite(BoutReal x) -> bool { return std::isfinite(x); } +inline auto strcasecmp(const char* s1, const char* s2) -> int { return _stricmp(s1, s2); } #endif namespace bout { From a4dcdc470415009ece0c28f4ddcf100207ffd45a Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 8 Oct 2019 09:58:38 +0100 Subject: [PATCH 220/428] Silence some warnings about unused variables from exceptions Also make sure to catch them by const-ref --- src/fileio/datafile.cxx | 6 +++--- src/invert/laplace/invert_laplace.cxx | 4 ++-- src/mesh/parallel_boundary_op.cxx | 8 ++++---- src/solver/solver.cxx | 2 +- src/sys/options.cxx | 4 ++-- src/sys/optionsreader.cxx | 4 ++-- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/fileio/datafile.cxx b/src/fileio/datafile.cxx index 240861e536..9c8f3593c7 100644 --- a/src/fileio/datafile.cxx +++ b/src/fileio/datafile.cxx @@ -1682,7 +1682,7 @@ void Datafile::setAttribute(const std::string &varname, const std::string &attrn bool Datafile::read_f2d(const std::string &name, Field2D *f, bool save_repeat) { try { file->readFieldAttributes(name, *f); - } catch (const BoutException &e) { + } catch (const BoutException&) { if (init_missing) { output_warn.write("\tWARNING: Could not read 2D field %s attributes.\n", name.c_str()); } else { @@ -1720,7 +1720,7 @@ bool Datafile::read_f2d(const std::string &name, Field2D *f, bool save_repeat) { bool Datafile::read_f3d(const std::string &name, Field3D *f, bool save_repeat) { try { file->readFieldAttributes(name, *f); - } catch (const BoutException &e) { + } catch (const BoutException&) { if (init_missing) { output_warn.write("\tWARNING: Could not read 3D field %s attributes.\n", name.c_str()); } else { @@ -1764,7 +1764,7 @@ bool Datafile::read_f3d(const std::string &name, Field3D *f, bool save_repeat) { bool Datafile::read_fperp(const std::string &name, FieldPerp *f, bool save_repeat) { try { file->readFieldAttributes(name, *f); - } catch (const BoutException &e) { + } catch (const BoutException&) { if (init_missing) { output_warn.write("\tWARNING: Could not read FieldPerp %s attributes.\n", name.c_str()); } else { diff --git a/src/invert/laplace/invert_laplace.cxx b/src/invert/laplace/invert_laplace.cxx index 04046b263c..cb3760f337 100644 --- a/src/invert/laplace/invert_laplace.cxx +++ b/src/invert/laplace/invert_laplace.cxx @@ -171,7 +171,7 @@ Field3D Laplacian::solve(const Field3D& b) { // 2. Send it to the solver of the implementation (determined during creation) x = solve(sliceXZ(b,jy)); } - } catch (BoutIterationFail &itfail) { + } catch (const BoutIterationFail&) { status = 1; } BoutParallelThrowRhsFail(status, "Laplacian inversion took too many iterations."); @@ -223,7 +223,7 @@ Field3D Laplacian::solve(const Field3D& b, const Field3D& x0) { // 2. 
Send them to the solver of the implementation (determined during creation) x = solve(sliceXZ(b,jy), sliceXZ(x0,jy)); } - } catch (BoutIterationFail &itfail) { + } catch (const BoutIterationFail&) { status = 1; } BoutParallelThrowRhsFail(status, "Laplacian inversion took too many iterations."); diff --git a/src/mesh/parallel_boundary_op.cxx b/src/mesh/parallel_boundary_op.cxx index 4e62709483..791a54a53d 100644 --- a/src/mesh/parallel_boundary_op.cxx +++ b/src/mesh/parallel_boundary_op.cxx @@ -72,7 +72,7 @@ BoundaryOpPar* BoundaryOpPar_dirichlet::clone(BoundaryRegionPar *region, const s try { real_value = stringToReal(args.front()); return new BoundaryOpPar_dirichlet(region, real_value); - } catch (BoutException& e) { + } catch (const BoutException&) { std::shared_ptr newgen = nullptr; // First argument should be an expression newgen = FieldFactory::get()->parse(args.front()); @@ -117,7 +117,7 @@ BoundaryOpPar* BoundaryOpPar_dirichlet_O3::clone(BoundaryRegionPar *region, cons try { real_value = stringToReal(args.front()); return new BoundaryOpPar_dirichlet_O3(region, real_value); - } catch (BoutException& e) { + } catch (const BoutException&) { std::shared_ptr newgen = nullptr; // First argument should be an expression newgen = FieldFactory::get()->parse(args.front()); @@ -169,7 +169,7 @@ BoundaryOpPar* BoundaryOpPar_dirichlet_interp::clone(BoundaryRegionPar *region, try { real_value = stringToReal(args.front()); return new BoundaryOpPar_dirichlet_interp(region, real_value); - } catch (BoutException& e) { + } catch (const BoutException&) { std::shared_ptr newgen = nullptr; // First argument should be an expression newgen = FieldFactory::get()->parse(args.front()); @@ -218,7 +218,7 @@ BoundaryOpPar* BoundaryOpPar_neumann::clone(BoundaryRegionPar *region, const std try { real_value = stringToReal(args.front()); return new BoundaryOpPar_neumann(region, real_value); - } catch (BoutException& e) { + } catch (const BoutException&) { std::shared_ptr newgen = nullptr; // First argument should be an expression newgen = FieldFactory::get()->parse(args.front()); diff --git a/src/solver/solver.cxx b/src/solver/solver.cxx index 93d189ccad..8d035ee2de 100644 --- a/src/solver/solver.cxx +++ b/src/solver/solver.cxx @@ -773,7 +773,7 @@ int Solver::call_monitors(BoutReal simtime, int iter, int NOUT) { throw BoutException(_("Monitor signalled to quit")); } } - } catch (BoutException& e) { + } catch (const BoutException&) { for (const auto& it : monitors) { it->cleanup(); } diff --git a/src/sys/options.cxx b/src/sys/options.cxx index fb5a89500e..f93183c372 100644 --- a/src/sys/options.cxx +++ b/src/sys/options.cxx @@ -370,7 +370,7 @@ template <> Field3D Options::as(const Field3D& similar_to) const { // Get metadata from similar_to, fill field with scalar_value return filledFrom(similar_to, scalar_value); - } catch (const std::bad_cast &e) { + } catch (const std::bad_cast&) { // Convert from a string using FieldFactory if (bout::utils::holds_alternative(value)) { @@ -425,7 +425,7 @@ template <> Field2D Options::as(const Field2D& similar_to) const { // Get metadata from similar_to, fill field with scalar_value return filledFrom(similar_to, scalar_value); - } catch (const std::bad_cast &e) { + } catch (const std::bad_cast&) { // Convert from a string using FieldFactory if (bout::utils::holds_alternative(value)) { diff --git a/src/sys/optionsreader.cxx b/src/sys/optionsreader.cxx index c60fc2b2f6..c2c6d21e8b 100644 --- a/src/sys/optionsreader.cxx +++ b/src/sys/optionsreader.cxx @@ -38,7 +38,7 @@ void 
OptionsReader::read(Options *options, const char *file, ...) { try { parser->read(options, filename); - } catch (BoutException &e) { + } catch (const BoutException&) { delete[] filename; delete parser; throw; @@ -64,7 +64,7 @@ void OptionsReader::write(Options *options, const char *file, ...) { try { parser->write(options, filename); - } catch (BoutException &e) { + } catch (const BoutException&) { delete[] filename; delete parser; throw; From 71842c326718e082d60560de5b7ce02ec445adaf Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 8 Oct 2019 10:36:02 +0100 Subject: [PATCH 221/428] Silence some conversion warnings with explicit cast --- include/bout/fieldgroup.hxx | 4 ++-- include/bout/solver.hxx | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/bout/fieldgroup.hxx b/include/bout/fieldgroup.hxx index d0d5130286..e5ae52de82 100644 --- a/include/bout/fieldgroup.hxx +++ b/include/bout/fieldgroup.hxx @@ -153,12 +153,12 @@ public: /// Return number of fields int size() const { - return fvec.size(); + return static_cast(fvec.size()); } /// Return number of Field3Ds int size_field3d() const { - return f3vec.size(); + return static_cast(f3vec.size()); } /// Test whether this group is empty diff --git a/include/bout/solver.hxx b/include/bout/solver.hxx index aa9e0c21e3..151fc670f4 100644 --- a/include/bout/solver.hxx +++ b/include/bout/solver.hxx @@ -281,9 +281,9 @@ public: // Solver status. Optional functions used to query the solver /// Number of 2D variables. Vectors count as 3 - virtual int n2Dvars() const { return f2d.size(); } + virtual int n2Dvars() const { return static_cast(f2d.size()); } /// Number of 3D variables. Vectors count as 3 - virtual int n3Dvars() const { return f3d.size(); } + virtual int n3Dvars() const { return static_cast(f3d.size()); } /// Get and reset the number of calls to the RHS function int resetRHSCounter(); From d5e6debafac7292d6755fab8058b787231ada705 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 8 Oct 2019 12:36:49 +0100 Subject: [PATCH 222/428] Rename function parameter to avoid shadowing member variable --- include/field2d.hxx | 4 ++-- include/field3d.hxx | 4 ++-- include/fieldperp.hxx | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/field2d.hxx b/include/field2d.hxx index 1d4176029d..f90a2be5fe 100644 --- a/include/field2d.hxx +++ b/include/field2d.hxx @@ -120,8 +120,8 @@ class Field2D : public Field, public FieldData { int getNz() const override {return 1;}; // these methods return Field2D to allow method chaining - Field2D& setLocation(CELL_LOC location) { - Field::setLocation(location); + Field2D& setLocation(CELL_LOC new_location) { + Field::setLocation(new_location); return *this; } Field2D& setDirectionY(YDirectionType d) { diff --git a/include/field3d.hxx b/include/field3d.hxx index 273ed9b329..75fe892bed 100644 --- a/include/field3d.hxx +++ b/include/field3d.hxx @@ -227,8 +227,8 @@ class Field3D : public Field, public FieldData { int getNz() const override {return nz;}; // these methods return Field3D to allow method chaining - Field3D& setLocation(CELL_LOC location) { - Field::setLocation(location); + Field3D& setLocation(CELL_LOC new_location) { + Field::setLocation(new_location); return *this; } Field3D& setDirectionY(YDirectionType d) { diff --git a/include/fieldperp.hxx b/include/fieldperp.hxx index 8d0cc3ee27..45ff4edb63 100644 --- a/include/fieldperp.hxx +++ b/include/fieldperp.hxx @@ -123,8 +123,8 @@ class FieldPerp : public Field { } // these methods return FieldPerp to allow 
method chaining - FieldPerp& setLocation(CELL_LOC location) { - Field::setLocation(location); + FieldPerp& setLocation(CELL_LOC new_location) { + Field::setLocation(new_location); return *this; } FieldPerp& setDirectionY(YDirectionType d) { From 1e5912ab05d2cd1076acb872c12dc05f2b7b420b Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 16 Dec 2019 10:41:53 +0000 Subject: [PATCH 223/428] Fix MACRO_FOR_EACH for MSVC using intermediate expansion --- include/bout/macro_for_each.hxx | 46 +++++++++++++++++---------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/include/bout/macro_for_each.hxx b/include/bout/macro_for_each.hxx index cf9660131c..6d140880c8 100644 --- a/include/bout/macro_for_each.hxx +++ b/include/bout/macro_for_each.hxx @@ -13,30 +13,32 @@ // https://github.com/cormacc/va_args_iterators // +/// Intermediate expansion needed for MSVC due to non-compliant preprocessor +#define BOUT_EXPAND(x) x /// _me_x set of macros expand a number of arguments without ';' between them #define _me_1(_call, x) _call(x) -#define _me_2(_call, x, ...) _call(x) _me_1(_call, __VA_ARGS__) -#define _me_3(_call, x, ...) _call(x) _me_2(_call, __VA_ARGS__) -#define _me_4(_call, x, ...) _call(x) _me_3(_call, __VA_ARGS__) -#define _me_5(_call, x, ...) _call(x) _me_4(_call, __VA_ARGS__) -#define _me_6(_call, x, ...) _call(x) _me_5(_call, __VA_ARGS__) -#define _me_7(_call, x, ...) _call(x) _me_6(_call, __VA_ARGS__) -#define _me_8(_call, x, ...) _call(x) _me_7(_call, __VA_ARGS__) -#define _me_9(_call, x, ...) _call(x) _me_8(_call, __VA_ARGS__) -#define _me_10(_call, x, ...) _call(x) _me_9(_call, __VA_ARGS__) +#define _me_2(_call, x, ...) _call(x) BOUT_EXPAND(_me_1(_call, __VA_ARGS__)) +#define _me_3(_call, x, ...) _call(x) BOUT_EXPAND(_me_2(_call, __VA_ARGS__)) +#define _me_4(_call, x, ...) _call(x) BOUT_EXPAND(_me_3(_call, __VA_ARGS__)) +#define _me_5(_call, x, ...) _call(x) BOUT_EXPAND(_me_4(_call, __VA_ARGS__)) +#define _me_6(_call, x, ...) _call(x) BOUT_EXPAND(_me_5(_call, __VA_ARGS__)) +#define _me_7(_call, x, ...) _call(x) BOUT_EXPAND(_me_6(_call, __VA_ARGS__)) +#define _me_8(_call, x, ...) _call(x) BOUT_EXPAND(_me_7(_call, __VA_ARGS__)) +#define _me_9(_call, x, ...) _call(x) BOUT_EXPAND(_me_8(_call, __VA_ARGS__)) +#define _me_10(_call, x, ...) _call(x) BOUT_EXPAND(_me_9(_call, __VA_ARGS__)) /// _fe_x set of macros expand a number of arguments with ';' between them #define _fe_1(_call, x) _call(x); -#define _fe_2(_call, x, ...) _call(x); _fe_1(_call, __VA_ARGS__) -#define _fe_3(_call, x, ...) _call(x); _fe_2(_call, __VA_ARGS__) -#define _fe_4(_call, x, ...) _call(x); _fe_3(_call, __VA_ARGS__) -#define _fe_5(_call, x, ...) _call(x); _fe_4(_call, __VA_ARGS__) -#define _fe_6(_call, x, ...) _call(x); _fe_5(_call, __VA_ARGS__) -#define _fe_7(_call, x, ...) _call(x); _fe_6(_call, __VA_ARGS__) -#define _fe_8(_call, x, ...) _call(x); _fe_7(_call, __VA_ARGS__) -#define _fe_9(_call, x, ...) _call(x); _fe_8(_call, __VA_ARGS__) -#define _fe_10(_call, x, ...) _call(x); _fe_9(_call, __VA_ARGS__) +#define _fe_2(_call, x, ...) _call(x); BOUT_EXPAND(_fe_1(_call, __VA_ARGS__)) +#define _fe_3(_call, x, ...) _call(x); BOUT_EXPAND(_fe_2(_call, __VA_ARGS__)) +#define _fe_4(_call, x, ...) _call(x); BOUT_EXPAND(_fe_3(_call, __VA_ARGS__)) +#define _fe_5(_call, x, ...) _call(x); BOUT_EXPAND(_fe_4(_call, __VA_ARGS__)) +#define _fe_6(_call, x, ...) _call(x); BOUT_EXPAND(_fe_5(_call, __VA_ARGS__)) +#define _fe_7(_call, x, ...) _call(x); BOUT_EXPAND(_fe_6(_call, __VA_ARGS__)) +#define _fe_8(_call, x, ...) 
_call(x); BOUT_EXPAND(_fe_7(_call, __VA_ARGS__)) +#define _fe_9(_call, x, ...) _call(x); BOUT_EXPAND(_fe_8(_call, __VA_ARGS__)) +#define _fe_10(_call, x, ...) _call(x); BOUT_EXPAND(_fe_9(_call, __VA_ARGS__)) /// When called with __VA_ARGS__ first, this evaluates to an argument which depends /// on the length of __VA_ARGS__. This is used to find the appropriate macro to @@ -63,10 +65,10 @@ /// to avoid surprising results. /// #define MACRO_FOR_EACH(mac, ...) \ - _GET_FOR_EACH_EXPANSION(__VA_ARGS__, \ + BOUT_EXPAND(_GET_FOR_EACH_EXPANSION(__VA_ARGS__, \ _me_10, _me_9, _me_8, _me_7, _me_6, _me_5, \ _me_4, _me_3, _me_2, _me_1) \ - (mac, __VA_ARGS__) + (mac, __VA_ARGS__)) /// Apply a function (first argument) to each /// of the following arguments. @@ -88,9 +90,9 @@ /// to avoid surprising results. /// #define MACRO_FOR_EACH_FN(fn, ...) \ - _GET_FOR_EACH_EXPANSION(__VA_ARGS__, \ + BOUT_EXPAND(_GET_FOR_EACH_EXPANSION(__VA_ARGS__, \ _fe_10, _fe_9, _fe_8, _fe_7, _fe_6, _fe_5, \ _fe_4, _fe_3, _fe_2, _fe_1) \ - (fn, __VA_ARGS__) + (fn, __VA_ARGS__)) #endif From 0b1392a23b5d007ddeb73458a86c4a0fcf2577fb Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 16 Dec 2019 10:56:50 +0000 Subject: [PATCH 224/428] Fix Datafile members missing from copy/move constructors --- src/fileio/datafile.cxx | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/src/fileio/datafile.cxx b/src/fileio/datafile.cxx index 9c8f3593c7..f7416baf9b 100644 --- a/src/fileio/datafile.cxx +++ b/src/fileio/datafile.cxx @@ -73,9 +73,9 @@ Datafile::Datafile(Options* opt, Mesh* mesh_in) } Datafile::Datafile(Datafile &&other) noexcept - : parallel(other.parallel), flush(other.flush), guards(other.guards), + : mesh(other.mesh), parallel(other.parallel), flush(other.flush), guards(other.guards), floats(other.floats), openclose(other.openclose), Lx(other.Lx), Ly(other.Ly), - Lz(other.Lz), enabled(other.enabled), shiftOutput(other.shiftOutput), + Lz(other.Lz), enabled(other.enabled), init_missing(other.init_missing), shiftOutput(other.shiftOutput), shiftInput(other.shiftInput), flushFrequencyCounter(other.flushFrequencyCounter), flushFrequency(other.flushFrequency), file(std::move(other.file)), writable(other.writable), appending(other.appending), first_time(other.first_time), @@ -91,20 +91,21 @@ Datafile::Datafile(Datafile &&other) noexcept other.file = nullptr; } -Datafile::Datafile(const Datafile &other) : - mesh(other.mesh), parallel(other.parallel), flush(other.flush), guards(other.guards), - floats(other.floats), openclose(other.openclose), Lx(other.Lx), Ly(other.Ly), Lz(other.Lz), - enabled(other.enabled), shiftOutput(other.shiftOutput), shiftInput(other.shiftInput), flushFrequencyCounter(other.flushFrequencyCounter), flushFrequency(other.flushFrequency), - file(nullptr), writable(other.writable), appending(other.appending), first_time(other.first_time), - int_arr(other.int_arr), int_vec_arr(other.int_vec_arr), - string_arr(other.string_arr), BoutReal_arr(other.BoutReal_arr), - bool_arr(other.bool_arr), f2d_arr(other.f2d_arr), f3d_arr(other.f3d_arr), - v2d_arr(other.v2d_arr), v3d_arr(other.v3d_arr) -{ - filenamelen=other.filenamelen; - filename=new char[filenamelen]; - strncpy(filename,other.filename,filenamelen); - // Same added variables, but the file not the same +Datafile::Datafile(const Datafile& other) + : mesh(other.mesh), parallel(other.parallel), flush(other.flush), + guards(other.guards), floats(other.floats), openclose(other.openclose), + Lx(other.Lx), Ly(other.Ly), 
Lz(other.Lz), enabled(other.enabled), + init_missing(other.init_missing), shiftOutput(other.shiftOutput), + shiftInput(other.shiftInput), flushFrequencyCounter(other.flushFrequencyCounter), + flushFrequency(other.flushFrequency), file(nullptr), writable(other.writable), + appending(other.appending), first_time(other.first_time), int_arr(other.int_arr), + int_vec_arr(other.int_vec_arr), string_arr(other.string_arr), + BoutReal_arr(other.BoutReal_arr), bool_arr(other.bool_arr), f2d_arr(other.f2d_arr), + f3d_arr(other.f3d_arr), v2d_arr(other.v2d_arr), v3d_arr(other.v3d_arr) { + filenamelen = other.filenamelen; + filename = new char[filenamelen]; + strncpy(filename, other.filename, filenamelen); + // Same added variables, but the file not the same } Datafile& Datafile::operator=(Datafile &&rhs) noexcept { From be492b15ae2febbb6725c5d10657bca8a9ec28ea Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 16 Dec 2019 15:35:03 +0000 Subject: [PATCH 225/428] Add some documentation for compiling on Windows --- manual/sphinx/user_docs/advanced_install.rst | 44 ++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/manual/sphinx/user_docs/advanced_install.rst b/manual/sphinx/user_docs/advanced_install.rst index 7ebc5d2873..20d66ec5c8 100644 --- a/manual/sphinx/user_docs/advanced_install.rst +++ b/manual/sphinx/user_docs/advanced_install.rst @@ -804,6 +804,50 @@ make.config, replacing ``-lnetcdf_c++`` with -lnetcdf64\_c++, and sed 's/netcdf/netcdf64/g' make.config > make.config.new mv make.config.new make.config +Compiling on Windows +~~~~~~~~~~~~~~~~~~~~ + +It is possible to compile BOUT++ on Windows using the CMake +interface. Support is currently very experimental, and some features do +not work. Testing has been done with MSVC 19.24 and Visual Studio 16.4, +although previous versions may still work. + +The main difficulty of using BOUT++ on Windows is getting the +dependencies sorted. The easiest way to install dependencies on Windows +is using `vcpkg `_. You may need to +set the CMake toolchain file if calling ``cmake`` from PowerShell, or on +older versions of Visual Studio. This will be a file somewhere like +``C:/vcpkg/scripts/buildsystems/vcpkg.cmake`` + +The minimal required CMake options are as follows: + +.. code-block:: bash + + -DENABLE_BACKTRACE=OFF \ + -DCMAKE_CXX_FLAGS="/permissive- /EHsc /bigobj" \ + -DBUILD_SHARED_LIBS=OFF + +``ENABLE_BACKTRACE`` must be turned off due to the currently required +``addr2line`` executable not being available on Windows. + +The following flags for the MSVC compiler are required: + +- ``/permissive-`` for standards compliance, such as treating the binary + operator alternative tokens (``and``, ``or``, etc) as tokens +- ``/EHsc`` for standard C++ exception handling, and to assume that + ``extern "C"`` functions never throw +- ``/bigobj`` to increase the number of sections in the .obj file, + required for the template-heavy derivatives machinery + +No modification to the source has been done to export the correct +symbols for shared libraries on Windows, so you must either specify +``-DBUILD_SHARED_LIBS=OFF`` to only build static libraries, or, if you +really want shared libraries, ``-DCMAKE_WINDOWS_EXPORT_ALL_SYMBOLS=ON``. +The latter is untested, use at your own risk! + +The unit tests should all pass, but most of the integrated tests will +not work out of the box yet. 
+ Issues ------ From 1e6f0c50b073ed6950792d184c0defd0ebef49d3 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 16 Dec 2019 16:44:37 +0000 Subject: [PATCH 226/428] Replace [[gnu::deprecated]] attribute with standard form --- include/bout/coordinates.hxx | 10 +++---- include/bout/mesh.hxx | 44 +++++++++++++++--------------- include/bout/paralleltransform.hxx | 16 +++++------ include/derivs.hxx | 16 +++++------ include/difops.hxx | 20 +++++++------- include/field.hxx | 22 +++++++-------- include/field2d.hxx | 10 +++---- include/field3d.hxx | 24 ++++++++-------- include/fieldperp.hxx | 4 +-- include/interpolation.hxx | 6 ++-- include/msg_stack.hxx | 4 +-- include/vector2d.hxx | 2 +- include/vector3d.hxx | 2 +- 13 files changed, 90 insertions(+), 90 deletions(-) diff --git a/include/bout/coordinates.hxx b/include/bout/coordinates.hxx index aeeb0a5a95..7b423a33c9 100644 --- a/include/bout/coordinates.hxx +++ b/include/bout/coordinates.hxx @@ -145,14 +145,14 @@ public: #error This utility macro should not clash with another one #else #define DERIV_FUNC_REGION_ENUM_TO_STRING(func, T) \ - [[gnu::deprecated("Please use Coordinates::#func(const #T& f, " \ + [[deprecated("Please use Coordinates::#func(const #T& f, " \ "CELL_LOC outloc = CELL_DEFAULT, const std::string& method = \"DEFAULT\", " \ "const std::string& region = \"RGN_ALL\") instead")]] \ inline T func(const T& f, CELL_LOC outloc, const std::string& method, \ REGION region) { \ return func(f, outloc, method, toString(region)); \ } \ - [[gnu::deprecated("Please use Coordinates::#func(const #T& f, " \ + [[deprecated("Please use Coordinates::#func(const #T& f, " \ "CELL_LOC outloc = CELL_DEFAULT, const std::string& method = \"DEFAULT\", " \ "const std::string& region = \"RGN_ALL\") instead")]] \ inline T func(const T& f, CELL_LOC outloc, DIFF_METHOD method, \ @@ -165,7 +165,7 @@ public: #error This utility macro should not clash with another one #else #define GRAD_FUNC_REGION_ENUM_TO_STRING(func, T) \ - [[gnu::deprecated("Please use Coordinates::#func(const #T& f, " \ + [[deprecated("Please use Coordinates::#func(const #T& f, " \ "CELL_LOC outloc = CELL_DEFAULT, const std::string& method = \"DEFAULT\") " \ "instead")]] \ inline T func(const T& f, CELL_LOC outloc, DIFF_METHOD method) { \ @@ -197,7 +197,7 @@ public: /// Advection along magnetic field V*b.Grad(f) Field2D Vpar_Grad_par(const Field2D& v, const Field2D& f, CELL_LOC outloc = CELL_DEFAULT, const std::string& method = "DEFAULT"); - [[gnu::deprecated("Please use Coordinates::Vpar_Grad_par(const Field2D& v, " + [[deprecated("Please use Coordinates::Vpar_Grad_par(const Field2D& v, " "const Field2D& f, CELL_LOC outloc = CELL_DEFAULT, " "const std::string& method = \"DEFAULT\") instead")]] inline Field2D Vpar_Grad_par(const Field2D& v, const Field2D& f, CELL_LOC outloc, @@ -207,7 +207,7 @@ public: Field3D Vpar_Grad_par(const Field3D& v, const Field3D& f, CELL_LOC outloc = CELL_DEFAULT, const std::string& method = "DEFAULT"); - [[gnu::deprecated("Please use Coordinates::Vpar_Grad_par(const Field3D& v, " + [[deprecated("Please use Coordinates::Vpar_Grad_par(const Field3D& v, " "const Field3D& f, CELL_LOC outloc = CELL_DEFAULT, " "const std::string& method = \"DEFAULT\") instead")]] inline Field3D Vpar_Grad_par(const Field3D& v, const Field3D& f, CELL_LOC outloc, diff --git a/include/bout/mesh.hxx b/include/bout/mesh.hxx index 8cf245b560..e9d387c454 100644 --- a/include/bout/mesh.hxx +++ b/include/bout/mesh.hxx @@ -293,7 +293,7 @@ class Mesh { /// @param[in] buffer A buffer of data to send 
/// @param[in] size The length of \p buffer /// @param[in] tag A label, must be the same at receive - [[gnu::deprecated("This experimental functionality will be removed in 5.0")]] + [[deprecated("This experimental functionality will be removed in 5.0")]] virtual MPI_Request sendToProc(int xproc, int yproc, BoutReal *buffer, int size, int tag) = 0; /// Low-level communication routine @@ -306,7 +306,7 @@ class Mesh { /// @param[inout] buffer The buffer to fill with data. Must already be allocated of length \p size /// @param[in] size The length of \p buffer /// @param[in] tag A label, must be the same as send - [[gnu::deprecated("This experimental functionality will be removed in 5.0")]] + [[deprecated("This experimental functionality will be removed in 5.0")]] virtual comm_handle receiveFromProc(int xproc, int yproc, BoutReal *buffer, int size, int tag) = 0; virtual int getNXPE() = 0; ///< The number of processors in the X direction @@ -389,25 +389,25 @@ class Mesh { virtual bool lastY() const = 0; ///< Is this processor last in Y? i.e. is there a boundary at upper Y? virtual bool firstY(int xpos) const = 0; ///< Is this processor first in Y? i.e. is there a boundary at lower Y? virtual bool lastY(int xpos) const = 0; ///< Is this processor last in Y? i.e. is there a boundary at upper Y? - [[gnu::deprecated("This experimental functionality will be removed in 5.0")]] + [[deprecated("This experimental functionality will be removed in 5.0")]] virtual int UpXSplitIndex() = 0; ///< If the upper Y guard cells are split in two, return the X index where the split occurs - [[gnu::deprecated("This experimental functionality will be removed in 5.0")]] + [[deprecated("This experimental functionality will be removed in 5.0")]] virtual int DownXSplitIndex() = 0; ///< If the lower Y guard cells are split in two, return the X index where the split occurs /// Send data - [[gnu::deprecated("This experimental functionality will be removed in 5.0")]] + [[deprecated("This experimental functionality will be removed in 5.0")]] virtual int sendYOutIndest(BoutReal *buffer, int size, int tag) = 0; /// - [[gnu::deprecated("This experimental functionality will be removed in 5.0")]] + [[deprecated("This experimental functionality will be removed in 5.0")]] virtual int sendYOutOutdest(BoutReal *buffer, int size, int tag) = 0; /// - [[gnu::deprecated("This experimental functionality will be removed in 5.0")]] + [[deprecated("This experimental functionality will be removed in 5.0")]] virtual int sendYInIndest(BoutReal *buffer, int size, int tag) = 0; /// - [[gnu::deprecated("This experimental functionality will be removed in 5.0")]] + [[deprecated("This experimental functionality will be removed in 5.0")]] virtual int sendYInOutdest(BoutReal *buffer, int size, int tag) = 0; /// Non-blocking receive. Must be followed by a call to wait() @@ -415,7 +415,7 @@ class Mesh { /// @param[out] buffer A buffer of length \p size which must already be allocated /// @param[in] size The number of BoutReals expected /// @param[in] tag The tag number of the expected message - [[gnu::deprecated("This experimental functionality will be removed in 5.0")]] + [[deprecated("This experimental functionality will be removed in 5.0")]] virtual comm_handle irecvYOutIndest(BoutReal *buffer, int size, int tag) = 0; /// Non-blocking receive. 
Must be followed by a call to wait() @@ -423,7 +423,7 @@ class Mesh { /// @param[out] buffer A buffer of length \p size which must already be allocated /// @param[in] size The number of BoutReals expected /// @param[in] tag The tag number of the expected message - [[gnu::deprecated("This experimental functionality will be removed in 5.0")]] + [[deprecated("This experimental functionality will be removed in 5.0")]] virtual comm_handle irecvYOutOutdest(BoutReal *buffer, int size, int tag) = 0; /// Non-blocking receive. Must be followed by a call to wait() @@ -431,7 +431,7 @@ class Mesh { /// @param[out] buffer A buffer of length \p size which must already be allocated /// @param[in] size The number of BoutReals expected /// @param[in] tag The tag number of the expected message - [[gnu::deprecated("This experimental functionality will be removed in 5.0")]] + [[deprecated("This experimental functionality will be removed in 5.0")]] virtual comm_handle irecvYInIndest(BoutReal *buffer, int size, int tag) = 0; /// Non-blocking receive. Must be followed by a call to wait() @@ -439,7 +439,7 @@ class Mesh { /// @param[out] buffer A buffer of length \p size which must already be allocated /// @param[in] size The number of BoutReals expected /// @param[in] tag The tag number of the expected message - [[gnu::deprecated("This experimental functionality will be removed in 5.0")]] + [[deprecated("This experimental functionality will be removed in 5.0")]] virtual comm_handle irecvYInOutdest(BoutReal *buffer, int size, int tag) = 0; // Boundary region iteration @@ -487,11 +487,11 @@ class Mesh { /// Returns the global X index given a local index /// If the local index includes the boundary cells, then so does the global. - [[gnu::deprecated("Use getGlobalXIndex instead")]] + [[deprecated("Use getGlobalXIndex instead")]] int XGLOBAL(int xloc) const { return getGlobalXIndex(xloc); } /// Returns the global Y index given a local index /// The local index must include the boundary, the global index does not. 
- [[gnu::deprecated("Use getGlobalYIndex or getGlobalYIndexNoBoundaries instead")]] + [[deprecated("Use getGlobalYIndex or getGlobalYIndexNoBoundaries instead")]] virtual int YGLOBAL(int yloc) const { return getGlobalYIndexNoBoundaries(yloc); } /// Returns the local X index given a global index @@ -794,44 +794,44 @@ class Mesh { return bout::derivatives::index::FDDZ(vel, f, outloc, method, region); } - [[gnu::deprecated("Please use free function toFieldAligned instead")]] + [[deprecated("Please use free function toFieldAligned instead")]] const Field3D toFieldAligned(const Field3D &f, const REGION region = RGN_ALL) { return ::toFieldAligned(f, toString(region)); } - [[gnu::deprecated("Please use free function fromFieldAligned instead")]] + [[deprecated("Please use free function fromFieldAligned instead")]] const Field3D fromFieldAligned(const Field3D &f, const REGION region = RGN_ALL) { return ::fromFieldAligned(f, toString(region)); } - [[gnu::deprecated("Please use free function toFieldAligned instead")]] + [[deprecated("Please use free function toFieldAligned instead")]] const Field2D toFieldAligned(const Field2D &f, const REGION region = RGN_ALL) { return ::toFieldAligned(f, toString(region)); } - [[gnu::deprecated("Please use free function fromFieldAligned instead")]] + [[deprecated("Please use free function fromFieldAligned instead")]] const Field2D fromFieldAligned(const Field2D &f, const REGION region = RGN_ALL) { return ::fromFieldAligned(f, toString(region)); } - [[gnu::deprecated("Please use " + [[deprecated("Please use " "Coordinates::getParallelTransform().canToFromFieldAligned instead")]] bool canToFromFieldAligned() { return getCoordinates()->getParallelTransform().canToFromFieldAligned(); } - [[gnu::deprecated("Please use Coordinates::setParallelTransform instead")]] + [[deprecated("Please use Coordinates::setParallelTransform instead")]] void setParallelTransform(std::unique_ptr pt) { getCoordinates()->setParallelTransform(std::move(pt)); } - [[gnu::deprecated("This call is now unnecessary")]] + [[deprecated("This call is now unnecessary")]] void setParallelTransform() { // The ParallelTransform is set from options in the Coordinates // constructor, so this method doesn't need to do anything } - [[gnu::deprecated("Please use Coordinates::getParallelTransform instead")]] + [[deprecated("Please use Coordinates::getParallelTransform instead")]] ParallelTransform& getParallelTransform() { return getCoordinates()->getParallelTransform(); } diff --git a/include/bout/paralleltransform.hxx b/include/bout/paralleltransform.hxx index 5ad8c729e1..260fda4f22 100644 --- a/include/bout/paralleltransform.hxx +++ b/include/bout/paralleltransform.hxx @@ -28,7 +28,7 @@ public: /// Given a 3D field, calculate and set the Y up down fields virtual void calcParallelSlices(Field3D &f) = 0; - [[gnu::deprecated("Please use ParallelTransform::calcParallelSlices instead")]] + [[deprecated("Please use ParallelTransform::calcParallelSlices instead")]] void calcYupYdown(Field3D& f) { calcParallelSlices(f); } @@ -39,7 +39,7 @@ public: return calcParallelSlices(f); } - [[gnu::deprecated("Please use ParallelTransform::integrateParallelSlices instead")]] + [[deprecated("Please use ParallelTransform::integrateParallelSlices instead")]] void integrateYupYdown(Field3D& f) { integrateParallelSlices(f); } @@ -47,13 +47,13 @@ public: /// Convert a field into field-aligned coordinates /// so that the y index is along the magnetic field virtual const Field3D toFieldAligned(const Field3D &f, const std::string& 
region = "RGN_ALL") = 0; - [[gnu::deprecated("Please use toFieldAligned(const Field3D& f, " + [[deprecated("Please use toFieldAligned(const Field3D& f, " "const std::string& region = \"RGN_ALL\") instead")]] const Field3D toFieldAligned(const Field3D &f, REGION region) { return toFieldAligned(f, toString(region)); } virtual const FieldPerp toFieldAligned(const FieldPerp &f, const std::string& region = "RGN_ALL") = 0; - [[gnu::deprecated("Please use toFieldAligned(const FieldPerp& f, " + [[deprecated("Please use toFieldAligned(const FieldPerp& f, " "const std::string& region = \"RGN_ALL\") instead")]] const FieldPerp toFieldAligned(const FieldPerp &f, REGION region) { return toFieldAligned(f, toString(region)); @@ -62,13 +62,13 @@ public: /// Convert back from field-aligned coordinates /// into standard form virtual const Field3D fromFieldAligned(const Field3D &f, const std::string& region = "RGN_ALL") = 0; - [[gnu::deprecated("Please use fromFieldAligned(const Field3D& f, " + [[deprecated("Please use fromFieldAligned(const Field3D& f, " "const std::string& region = \"RGN_ALL\") instead")]] const Field3D fromFieldAligned(const Field3D &f, REGION region) { return fromFieldAligned(f, toString(region)); } virtual const FieldPerp fromFieldAligned(const FieldPerp &f, const std::string& region = "RGN_ALL") = 0; - [[gnu::deprecated("Please use fromFieldAligned(const FieldPerp& f, " + [[deprecated("Please use fromFieldAligned(const FieldPerp& f, " "const std::string& region = \"RGN_ALL\") instead")]] const FieldPerp fromFieldAligned(const FieldPerp &f, REGION region) { return fromFieldAligned(f, toString(region)); @@ -248,7 +248,7 @@ private: const std::string UNUSED(region) = "RGN_NOX") const { return f; }; - [[gnu::deprecated("Please use shiftZ(const Field2D& f, const Field2D& zangle, " + [[deprecated("Please use shiftZ(const Field2D& f, const Field2D& zangle, " "const std::string& region = \"RGN_NOX\") instead")]] const Field2D shiftZ(const Field2D& f, const Field2D& UNUSED(zangle), REGION UNUSED(region)) const { @@ -264,7 +264,7 @@ private: */ const Field3D shiftZ(const Field3D& f, const Field2D& zangle, const std::string& region = "RGN_NOX") const; - [[gnu::deprecated("Please use shiftZ(const Field3D& f, const Field2D& zangle, " + [[deprecated("Please use shiftZ(const Field3D& f, const Field2D& zangle, " "const std::string& region = \"RGN_NOX\") instead")]] const Field3D shiftZ(const Field3D& f, const Field2D& zangle, REGION region) const { diff --git a/include/derivs.hxx b/include/derivs.hxx index 576beb96da..4865de0270 100644 --- a/include/derivs.hxx +++ b/include/derivs.hxx @@ -40,14 +40,14 @@ #error This utility macro should not clash with another one #else #define DERIV_FUNC_REGION_ENUM_TO_STRING(func, T) \ -[[gnu::deprecated("Please use #func(const #T& f, CELL_LOC outloc = CELL_DEFAULT, " \ +[[deprecated("Please use #func(const #T& f, CELL_LOC outloc = CELL_DEFAULT, " \ "const std::string& method = \"DEFAULT\", const std::string& region = \"RGN_ALL\") " \ "instead")]] \ inline T func(const T& f, CELL_LOC outloc, const std::string& method, \ REGION region) { \ return func(f, outloc, method, toString(region)); \ } \ -[[gnu::deprecated("Please use #func(const #T& f, CELL_LOC outloc = CELL_DEFAULT, " \ +[[deprecated("Please use #func(const #T& f, CELL_LOC outloc = CELL_DEFAULT, " \ "const std::string& method = \"DEFAULT\", const std::string& region = \"RGN_ALL\") " \ "instead")]] \ inline T func(const T& f, CELL_LOC outloc, DIFF_METHOD method, \ @@ -60,14 +60,14 @@ inline T func(const T& f, 
CELL_LOC outloc, DIFF_METHOD method, \ #error This utility macro should not clash with another one #else #define VDERIV_FUNC_REGION_ENUM_TO_STRING(func, T, T1, T2) \ -[[gnu::deprecated("Please use #func(const #T1 v, const #T2& f, " \ +[[deprecated("Please use #func(const #T1 v, const #T2& f, " \ "CELL_LOC outloc = CELL_DEFAULT, const std::string& method = \"DEFAULT\", const " \ "std::string& region = \"RGN_ALL\") instead")]] \ inline T func(const T1& v, const T2& f, CELL_LOC outloc, const std::string& method, \ REGION region) { \ return func(v, f, outloc, method, toString(region)); \ } \ -[[gnu::deprecated("Please use #func(const #T1& v, const #T2& f, " \ +[[deprecated("Please use #func(const #T1& v, const #T2& f, " \ "CELL_LOC outloc = CELL_DEFAULT, const std::string& method = \"DEFAULT\", " \ "const std::string& region = \"RGN_ALL\") instead")]] \ inline T func(const T1& v, const T2& f, CELL_LOC outloc, DIFF_METHOD method, \ @@ -641,7 +641,7 @@ Field3D D2DXDY(const Field3D& f, CELL_LOC outloc = CELL_DEFAULT, const std::string& method = "DEFAULT", const std::string& region = "RGN_NOBNDRY", const std::string& dfdy_boundary_condition = "free_o3"); -[[gnu::deprecated("Please use D2DXDY(const Field3D& f, CELL_LOC outloc = CELL_DEFAULT, " +[[deprecated("Please use D2DXDY(const Field3D& f, CELL_LOC outloc = CELL_DEFAULT, " "const std::string& method = \"DEFAULT\", const std::string& region = \"RGN_ALL\", " "const std::string& dfdy_boundary_condition) instead")]] inline Field3D D2DXDY(const Field3D& f, CELL_LOC outloc, const std::string& method, @@ -649,7 +649,7 @@ inline Field3D D2DXDY(const Field3D& f, CELL_LOC outloc, const std::string& meth const std::string& dfdy_boundary_condition = "free_o3") { return D2DXDY(f, outloc, method, toString(region), dfdy_boundary_condition); } -[[gnu::deprecated("Please use D2DXDY(const Field3D& f, CELL_LOC outloc = CELL_DEFAULT, " +[[deprecated("Please use D2DXDY(const Field3D& f, CELL_LOC outloc = CELL_DEFAULT, " "const std::string& method = \"DEFAULT\", const std::string& region = \"RGN_ALL\", " "const std::string& dfdy_boundary_condition) instead")]] inline Field3D D2DXDY(const Field3D& f, CELL_LOC outloc, DIFF_METHOD method, @@ -676,7 +676,7 @@ Field2D D2DXDY(const Field2D& f, CELL_LOC outloc = CELL_DEFAULT, const std::string& method = "DEFAULT", const std::string& region = "RGN_NOBNDRY", const std::string& dfdy_boundary_condition = "free_o3"); -[[gnu::deprecated("Please use D2DXDY(const Field2D& f, CELL_LOC outloc = CELL_DEFAULT, " +[[deprecated("Please use D2DXDY(const Field2D& f, CELL_LOC outloc = CELL_DEFAULT, " "const std::string& method = \"DEFAULT\", const std::string& region = \"RGN_ALL\", " "const std::string& dfdy_boundary_condition) instead")]] inline Field2D D2DXDY(const Field2D& f, CELL_LOC outloc, const std::string& method, @@ -684,7 +684,7 @@ inline Field2D D2DXDY(const Field2D& f, CELL_LOC outloc, const std::string& meth const std::string& dfdy_boundary_condition = "free_o3") { return D2DXDY(f, outloc, method, toString(region), dfdy_boundary_condition); } -[[gnu::deprecated("Please use D2DXDY(const Field2D& f, CELL_LOC outloc = CELL_DEFAULT, " +[[deprecated("Please use D2DXDY(const Field2D& f, CELL_LOC outloc = CELL_DEFAULT, " "const std::string& method = \"DEFAULT\", const std::string& region = \"RGN_ALL\", " "const std::string& dfdy_boundary_condition) instead")]] inline Field2D D2DXDY(const Field2D& f, CELL_LOC outloc, DIFF_METHOD method, diff --git a/include/difops.hxx b/include/difops.hxx index 9692cc6f3d..d44b3359b5 100644 --- 
a/include/difops.hxx +++ b/include/difops.hxx @@ -221,19 +221,19 @@ inline const Field3D Grad2_par2(const Field3D& f, CELL_LOC outloc, DIFF_METHOD m * Parallel derivatives, converting between cell-centred and lower cell boundary * These are a simple way to do staggered differencing */ -[[gnu::deprecated( +[[deprecated( "Grad_par_CtoL is deprecated. Staggering is now supported in Grad_par.")]] inline const Field3D Grad_par_CtoL(const Field3D &var) { ASSERT2(var.getLocation() == CELL_CENTRE); return Grad_par(var, CELL_YLOW); } -[[gnu::deprecated( +[[deprecated( "Grad_par_CtoL is deprecated. Staggering is now supported in Grad_par.")]] inline const Field2D Grad_par_CtoL(const Field2D &var) { ASSERT2(var.getLocation() == CELL_CENTRE); return Grad_par(var, CELL_YLOW); } -[[gnu::deprecated( +[[deprecated( "Vpar_Grad_par_LCtoC is deprecated. Staggering is now supported in Vpar_Grad_par.")]] inline const Field3D Vpar_Grad_par_LCtoC(const Field3D& v, const Field3D& f, const std::string& region="RGN_NOBNDRY") { @@ -241,7 +241,7 @@ inline const Field3D Vpar_Grad_par_LCtoC(const Field3D& v, const Field3D& f, ASSERT2(f.getLocation() == CELL_CENTRE); return Vpar_Grad_par(v, f, CELL_CENTRE, region); } -[[gnu::deprecated( +[[deprecated( "Vpar_Grad_par_LCtoC is deprecated. Staggering is now supported in Vpar_Grad_par.")]] inline const Field3D Vpar_Grad_par_LCtoC(const Field3D& v, const Field3D& f, REGION region=RGN_NOBNDRY) { @@ -249,37 +249,37 @@ inline const Field3D Vpar_Grad_par_LCtoC(const Field3D& v, const Field3D& f, ASSERT2(f.getLocation() == CELL_CENTRE); return Vpar_Grad_par(v, f, CELL_CENTRE, toString(region)); } -[[gnu::deprecated( +[[deprecated( "Grad_par_LtoC is deprecated. Staggering is now supported in Grad_par.")]] inline const Field3D Grad_par_LtoC(const Field3D &var) { ASSERT2(var.getLocation() == CELL_YLOW); return Grad_par(var, CELL_CENTRE); } -[[gnu::deprecated( +[[deprecated( "Grad_par_LtoC is deprecated. Staggering is now supported in Grad_par.")]] inline const Field2D Grad_par_LtoC(const Field2D &var) { ASSERT2(var.getLocation() == CELL_YLOW); return Grad_par(var, CELL_CENTRE); } -[[gnu::deprecated( +[[deprecated( "Div_par_LtoC is deprecated. Staggering is now supported in Grad_par.")]] inline const Field3D Div_par_LtoC(const Field3D &var) { ASSERT2(var.getLocation() == CELL_YLOW); return Div_par(var, CELL_CENTRE); } -[[gnu::deprecated( +[[deprecated( "Div_par_LtoC is deprecated. Staggering is now supported in Grad_par.")]] inline const Field2D Div_par_LtoC(const Field2D &var) { ASSERT2(var.getLocation() == CELL_YLOW); return Div_par(var, CELL_CENTRE); } -[[gnu::deprecated( +[[deprecated( "Div_par_CtoL is deprecated. Staggering is now supported in Grad_par.")]] inline const Field3D Div_par_CtoL(const Field3D &var) { ASSERT2(var.getLocation() == CELL_CENTRE); return Div_par(var, CELL_YLOW); } -[[gnu::deprecated( +[[deprecated( "Div_par_CtoL is deprecated. 
Staggering is now supported in Grad_par.")]] inline const Field2D Div_par_CtoL(const Field2D &var) { ASSERT2(var.getLocation() == CELL_CENTRE); diff --git a/include/field.hxx b/include/field.hxx index e3b4ac6d61..3c9218060b 100644 --- a/include/field.hxx +++ b/include/field.hxx @@ -279,7 +279,7 @@ inline T toFieldAligned(const T& f, const std::string& region = "RGN_ALL") { return f.getCoordinates()->getParallelTransform().toFieldAligned(f, region); } template -[[gnu::deprecated("Please use toFieldAligned(const T& f, " +[[deprecated("Please use toFieldAligned(const T& f, " "const std::string& region = \"RGN_ALL\") instead")]] inline T toFieldAligned(const T& f, REGION region) { return toFieldAligned(f, toString(region)); @@ -291,7 +291,7 @@ inline T fromFieldAligned(const T& f, const std::string& region = "RGN_ALL") { return f.getCoordinates()->getParallelTransform().fromFieldAligned(f, region); } template -[[gnu::deprecated("Please use fromFieldAligned(const T& f, " +[[deprecated("Please use fromFieldAligned(const T& f, " "const std::string& region = \"RGN_ALL\") instead")]] inline T fromFieldAligned(const T& f, REGION region) { return fromFieldAligned(f, toString(region)); @@ -321,7 +321,7 @@ inline BoutReal min(const T& f, bool allpe = false, const std::string& rgn = "RG return result; } template> -[[gnu::deprecated("Please use Field3D min(const Field3D& f, bool allpe, " +[[deprecated("Please use Field3D min(const Field3D& f, bool allpe, " "const std::string& region = \"RGN_NOBNDRY\") instead")]] inline BoutReal min(const T& f, bool allpe, REGION rgn) { return min(f, allpe, toString(rgn)); @@ -351,7 +351,7 @@ inline BoutReal max(const T& f, bool allpe = false, const std::string& rgn = "RG return result; } template> -[[gnu::deprecated("Please use Field3D max(const Field3D& f, bool allpe, " +[[deprecated("Please use Field3D max(const Field3D& f, bool allpe, " "const std::string& region = \"RGN_NOBNDRY\") instead")]] inline BoutReal max(const T& f, bool allpe, REGION rgn) { return max(f, allpe, toString(rgn)); @@ -384,7 +384,7 @@ inline BoutReal mean(const T &f, bool allpe = false, return result / static_cast(count); } template> -[[gnu::deprecated("Please use Field3D mean(const Field3D& f, bool allpe, " +[[deprecated("Please use Field3D mean(const Field3D& f, bool allpe, " "const std::string& region = \"RGN_NOBNDRY\") instead")]] inline BoutReal mean(const T& f, bool allpe, REGION rgn) { return mean(f, allpe, toString(rgn)); @@ -409,7 +409,7 @@ T pow(const T& lhs, const T& rhs, const std::string& rgn = "RGN_ALL") { return result; } template> -[[gnu::deprecated("Please use pow(const T& lhs, const T& rhs" +[[deprecated("Please use pow(const T& lhs, const T& rhs" "const std::string& region = \"RGN_ALL\") instead")]] inline T pow(const T& lhs, const T& rhs, REGION rgn) { return pow(lhs, rhs, toString(rgn)); @@ -431,7 +431,7 @@ T pow(const T &lhs, BoutReal rhs, const std::string& rgn = "RGN_ALL") { return result; } template> -[[gnu::deprecated("Please use pow(const T& lhs, BoutReal rhs" +[[deprecated("Please use pow(const T& lhs, BoutReal rhs" "const std::string& region = \"RGN_ALL\") instead")]] inline T pow(const T& lhs, BoutReal rhs, REGION rgn) { return pow(lhs, rhs, toString(rgn)); @@ -454,7 +454,7 @@ T pow(BoutReal lhs, const T &rhs, const std::string& rgn = "RGN_ALL") { return result; } template> -[[gnu::deprecated("Please use pow(BoutReal lhs, const T& rhs" +[[deprecated("Please use pow(BoutReal lhs, const T& rhs" "const std::string& region = \"RGN_ALL\") instead")]] inline T 
pow(BoutReal lhs, const T& rhs, REGION rgn) { return pow(lhs, rhs, toString(rgn)); @@ -493,7 +493,7 @@ inline T pow(BoutReal lhs, const T& rhs, REGION rgn) { return result; \ } \ template> \ - [[gnu::deprecated("Please use func(const T& f, " \ + [[deprecated("Please use func(const T& f, " \ "const std::string& region = \"RGN_ALL\") instead")]] \ inline T name(const T& f, REGION region) { \ return name(f, toString(region)); \ @@ -612,7 +612,7 @@ inline bool finite(const T &f, const std::string& rgn = "RGN_ALL") { return true; } template> -[[gnu::deprecated("Please use bool finite(const Field3D& f, " +[[deprecated("Please use bool finite(const Field3D& f, " "const std::string& region = \"RGN_ALL\") instead")]] inline bool finite(const T& f, REGION rgn) { return finite(f, toString(rgn)); @@ -647,7 +647,7 @@ inline T floor(const T& var, BoutReal f, const std::string& rgn = "RGN_ALL") { return result; } template> -[[gnu::deprecated("Please use floor(const T& var, BoutReal f, " +[[deprecated("Please use floor(const T& var, BoutReal f, " "const std::string& region = \"RGN_ALL\") instead")]] inline T floor(const T& var, BoutReal f, REGION rgn) { return floor(var, f, toString(rgn)); diff --git a/include/field2d.hxx b/include/field2d.hxx index f90a2be5fe..6faa7b21e4 100644 --- a/include/field2d.hxx +++ b/include/field2d.hxx @@ -138,7 +138,7 @@ class Field2D : public Field, public FieldData { return true; } - [[gnu::deprecated("Please use Field2D::hasParallelSlices instead")]] + [[deprecated("Please use Field2D::hasParallelSlices instead")]] bool hasYupYdown() const { return hasParallelSlices(); } @@ -337,7 +337,7 @@ Field2D operator-(const Field2D &f); inline Field2D toFieldAligned(const Field2D& f, const std::string& UNUSED(region) = "RGN_ALL") { return f; } -[[gnu::deprecated("Please use toFieldAligned(const Field2D& f, " +[[deprecated("Please use toFieldAligned(const Field2D& f, " "const std::string& region = \"RGN_ALL\") instead")]] inline Field2D toFieldAligned(const Field2D& f, REGION region) { return toFieldAligned(f, toString(region)); @@ -346,7 +346,7 @@ inline Field2D toFieldAligned(const Field2D& f, REGION region) { inline Field2D fromFieldAligned(const Field2D& f, const std::string& UNUSED(region) = "RGN_ALL") { return f; } -[[gnu::deprecated("Please use fromFieldAligned(const Field2D& f, " +[[deprecated("Please use fromFieldAligned(const Field2D& f, " "const std::string& region = \"RGN_ALL\") instead")]] inline Field2D fromFieldAligned(const Field2D& f, REGION region) { return fromFieldAligned(f, toString(region)); @@ -358,14 +358,14 @@ inline Field2D fromFieldAligned(const Field2D& f, REGION region) { /// Loops over all points including the boundaries by /// default (can be changed using the \p rgn argument void checkData(const Field2D &f, const std::string& region = "RGN_NOBNDRY"); -[[gnu::deprecated("Please use checkData(const Field2D& f, " +[[deprecated("Please use checkData(const Field2D& f, " "const std::string& region = \"RGN_NOBNDRY\") instead")]] inline void checkData(const Field2D &f, REGION region) { return checkData(f, toString(region)); } #else inline void checkData(const Field2D &UNUSED(f), std::string UNUSED(region) = "RGN_NOBNDRY") {} -[[gnu::deprecated("Please use checkData(const Field2D& f, " +[[deprecated("Please use checkData(const Field2D& f, " "const std::string& region = \"RGN_NOBNDRY\") instead")]] inline void checkData(const Field2D &UNUSED(f), REGION UNUSED(region)) {} #endif diff --git a/include/field3d.hxx b/include/field3d.hxx index 75fe892bed..cade57bab1 
100644 --- a/include/field3d.hxx +++ b/include/field3d.hxx @@ -242,7 +242,7 @@ class Field3D : public Field, public FieldData { */ void splitParallelSlices(); - [[gnu::deprecated("Please use Field3D::splitParallelSlices instead")]] + [[deprecated("Please use Field3D::splitParallelSlices instead")]] void splitYupYdown() { splitParallelSlices(); } @@ -252,7 +252,7 @@ class Field3D : public Field, public FieldData { */ void clearParallelSlices(); - [[gnu::deprecated("Please use Field3D::clearParallelSlices instead")]] + [[deprecated("Please use Field3D::clearParallelSlices instead")]] void mergeYupYdown() { clearParallelSlices(); } @@ -262,7 +262,7 @@ class Field3D : public Field, public FieldData { return !yup_fields.empty() and !ydown_fields.empty(); } - [[gnu::deprecated("Please use Field3D::hasParallelSlices instead")]] + [[deprecated("Please use Field3D::hasParallelSlices instead")]] bool hasYupYdown() const { return hasParallelSlices(); } @@ -574,13 +574,13 @@ Field3D operator-(const Field3D &f); /// default (can be changed using the \p rgn argument). /// If CHECK >= 3 then the result will be checked for non-finite numbers Field3D pow(const Field3D& lhs, const Field2D& rhs, const std::string& rgn = "RGN_ALL"); -[[gnu::deprecated("Please use pow(const Field3D& lhs, const Field2D& rhs" +[[deprecated("Please use pow(const Field3D& lhs, const Field2D& rhs" "const std::string& region = \"RGN_ALL\") instead")]] inline Field3D pow(const Field3D &lhs, const Field2D &rhs, REGION rgn) { return pow(lhs, rhs, toString(rgn)); } FieldPerp pow(const Field3D& lhs, const FieldPerp& rhs, const std::string& rgn = "RGN_ALL"); -[[gnu::deprecated("Please use pow(const Field3D& lhs, const FieldPerp& rhs" +[[deprecated("Please use pow(const Field3D& lhs, const FieldPerp& rhs" "const std::string& region = \"RGN_ALL\") instead")]] inline FieldPerp pow(const Field3D& lhs, const FieldPerp& rhs, REGION rgn) { return pow(lhs, rhs, toString(rgn)); @@ -592,7 +592,7 @@ inline FieldPerp pow(const Field3D& lhs, const FieldPerp& rhs, REGION rgn) { /// Loops over all points including the boundaries by /// default (can be changed using the \p rgn argument void checkData(const Field3D& f, const std::string& region = "RGN_NOBNDRY"); -[[gnu::deprecated("Please use checkData(const Field3D& f, " +[[deprecated("Please use checkData(const Field3D& f, " "const std::string& region = \"RGN_NOBNDRY\") instead")]] inline void checkData(const Field3D &f, REGION region) { return checkData(f, toString(region)); @@ -601,7 +601,7 @@ inline void checkData(const Field3D &f, REGION region) { /// Ignored with disabled CHECK; Throw an exception if \p f is not /// allocated or if any elements are non-finite (for CHECK > 2) inline void checkData(const Field3D& UNUSED(f), const std::string& UNUSED(region) = "RGN_NOBNDRY") {}; -[[gnu::deprecated("Please use checkData(const Field3D& f, " +[[deprecated("Please use checkData(const Field3D& f, " "const std::string& region = \"RGN_NOBNDRY\") instead")]] inline void checkData(const Field3D &UNUSED(f), REGION UNUSED(region)) {} #endif @@ -612,7 +612,7 @@ inline void checkData(const Field3D &UNUSED(f), REGION UNUSED(region)) {} /// @param[in] N0 The component to keep /// @param[in] rgn The region to calculate the result over Field3D filter(const Field3D& var, int N0, const std::string& rgn = "RGN_ALL"); -[[gnu::deprecated("Please use filter(const Field3D& var, int N0, " +[[deprecated("Please use filter(const Field3D& var, int N0, " "const std::string& region = \"RGN_ALL\") instead")]] inline Field3D 
filter(const Field3D& var, int N0, REGION rgn) { return filter(var, N0, toString(rgn)); @@ -627,7 +627,7 @@ inline Field3D filter(const Field3D& var, int N0, REGION rgn) { /// @param[in] rgn The region to calculate the result over Field3D lowPass(const Field3D& var, int zmax, bool keep_zonal, const std::string& rgn = "RGN_ALL"); -[[gnu::deprecated("Please use lowpass(const Field3D& var, int zmax, bool keep_zonal, " +[[deprecated("Please use lowpass(const Field3D& var, int zmax, bool keep_zonal, " "const std::string& region = \"RGN_ALL\") instead")]] inline Field3D lowPass(const Field3D& var, int zmax, bool keep_zonal, REGION rgn) { return lowPass(var, zmax, keep_zonal, toString(rgn)); @@ -649,7 +649,7 @@ DEPRECATED(inline Field3D lowPass(const Field3D& var, int zmax, int keep_zonal, inline Field3D lowPass(const Field3D &var, int zmax, const std::string rgn = "RGN_ALL") { return lowPass(var, zmax, true, rgn); } -[[gnu::deprecated("Please use lowpass(const Field3D& var, int zmax, " +[[deprecated("Please use lowpass(const Field3D& var, int zmax, " "const std::string& region = \"RGN_ALL\") instead")]] inline Field3D lowPass(const Field3D &var, int zmax, REGION rgn) { return lowPass(var, zmax, toString(rgn)); @@ -669,7 +669,7 @@ void shiftZ(Field3D &var, int jx, int jy, double zangle); /// @param[in] zangle The angle to shift by in Z /// @param[in] rgn The region to calculate the result over void shiftZ(Field3D &var, BoutReal zangle, const std::string& rgn="RGN_ALL"); -[[gnu::deprecated("Please use shiftZ(const Field3D& var, BoutReal zangle, " +[[deprecated("Please use shiftZ(const Field3D& var, BoutReal zangle, " "const std::string& region = \"RGN_ALL\") instead")]] inline void shiftZ(Field3D &var, BoutReal zangle, REGION rgn) { return shiftZ(var, zangle, toString(rgn)); @@ -680,7 +680,7 @@ inline void shiftZ(Field3D &var, BoutReal zangle, REGION rgn) { /// @param[in] f Variable to average /// @param[in] rgn The region to calculate the result over Field2D DC(const Field3D &f, const std::string& rgn = "RGN_ALL"); -[[gnu::deprecated("Please use DC(const Field3D& f, " +[[deprecated("Please use DC(const Field3D& f, " "const std::string& region = \"RGN_ALL\") instead")]] inline Field2D DC(const Field3D &f, REGION rgn) { return DC(f, toString(rgn)); diff --git a/include/fieldperp.hxx b/include/fieldperp.hxx index 45ff4edb63..c97437937b 100644 --- a/include/fieldperp.hxx +++ b/include/fieldperp.hxx @@ -315,14 +315,14 @@ inline FieldPerp emptyFrom(const FieldPerp& f) { #if CHECK > 0 void checkData(const FieldPerp &f, const std::string& region = "RGN_NOX"); -[[gnu::deprecated("Please use checkData(const FieldPerp& f, " +[[deprecated("Please use checkData(const FieldPerp& f, " "const std::string& region = \"RGN_NOBNDRY\") instead")]] inline void checkData(const FieldPerp &f, REGION region) { return checkData(f, toString(region)); } #else inline void checkData(const FieldPerp &UNUSED(f), const std::string& UNUSED(region) = "RGN_NOX") {} -[[gnu::deprecated("Please use checkData(const FieldPerp& f, " +[[deprecated("Please use checkData(const FieldPerp& f, " "const std::string& region = \"RGN_NOBNDRY\") instead")]] inline void checkData(const FieldPerp &UNUSED(f), REGION UNUSED(region)) {} #endif diff --git a/include/interpolation.hxx b/include/interpolation.hxx index 853091af34..3da28e57b3 100644 --- a/include/interpolation.hxx +++ b/include/interpolation.hxx @@ -204,17 +204,17 @@ const T interp_to(const T& var, CELL_LOC loc, const std::string region = "RGN_AL return result; } template 
-[[gnu::deprecated("Please use interp_to(const T& var, CELL_LOC loc, " +[[deprecated("Please use interp_to(const T& var, CELL_LOC loc, " "const std::string& region = \"RGN_ALL\") instead")]] const T interp_to(const T& var, CELL_LOC loc, REGION region) { return interp_to(var, loc, toString(region)); } /// Print out the cell location (for debugging) -[[gnu::deprecated("Please use `output << toString(var.getLocation())` instead")]] +[[deprecated("Please use `output << toString(var.getLocation())` instead")]] void printLocation(const Field3D& var); -[[gnu::deprecated("Please use `toString(loc)` instead")]] +[[deprecated("Please use `toString(loc)` instead")]] const char* strLocation(CELL_LOC loc); /// Interpolate a field onto a perturbed set of points diff --git a/include/msg_stack.hxx b/include/msg_stack.hxx index 5182687958..14eea1c778 100644 --- a/include/msg_stack.hxx +++ b/include/msg_stack.hxx @@ -72,7 +72,7 @@ public: int push(const char *s, ...) BOUT_FORMAT_ARGS( 2, 3); ///< Add a message to the stack. Returns a message id - [[gnu::deprecated("Please use `MsgStack::push` with an empty message instead")]] + [[deprecated("Please use `MsgStack::push` with an empty message instead")]] int setPoint(); ///< get a message point void pop(); ///< Remove the last message @@ -85,7 +85,7 @@ public: /// Dummy functions which should be optimised out int push(const char *UNUSED(s), ...) { return 0; } - [[gnu::deprecated("Please use `MsgStack::push` with an empty message instead")]] + [[deprecated("Please use `MsgStack::push` with an empty message instead")]] int setPoint() { return 0; } void pop() {} diff --git a/include/vector2d.hxx b/include/vector2d.hxx index 0c95314ccb..d4fd9f545a 100644 --- a/include/vector2d.hxx +++ b/include/vector2d.hxx @@ -179,7 +179,7 @@ const Vector3D cross(const Vector2D & lhs, const Vector3D &rhs); * |v| = sqrt( v dot v ) */ const Field2D abs(const Vector2D& v, const std::string& region = "RGN_ALL"); -[[gnu::deprecated("Please use Vector2D abs(const Vector2D& f, " +[[deprecated("Please use Vector2D abs(const Vector2D& f, " "const std::string& region = \"RGN_ALL\") instead")]] inline const Field2D abs(const Vector2D &v, REGION region) { return abs(v, toString(region)); diff --git a/include/vector3d.hxx b/include/vector3d.hxx index 8e6d02e0dc..786d040dbf 100644 --- a/include/vector3d.hxx +++ b/include/vector3d.hxx @@ -222,7 +222,7 @@ const Vector3D cross(const Vector3D & lhs, const Vector2D &rhs); * sqrt( v.x^2 + v.y^2 + v.z^2 ) */ const Field3D abs(const Vector3D& v, const std::string& region = "RGN_ALL"); -[[gnu::deprecated("Please use Vector3D abs(const Vector3D& f, " +[[deprecated("Please use Vector3D abs(const Vector3D& f, " "const std::string& region = \"RGN_ALL\") instead")]] inline const Field3D abs(const Vector3D& v, REGION region) { return abs(v, toString(region)); From d35ff4391d5f1c4e94a834078845d32efb117942 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 16 Dec 2019 18:00:24 +0000 Subject: [PATCH 227/428] CMake: Don't use runtest for SNB test --- tests/integrated/test-snb/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integrated/test-snb/CMakeLists.txt b/tests/integrated/test-snb/CMakeLists.txt index be85526e05..e426b7f3fb 100644 --- a/tests/integrated/test-snb/CMakeLists.txt +++ b/tests/integrated/test-snb/CMakeLists.txt @@ -1,5 +1,4 @@ bout_add_integrated_test(test-snb SOURCES test_snb.cxx - USE_RUNTEST USE_DATA_BOUT_INP ) From fcc7f701e7b3ab4fa9b339e9707a4ac933e20974 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 16 Dec 
2019 18:11:31 +0000 Subject: [PATCH 228/428] Add wrapper to safely make tests that does nothing if on Windows --- tests/integrated/test-attribs/runtest | 7 ++-- tests/integrated/test-command-args/runtest | 36 +++++++++---------- tests/integrated/test-cyclic/runtest | 6 ++-- tests/integrated/test-delp2/runtest | 6 ++-- .../test-drift-instability/runtest.py | 13 ++----- tests/integrated/test-fieldfactory/runtest | 6 ++-- tests/integrated/test-fieldgroupComm/runtest | 6 ++-- .../test-griddata-yboundary-guards/runtest | 5 ++- tests/integrated/test-griddata/runtest | 7 ++-- tests/integrated/test-gyro/runtest | 6 ++-- tests/integrated/test-initial/runtest | 5 ++- .../test-interchange-instability/runtest | 6 ++-- tests/integrated/test-interpolate/runtest | 6 ++-- .../test-invertable-operator/runtest | 5 ++- tests/integrated/test-invpar/runtest | 7 ++-- tests/integrated/test-io/runtest | 6 ++-- tests/integrated/test-io_hdf5/runtest | 6 ++-- tests/integrated/test-laplace/runtest | 6 ++-- .../integrated/test-multigrid_laplace/runtest | 6 ++-- tests/integrated/test-naulin-laplace/runtest | 6 ++-- tests/integrated/test-options-netcdf/runtest | 6 ++-- tests/integrated/test-petsc_laplace/runtest | 6 ++-- .../test-petsc_laplace_MAST-grid/runtest | 6 ++-- tests/integrated/test-restart-io/runtest | 5 ++- tests/integrated/test-restart-io_hdf5/runtest | 5 ++- tests/integrated/test-restarting/runtest | 6 ++-- tests/integrated/test-slepc-solver/runtest | 5 ++- tests/integrated/test-smooth/runtest | 6 ++-- .../test-twistshift-staggered/runtest | 5 ++- tests/integrated/test-twistshift/runtest | 5 ++- tests/integrated/test-yupdown/runtest | 6 ++-- 31 files changed, 81 insertions(+), 137 deletions(-) diff --git a/tests/integrated/test-attribs/runtest b/tests/integrated/test-attribs/runtest index 7f4188a3d0..6f69ce4a3c 100755 --- a/tests/integrated/test-attribs/runtest +++ b/tests/integrated/test-attribs/runtest @@ -1,12 +1,11 @@ #!/usr/bin/env python3 -from boututils.run_wrapper import shell, shell_safe +from boututils.run_wrapper import build_and_log, launch_safe #requires: all_tests -print("Making test-attribs test") -shell_safe("make > make.log") +build_and_log("test-attribs test") -shell_safe("./test-attribs") +launch_safe("./test-attribs", nproc=1, mthread=1) # Read the attributes diff --git a/tests/integrated/test-command-args/runtest b/tests/integrated/test-command-args/runtest index 6220bee777..d267f4f99b 100755 --- a/tests/integrated/test-command-args/runtest +++ b/tests/integrated/test-command-args/runtest @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -from boututils.run_wrapper import shell_safe +from boututils.run_wrapper import build_and_log, launch_safe import os import shutil @@ -27,7 +27,7 @@ class TestCommandLineArgs(unittest.TestCase): def testNoArgumentsNoDirectory(self): with self.assertRaises(RuntimeError): - shell_safe(self.command, pipe=True) + launch_safe(self.command, pipe=True, nproc=1, mthread=1) with open("stderr.log") as f: contents = f.read() self.assertIn('"data" does not exist', contents, @@ -35,14 +35,14 @@ class TestCommandLineArgs(unittest.TestCase): def testHelpArgument(self): self.makeDirAndCopyInput("data") - _, out = shell_safe(self.command + " --help", pipe=True) + _, out = launch_safe(self.command + " --help", pipe=True, nproc=1, mthread=1) # Not a great test! 
self.assertNotIn("Writing options", out, msg="FAIL: Attempting to write options") def testNoArgumentsDefaultDirectory(self): self.makeDirAndCopyInput("data") - shell_safe(self.command, pipe=True) + launch_safe(self.command, pipe=True, nproc=1, mthread=1) self.assertTrue(os.path.exists("data/BOUT.settings"), msg="FAIL: No BOUT.settings file in data directory") self.assertTrue(os.path.exists("data/BOUT.log.0"), @@ -50,7 +50,7 @@ class TestCommandLineArgs(unittest.TestCase): def testShortLogArgument(self): self.makeDirAndCopyInput("data") - shell_safe(self.command + " -l different.log", pipe=True) + launch_safe(self.command + " -l different.log", pipe=True, nproc=1, mthread=1) self.assertFalse(os.path.exists("data/BOUT.log.0"), msg="FAIL: BOUT.log.0 file in data directory") self.assertTrue(os.path.exists("data/different.log.0"), @@ -58,7 +58,7 @@ class TestCommandLineArgs(unittest.TestCase): def testLongLogArgument(self): self.makeDirAndCopyInput("data") - shell_safe(self.command + " --log log", pipe=True) + launch_safe(self.command + " --log log", pipe=True, nproc=1, mthread=1) self.assertFalse(os.path.exists("data/BOUT.log.0"), msg="FAIL: BOUT.log.0 file in data directory") self.assertTrue(os.path.exists("data/log.0"), @@ -66,7 +66,7 @@ class TestCommandLineArgs(unittest.TestCase): def testDirectoryArgument(self): self.makeDirAndCopyInput("test") - shell_safe(self.command + " -d test", pipe=True) + launch_safe(self.command + " -d test", pipe=True, nproc=1, mthread=1) self.assertTrue(os.path.exists("test/BOUT.settings"), msg="FAIL: No BOUT.settings file in test directory") self.assertTrue(os.path.exists("test/BOUT.log.0"), @@ -74,7 +74,7 @@ class TestCommandLineArgs(unittest.TestCase): def testDirectoryArgumentNonExistentDirectory(self): with self.assertRaises(RuntimeError): - shell_safe(self.command + " -d non_existent", pipe=True) + launch_safe(self.command + " -d non_existent", pipe=True, nproc=1, mthread=1) with open("stderr.log") as f: contents = f.read() self.assertIn('"non_existent" does not exist', contents, @@ -82,7 +82,7 @@ class TestCommandLineArgs(unittest.TestCase): def testDirectoryArgumentNonDirectory(self): with self.assertRaises(RuntimeError): - shell_safe(self.command + " -d runtest", pipe=True) + launch_safe(self.command + " -d runtest", pipe=True, nproc=1, mthread=1) with open("stderr.log") as f: contents = f.read() self.assertIn('"runtest" is not a directory', contents, @@ -90,10 +90,10 @@ class TestCommandLineArgs(unittest.TestCase): def testDirectoryArgumentOldSettingsFile(self): self.makeDirAndCopyInput("test") - shell_safe(self.command + " -d test", pipe=True) + launch_safe(self.command + " -d test", pipe=True, nproc=1, mthread=1) shutil.copytree("test", "test_copy") - shell_safe(self.command + - " -d test_copy -f BOUT.settings -o testsettings", pipe=True) + launch_safe(self.command + + " -d test_copy -f BOUT.settings -o testsettings", pipe=True, nproc=1, mthread=1) with open("test_copy/testsettings") as f: contents = f.readlines() @@ -110,10 +110,10 @@ class TestCommandLineArgs(unittest.TestCase): def testShortOptionsAreCleaned(self): self.makeDirAndCopyInput("test") - shell_safe(self.command + " -d test", pipe=True) + launch_safe(self.command + " -d test", pipe=True, nproc=1, mthread=1) shutil.copytree("test", "test_copy") - shell_safe(self.command + - " -d test_copy -f BOUT.settings -o testsettings", pipe=True) + launch_safe(self.command + + " -d test_copy -f BOUT.settings -o testsettings", pipe=True, nproc=1, mthread=1) with open("test_copy/testsettings") as f: 
contents = f.read() @@ -122,11 +122,11 @@ class TestCommandLineArgs(unittest.TestCase): def testCommandLineOptionsArePrinted(self): self.makeDirAndCopyInput("test") - shell_safe(self.command + " -d test", pipe=True) + launch_safe(self.command + " -d test", pipe=True, nproc=1, mthread=1) shutil.copytree("test", "test_copy") extra_options = ["-d", "test_copy", "-f", "BOUT.settings", "-o", "testsettings", "-l", "bout.log", "--foo_flag", "some:option=value"] - _, out = shell_safe(self.command + " " + " ".join(extra_options), pipe=True) + _, out = launch_safe(self.command + " " + " ".join(extra_options), pipe=True, nproc=1, mthread=1) command_line_options = None for line in out.splitlines(): @@ -139,5 +139,5 @@ class TestCommandLineArgs(unittest.TestCase): if __name__ == "__main__": - shell_safe("make") + build_and_log("Command arguments test") unittest.main(verbosity=2) diff --git a/tests/integrated/test-cyclic/runtest b/tests/integrated/test-cyclic/runtest index e64a78abda..56d44101aa 100755 --- a/tests/integrated/test-cyclic/runtest +++ b/tests/integrated/test-cyclic/runtest @@ -9,14 +9,12 @@ try: from builtins import str except: pass -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect from sys import stdout, exit - -print("Making Cyclic Reduction test") -shell_safe("make > make.log") +build_and_log("Cyclic Reduction test") flags = ["", "nsys=2", "nsys=5 periodic", "nsys=7 n=10"] diff --git a/tests/integrated/test-delp2/runtest b/tests/integrated/test-delp2/runtest index fb98b4b6a4..e1ae940530 100755 --- a/tests/integrated/test-delp2/runtest +++ b/tests/integrated/test-delp2/runtest @@ -8,7 +8,7 @@ except: #requires: fftw -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect import numpy as np from sys import stdout, exit @@ -16,9 +16,7 @@ from sys import stdout, exit tol = 1e-10 # Absolute tolerance - -print("Making Delp2 operator test") -shell_safe("make > make.log") +build_and_log("Delp2 operator test") # The command to run exefile = "./test_delp2" diff --git a/tests/integrated/test-drift-instability/runtest.py b/tests/integrated/test-drift-instability/runtest.py index 71001c9bf6..b71495da1f 100755 --- a/tests/integrated/test-drift-instability/runtest.py +++ b/tests/integrated/test-drift-instability/runtest.py @@ -17,20 +17,18 @@ omega_tol = 1e-2 gamma_tol = 1e-2 -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boututils.file_import import file_import from boututils.calculus import deriv from boututils.linear_regression import linear_regression from boutdata.collect import collect import numpy as np -from sys import exit ,argv +from sys import exit, argv nthreads=1 - -print("Making resistive drift instability test") -shell_safe("make > make.log") +build_and_log("resistive drift instability test") zlist = [2, 32, 256] # Just test a few @@ -55,11 +53,6 @@ 128:0.1716229, 256:0.12957680451} #0.130220286897} Changed 25th April 2014 -#zlist = map(lambda x:2**x, range(9)) - -# Create a directory for the data -shell_safe("mkdir -p data") - # Import the grid file grid = file_import("uedge.grd_std.cdl") diff --git a/tests/integrated/test-fieldfactory/runtest b/tests/integrated/test-fieldfactory/runtest index 511240a6cd..97f322c9ff 100755 --- 
a/tests/integrated/test-fieldfactory/runtest +++ b/tests/integrated/test-fieldfactory/runtest @@ -15,15 +15,13 @@ except: vars = ['a', 'b', 'c', 'd'] # Variables to compare tol = 1e-10 # Absolute tolerance -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect import numpy as np from sys import stdout, exit - -print("Making FieldFactory test") -shell_safe("make > make.log") +build_and_log("FieldFactory test") # Read benchmark values print("Reading benchmark data") diff --git a/tests/integrated/test-fieldgroupComm/runtest b/tests/integrated/test-fieldgroupComm/runtest index 389474afae..4a70f0c2d4 100755 --- a/tests/integrated/test-fieldgroupComm/runtest +++ b/tests/integrated/test-fieldgroupComm/runtest @@ -13,7 +13,7 @@ try: except: pass -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect from numpy import abs, seterr from sys import stdout, exit @@ -29,9 +29,7 @@ exeName = "test_fieldgroupcomm" tol = 1e-10 # Relative tolerance - -print("Making {nm} test".format(nm=name)) -shell_safe("make > make.log") +build_and_log("{nm} test".format(nm=name)) print("Running {nm} test".format(nm=name)) success = True diff --git a/tests/integrated/test-griddata-yboundary-guards/runtest b/tests/integrated/test-griddata-yboundary-guards/runtest index c52c6b1e61..57476ee479 100755 --- a/tests/integrated/test-griddata-yboundary-guards/runtest +++ b/tests/integrated/test-griddata-yboundary-guards/runtest @@ -1,14 +1,13 @@ #!/usr/bin/env python3 -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect from netCDF4 import Dataset import numpy import os.path from sys import stdout, exit -print("Making griddata test") -shell_safe("make > make.log") +build_and_log("griddata test") nx = 4 ny = 24 diff --git a/tests/integrated/test-griddata/runtest b/tests/integrated/test-griddata/runtest index 16627fcb95..074ea1a2e6 100755 --- a/tests/integrated/test-griddata/runtest +++ b/tests/integrated/test-griddata/runtest @@ -7,16 +7,13 @@ try: except: pass -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect import numpy as np from sys import stdout, exit - - -print("Making griddata test") -shell_safe("make > make.log") +build_and_log("griddata test") for nproc in [1]: stdout.write("Checking %d processors ... 
" % (nproc)) diff --git a/tests/integrated/test-gyro/runtest b/tests/integrated/test-gyro/runtest index dcb4bd3a0c..a3b2a17e0e 100755 --- a/tests/integrated/test-gyro/runtest +++ b/tests/integrated/test-gyro/runtest @@ -15,15 +15,13 @@ vars = ['pade1', 'pade2'] tol = 1e-10 # Absolute tolerance -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect import numpy as np from sys import stdout, exit - -print("Making Gyro-average inversion test") -shell_safe("make > make.log") +build_and_log("Gyro-average inversion test") # Read benchmark values print("Reading benchmark data") diff --git a/tests/integrated/test-initial/runtest b/tests/integrated/test-initial/runtest index 4ccec13c8c..b52053a0dc 100755 --- a/tests/integrated/test-initial/runtest +++ b/tests/integrated/test-initial/runtest @@ -2,7 +2,7 @@ # Test initial conditions -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect import configparser @@ -170,8 +170,7 @@ varlist = [key for key, values in config.items() if 'function' in values] for coord in ["var_x", "var_y", "var_z"]: varlist.remove(coord) -print("Making initial conditions test") -shell_safe("make > make.log") +build_and_log("initial conditions test") nprocs = [1, 2, 3, 4] for nproc in nprocs: diff --git a/tests/integrated/test-interchange-instability/runtest b/tests/integrated/test-interchange-instability/runtest index a54cd78361..70f08433ff 100755 --- a/tests/integrated/test-interchange-instability/runtest +++ b/tests/integrated/test-interchange-instability/runtest @@ -9,16 +9,14 @@ from __future__ import division nproc = 2 # Number of processors to run on reltol = 1.e-3 # Allowed relative tolerance in growth-rate -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect import numpy as np from sys import stdout, exit nthreads=1 - -print("Making interchange instability test") -shell_safe("make > make.log") +build_and_log("interchange instability test") # Delete old output files shell("rm data_1/BOUT.dmp.*") diff --git a/tests/integrated/test-interpolate/runtest b/tests/integrated/test-interpolate/runtest index 5ca1259e17..e27ec1672d 100755 --- a/tests/integrated/test-interpolate/runtest +++ b/tests/integrated/test-interpolate/runtest @@ -4,7 +4,7 @@ # Run the test, compare results against the benchmark # -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata import collect from numpy import sqrt, max, abs, mean, array, log, polyfit from sys import stdout, exit @@ -30,9 +30,7 @@ methods = { } - -print("Making Interpolation test") -shell_safe("make > make.log") +build_and_log("Interpolation test") print("Running Interpolation test") success = True diff --git a/tests/integrated/test-invertable-operator/runtest b/tests/integrated/test-invertable-operator/runtest index 11b366f804..9e7a1020fc 100755 --- a/tests/integrated/test-invertable-operator/runtest +++ b/tests/integrated/test-invertable-operator/runtest @@ -9,7 +9,7 @@ from __future__ import print_function from __future__ import division -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect 
import collect import numpy as np from sys import stdout, exit @@ -19,8 +19,7 @@ reltol = 1.e-3 # Allowed relative tolerance nthreads=1 -print("Making invertable operator test") -shell_safe("make > make.log") +build_and_log("invertable operator test") # Delete old output files shell("rm data/BOUT.dmp.*") diff --git a/tests/integrated/test-invpar/runtest b/tests/integrated/test-invpar/runtest index b59fbad293..01adc34345 100755 --- a/tests/integrated/test-invpar/runtest +++ b/tests/integrated/test-invpar/runtest @@ -9,15 +9,12 @@ try: from builtins import str except: pass -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect from sys import stdout, exit - -print("Making parallel inversion test") - -shell_safe("make > make.log") +build_and_log("parallel inversion test") flags = [ "acoef=1 bcoef=0 ccoef=0 dcoef=0 ecoef=0", diff --git a/tests/integrated/test-io/runtest b/tests/integrated/test-io/runtest index 77d21abf51..ffad7c8a38 100755 --- a/tests/integrated/test-io/runtest +++ b/tests/integrated/test-io/runtest @@ -12,16 +12,14 @@ try: except: pass -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, shell_safe, launch_safe from boutdata.collect import collect from boututils.datafile import DataFile import numpy as np from sys import stdout, exit - -print("Making I/O test") -shell_safe("make > make.log") +build_and_log("I/O test") # Read benchmark values diff --git a/tests/integrated/test-io_hdf5/runtest b/tests/integrated/test-io_hdf5/runtest index b4c12cf71f..21a9fdb1f0 100755 --- a/tests/integrated/test-io_hdf5/runtest +++ b/tests/integrated/test-io_hdf5/runtest @@ -6,16 +6,14 @@ #Requires: hdf5 -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect from boututils.datafile import DataFile import numpy as np from sys import stdout, exit - -print("Making I/O test") -shell_safe("make > make.log") +build_and_log("I/O test") # Read benchmark values diff --git a/tests/integrated/test-laplace/runtest b/tests/integrated/test-laplace/runtest index 88095a1e1c..e685bffed6 100755 --- a/tests/integrated/test-laplace/runtest +++ b/tests/integrated/test-laplace/runtest @@ -17,15 +17,13 @@ vars = ['flag0', 'flag3', 'flagis', 'flagos', 'flag0ad', 'flag3ad', 'flagisad', 'flagosad'] tol = 1e-6 # Absolute tolerance -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect import numpy as np from sys import stdout, exit - -print("Making Laplacian inversion test") -shell_safe("make > make.log") +build_and_log("Laplacian inversion test") # Read benchmark values print("Reading benchmark data") diff --git a/tests/integrated/test-multigrid_laplace/runtest b/tests/integrated/test-multigrid_laplace/runtest index 8c2949a7a8..69cd7bf42e 100755 --- a/tests/integrated/test-multigrid_laplace/runtest +++ b/tests/integrated/test-multigrid_laplace/runtest @@ -13,14 +13,12 @@ except: tol = 2e-7 # Absolute tolerance numTests = 4 # We test 4 different boundary conditions (with slightly different inputs for each) -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect from sys import exit - 
-print("Making multigrid Laplacian inversion test") -shell_safe("make > make.log") +build_and_log("multigrid Laplacian inversion test") print("Running multigrid Laplacian inversion test") success = True diff --git a/tests/integrated/test-naulin-laplace/runtest b/tests/integrated/test-naulin-laplace/runtest index 078e523e15..df4c16a2b1 100755 --- a/tests/integrated/test-naulin-laplace/runtest +++ b/tests/integrated/test-naulin-laplace/runtest @@ -13,14 +13,12 @@ except: tol = 2e-7 # Absolute tolerance numTests = 4 # We test 4 different boundary conditions (with slightly different inputs for each) -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect from sys import exit - -print("Making LaplaceNaulin inversion test") -shell_safe("make > make.log") +build_and_log("LaplaceNaulin inversion test") print("Running LaplaceNaulin inversion test") success = True diff --git a/tests/integrated/test-options-netcdf/runtest b/tests/integrated/test-options-netcdf/runtest index 52db45d900..c1f7095d39 100755 --- a/tests/integrated/test-options-netcdf/runtest +++ b/tests/integrated/test-options-netcdf/runtest @@ -4,13 +4,13 @@ #requires: False from boututils.datafile import DataFile -from boututils.run_wrapper import shell +from boututils.run_wrapper import build_and_log, shell, launch from boutdata.data import BoutOptionsFile import math import numpy as np -shell("make") +build_and_log("options-netcdf test") shell("rm -f test-out.ini") shell("rm -f test-out.nc") @@ -21,7 +21,7 @@ with DataFile("test.nc", create=True, format="NETCDF4") as f: f.write("string", "hello"); # run BOUT++ -shell("./test-options-netcdf") +launch("./test-options-netcdf", nproc=1, mthread=1) # Check the output INI file result = BoutOptionsFile("test-out.ini") diff --git a/tests/integrated/test-petsc_laplace/runtest b/tests/integrated/test-petsc_laplace/runtest index 57a369f735..aa84efa070 100755 --- a/tests/integrated/test-petsc_laplace/runtest +++ b/tests/integrated/test-petsc_laplace/runtest @@ -20,15 +20,13 @@ vars = [('max_error1',2.e-4), ('max_error8',2.e-5)] #tol = 1e-4 # Absolute (?) tolerance -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect #import numpy as np from sys import stdout, exit - -print("Making PETSc Laplacian inversion test") -shell_safe("make > make.log") +build_and_log("PETSc Laplacian inversion test") print("Running PETSc Laplacian inversion test") success = True diff --git a/tests/integrated/test-petsc_laplace_MAST-grid/runtest b/tests/integrated/test-petsc_laplace_MAST-grid/runtest index d5853648e6..d157939902 100755 --- a/tests/integrated/test-petsc_laplace_MAST-grid/runtest +++ b/tests/integrated/test-petsc_laplace_MAST-grid/runtest @@ -20,14 +20,12 @@ vars = [['max_error1',2.e-4], ['max_error8',1.e-4]] #tol = 1e-4 # Absolute (?) 
tolerance -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect from sys import stdout, exit - -print("Making PETSc Laplacian inversion test with non-identity metric (taken from grid for MAST SOL)") -shell_safe("make > make.log") +build_and_log("PETSc Laplacian inversion test with non-identity metric (taken from grid for MAST SOL)") print("Running PETSc Laplacian inversion test with non-identity metric (taken from grid for MAST SOL)") success = True diff --git a/tests/integrated/test-restart-io/runtest b/tests/integrated/test-restart-io/runtest index 23b348521f..b01d8af6c6 100755 --- a/tests/integrated/test-restart-io/runtest +++ b/tests/integrated/test-restart-io/runtest @@ -8,7 +8,7 @@ from boutdata import restart from boutdata.collect import collect from boututils.boutarray import BoutArray from boututils.datafile import DataFile -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe import numpy import os from sys import exit @@ -20,8 +20,7 @@ nz = 4 mxg = 2 myg = 2 -print("Making restart I/O test") -shell_safe("make > make.log") +build_and_log("restart I/O test") x = numpy.linspace(0., 1., nx+2*mxg)[:, numpy.newaxis, numpy.newaxis] y = numpy.linspace(0., 1., ny+2*myg)[numpy.newaxis, :, numpy.newaxis] diff --git a/tests/integrated/test-restart-io_hdf5/runtest b/tests/integrated/test-restart-io_hdf5/runtest index 0dd1792ac0..18810a6f7f 100755 --- a/tests/integrated/test-restart-io_hdf5/runtest +++ b/tests/integrated/test-restart-io_hdf5/runtest @@ -8,7 +8,7 @@ from boutdata import restart from boutdata.collect import collect from boututils.boutarray import BoutArray from boututils.datafile import DataFile -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe import numpy import os from sys import exit @@ -19,8 +19,7 @@ nz = 4 mxg = 2 myg = 2 -print("Making restart I/O test") -shell_safe("make > make.log") +build_and_log("restart I/O test") x = numpy.linspace(0., 1., nx+2*mxg)[:, numpy.newaxis, numpy.newaxis] y = numpy.linspace(0., 1., ny+2*myg)[numpy.newaxis, :, numpy.newaxis] diff --git a/tests/integrated/test-restarting/runtest b/tests/integrated/test-restarting/runtest index 6181398094..fdc75b2b96 100755 --- a/tests/integrated/test-restarting/runtest +++ b/tests/integrated/test-restarting/runtest @@ -1,14 +1,12 @@ #!/usr/bin/env python3 -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect import numpy as np from sys import stdout, exit - -print("-> Making restart test") -shell_safe("make > make.log") +build_and_log("restart test") # Run once for 10 timesteps s, out = launch_safe("./test_restarting nout=10", nproc=1, pipe=True) diff --git a/tests/integrated/test-slepc-solver/runtest b/tests/integrated/test-slepc-solver/runtest index 131849346b..333432e09a 100755 --- a/tests/integrated/test-slepc-solver/runtest +++ b/tests/integrated/test-slepc-solver/runtest @@ -3,11 +3,10 @@ # requires: slepc from boutdata.collect import collect -from boututils.run_wrapper import shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, launch_safe from numpy import isclose -print("Making SLEPc eigen solver test") -shell_safe("make > make.log") +build_and_log("SLEPc eigen solver test") 
print("Running SLEPc eigen solver test") status, out = launch_safe("./test-slepc-solver", nproc=1, pipe=True, verbose=True) diff --git a/tests/integrated/test-smooth/runtest b/tests/integrated/test-smooth/runtest index 0d578c0de3..1bc9445bc9 100755 --- a/tests/integrated/test-smooth/runtest +++ b/tests/integrated/test-smooth/runtest @@ -13,15 +13,13 @@ except: vars = ['yavg2d', 'yavg3d', 'sm3d'] tol = 1e-10 # Absolute tolerance -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect import numpy as np from sys import stdout, exit - -print("Making smoothing operator test") -shell_safe("make > make.log") +build_and_log("smoothing operator test") # Read benchmark values print("Reading benchmark data") diff --git a/tests/integrated/test-twistshift-staggered/runtest b/tests/integrated/test-twistshift-staggered/runtest index ed5567fbda..57bbeaf96f 100755 --- a/tests/integrated/test-twistshift-staggered/runtest +++ b/tests/integrated/test-twistshift-staggered/runtest @@ -1,7 +1,7 @@ #!/usr/bin/env python3 from boutdata import collect -from boututils.run_wrapper import launch_safe, shell_safe +from boututils.run_wrapper import build_and_log, launch_safe import numpy from sys import exit @@ -9,8 +9,7 @@ datapath = 'data' nproc = 1 tol = 1.e-13 -print('Making twistshift test') -shell_safe('make > make.log') +build_and_log('twistshift test') s, out = launch_safe('./test-twistshift', nproc=nproc, pipe=True) with open("run.log."+str(nproc), "w") as f: diff --git a/tests/integrated/test-twistshift/runtest b/tests/integrated/test-twistshift/runtest index 24dd0490e2..322c7d8071 100755 --- a/tests/integrated/test-twistshift/runtest +++ b/tests/integrated/test-twistshift/runtest @@ -1,7 +1,7 @@ #!/usr/bin/env python3 from boutdata import collect -from boututils.run_wrapper import launch_safe, shell_safe +from boututils.run_wrapper import build_and_log, launch_safe import numpy from sys import exit @@ -9,8 +9,7 @@ datapath = 'data' nproc = 1 tol = 1.e-13 -print('Making twistshift test') -shell_safe('make > make.log') +build_and_log('twistshift test') s, out = launch_safe('./test-twistshift', nproc=nproc, pipe=True) with open("run.log."+str(nproc), "w") as f: diff --git a/tests/integrated/test-yupdown/runtest b/tests/integrated/test-yupdown/runtest index 6599c259d3..de4ca5b25e 100755 --- a/tests/integrated/test-yupdown/runtest +++ b/tests/integrated/test-yupdown/runtest @@ -1,15 +1,13 @@ #!/usr/bin/env python3 -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, launch_safe from boutdata.collect import collect from sys import exit from numpy import max, abs - -shell_safe("make > make.log") - +build_and_log("parallel slices test") s, out = launch_safe("./test_yupdown", nproc=1, pipe=True, verbose=True) From efb8c13189568ae6b37fcbb5bc9b0a611b443b45 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 17 Dec 2019 11:37:20 +0000 Subject: [PATCH 229/428] Add note on running tests on Windows --- manual/sphinx/user_docs/advanced_install.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/manual/sphinx/user_docs/advanced_install.rst b/manual/sphinx/user_docs/advanced_install.rst index 20d66ec5c8..2a5e5c2fae 100644 --- a/manual/sphinx/user_docs/advanced_install.rst +++ b/manual/sphinx/user_docs/advanced_install.rst @@ -846,7 +846,11 @@ really want shared libraries, ``-DCMAKE_WINDOWS_EXPORT_ALL_SYMBOLS=ON``. 
The latter is untested, use at your own risk! The unit tests should all pass, but most of the integrated tests will -not run work out of the box yet. +not work out of the box yet as Windows doesn't understand +shebangs. That is, without a file extension, it doesn't know what +program to use to run ``runtest``. The majority of the tests can be +run manually with ``python.exe runtest``. You will still need to set +``PYTHONPATH`` and have a suitable Python environment. Issues ------ From d6f38329d4969ab0bd9800b6456dec7ba67867ad Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 8 Jan 2020 13:05:58 +0000 Subject: [PATCH 230/428] CMake: fix FindSlepc for non-make generators --- cmake/FindSLEPc.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/FindSLEPc.cmake b/cmake/FindSLEPc.cmake index 10deb80e4e..7a809efba9 100644 --- a/cmake/FindSLEPc.cmake +++ b/cmake/FindSLEPc.cmake @@ -113,7 +113,7 @@ show : # Define macro for getting SLEPc variables from Makefile macro(SLEPC_GET_VARIABLE var name) set(${var} "NOTFOUND" CACHE INTERNAL "Cleared" FORCE) - execute_process(COMMAND ${CMAKE_MAKE_PROGRAM} --no-print-directory -f ${slepc_config_makefile} show VARIABLE=${name} + execute_process(COMMAND ${MAKE_EXECUTABLE} --no-print-directory -f ${slepc_config_makefile} show VARIABLE=${name} OUTPUT_VARIABLE ${var} RESULT_VARIABLE slepc_return) endmacro() From ba387f6e54f5d8b5e9db9d8a0b37c9ce62d58568 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 27 Jan 2020 11:06:19 +0000 Subject: [PATCH 231/428] CMake: Enable using external mpark.variant installation --- CMakeLists.txt | 21 ++++++++++++++------- bout++Config.cmake.in | 9 +++++++-- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a89b8d4cfe..743dd2b19c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -39,12 +39,6 @@ if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git") endif() endif() -if(NOT EXISTS "${PROJECT_SOURCE_DIR}/externalpackages/mpark.variant/CMakeLists.txt") - message(FATAL_ERROR "The submodules were not downloaded! GIT_SUBMODULE was turned off or failed. Please update submodules and try again.") -endif() - -add_subdirectory(externalpackages/mpark.variant) - set(BOUT_SOURCES ./include/boundary_factory.hxx ./include/boundary_op.hxx @@ -304,7 +298,7 @@ add_library(bout++ ${BOUT_SOURCES} ) add_library(bout++::bout++ ALIAS bout++) -target_link_libraries(bout++ PUBLIC MPI::MPI_CXX mpark_variant) +target_link_libraries(bout++ PUBLIC MPI::MPI_CXX) target_include_directories(bout++ PUBLIC $ $ @@ -318,6 +312,19 @@ target_compile_definitions(bout++ target_compile_features(bout++ PUBLIC cxx_std_11) set_target_properties(bout++ PROPERTIES CXX_EXTENSIONS OFF) +option(EXTERNAL_MPARK_VARIANT "Use external installation of mpark.variant" OFF) +if(EXTERNAL_MPARK_VARIANT) + find_package(mpark_variant REQUIRED) + message(STATUS "Using external mpark.variant") +else() + if(NOT EXISTS "${PROJECT_SOURCE_DIR}/externalpackages/mpark.variant/CMakeLists.txt") + message(FATAL_ERROR "The submodules were not downloaded! GIT_SUBMODULE was turned off or failed. 
Please update submodules and try again.") + endif() + add_subdirectory(externalpackages/mpark.variant) + message(STATUS "Using mpark.variant submodule") +endif() +target_link_libraries(bout++ PUBLIC mpark_variant) + option(ENABLE_WARNINGS "Enable compiler warnings" ON) if (ENABLE_WARNINGS) target_compile_options(bout++ PRIVATE diff --git a/bout++Config.cmake.in b/bout++Config.cmake.in index a50a356179..1615b1d2ca 100644 --- a/bout++Config.cmake.in +++ b/bout++Config.cmake.in @@ -45,10 +45,15 @@ if(EXISTS "@NetCDF_ROOT@") set(NetCDF_ROOT "@NetCDF_ROOT@") endif() -set(mpark_variant_DIR "${CMAKE_CURRENT_LIST_DIR}/../../../lib/cmake/mpark_variant") -set(MPIEXEC_EXECUTABLE @MPIEXEC_EXECUTABLE@) +if(@EXTERNAL_MPARK_VARIANT@) + set(mpark_variant_ROOT "@mpark_variant_ROOT@") +else() + set(mpark_variant_ROOT "${CMAKE_CURRENT_LIST_DIR}/../../../lib/cmake/mpark_variant") +endif() +set(MPIEXEC_EXECUTABLE @MPIEXEC_EXECUTABLE@) find_dependency(MPI @MPI_CXX_VERSION@ EXACT) + if (BOUT_USE_OPENMP) find_dependency(OpenMP) endif() From 9ed38ad9a0a599590bc13ab940065ca013bb85cb Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 27 Jan 2020 11:25:52 +0000 Subject: [PATCH 232/428] CMake: Wrap submodule update into function Means GIT_SUBMODULE=OFF is not required if no bundled dependencies are used --- CMakeLists.txt | 25 +++++++++++++------------ tests/unit/CMakeLists.txt | 1 + 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 743dd2b19c..7c60cb4d86 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -23,21 +23,24 @@ find_package(MPI REQUIRED) set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) -# Taken from https://cliutils.gitlab.io/modern-cmake/chapters/projects/submodule.html -find_package(Git QUIET) -if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git") - # Update submodules as needed - option(GIT_SUBMODULE "Check submodules during build" ON) - if(GIT_SUBMODULE) +option(GIT_SUBMODULE "Check submodules during build" ON) +# Adapted from https://cliutils.gitlab.io/modern-cmake/chapters/projects/submodule.html +# Update submodules as needed +function(bout_update_submodules) + if(NOT GIT_SUBMODULE) + return() + endif() + find_package(Git QUIET) + if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git") message(STATUS "Submodule update") execute_process(COMMAND ${GIT_EXECUTABLE} submodule update --init --recursive - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - RESULT_VARIABLE GIT_SUBMOD_RESULT) + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + RESULT_VARIABLE GIT_SUBMOD_RESULT) if(NOT GIT_SUBMOD_RESULT EQUAL "0") message(FATAL_ERROR "git submodule update --init failed with ${GIT_SUBMOD_RESULT}, please checkout submodules") endif() endif() -endif() +endfunction() set(BOUT_SOURCES ./include/boundary_factory.hxx @@ -317,9 +320,7 @@ if(EXTERNAL_MPARK_VARIANT) find_package(mpark_variant REQUIRED) message(STATUS "Using external mpark.variant") else() - if(NOT EXISTS "${PROJECT_SOURCE_DIR}/externalpackages/mpark.variant/CMakeLists.txt") - message(FATAL_ERROR "The submodules were not downloaded! GIT_SUBMODULE was turned off or failed. 
Please update submodules and try again.") - endif() + bout_update_submodules() add_subdirectory(externalpackages/mpark.variant) message(STATUS "Using mpark.variant submodule") endif() diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 2bee9a2f53..4e17dc9b1d 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -1,3 +1,4 @@ +bout_update_submodules() add_subdirectory("${PROJECT_SOURCE_DIR}/externalpackages/googletest" "externalpackages/googletest") From 98d09c857d055d665a038fe3782a9ea0fcea3451 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 27 Jan 2020 11:31:21 +0000 Subject: [PATCH 233/428] CMake: Fix wrong variable name in FindNetCDF Ended up not setting netcdf version --- cmake/FindNetCDF.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/FindNetCDF.cmake b/cmake/FindNetCDF.cmake index 81e75e667f..08fca8b401 100644 --- a/cmake/FindNetCDF.cmake +++ b/cmake/FindNetCDF.cmake @@ -181,8 +181,8 @@ if (NetCDF_DEBUG) endif() mark_as_advanced(NetCDF_CXX_LIBRARY) -if (NetCDF_INCLUDE_DIR) - file(STRINGS "${NetCDF_INCLUDE_DIR}/netcdf_meta.h" _netcdf_version_lines +if (NetCDF_CXX_INCLUDE_DIR) + file(STRINGS "${NetCDF_CXX_INCLUDE_DIR}/netcdf_meta.h" _netcdf_version_lines REGEX "#define[ \t]+NC_VERSION_(MAJOR|MINOR|PATCH|NOTE)") string(REGEX REPLACE ".*NC_VERSION_MAJOR *\([0-9]*\).*" "\\1" _netcdf_version_major "${_netcdf_version_lines}") string(REGEX REPLACE ".*NC_VERSION_MINOR *\([0-9]*\).*" "\\1" _netcdf_version_minor "${_netcdf_version_lines}") From 6452319939821e977357732dec73b16105736f2f Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 27 Jan 2020 11:52:56 +0000 Subject: [PATCH 234/428] CMake: Update docs with how to use existing fmt/mpark installations --- manual/sphinx/user_docs/installing.rst | 50 +++++++++++++++++++------- 1 file changed, 38 insertions(+), 12 deletions(-) diff --git a/manual/sphinx/user_docs/installing.rst b/manual/sphinx/user_docs/installing.rst index 8f28bba0c8..ee01a68bcb 100644 --- a/manual/sphinx/user_docs/installing.rst +++ b/manual/sphinx/user_docs/installing.rst @@ -289,14 +289,15 @@ There is now (experimental) support for `CMake `_. You will 3.9. CMake supports out-of-source builds by default, which are A Good Idea. Basic configuration with CMake looks like:: - $ mkdir build && cd build - $ cmake .. + $ cmake . -B build -You can then run ``make`` as usual. +which creates a new directory ``build``, which you can then compile with:: + + $ cmake --build build You can see what build options are available with:: - $ cmake .. -LH + $ cmake . -B build -LH ... // Enable backtrace ENABLE_BACKTRACE:BOOL=ON @@ -314,16 +315,20 @@ You can see what build options are available with:: CMake uses the ``-D=`` syntax to control these variables. You can set ``_ROOT`` to guide CMake in finding the various optional third-party packages (except for PETSc/SLEPc, -which use ``_DIR``). CMake understands the usual environment variables -for setting the compiler, compiler/linking flags, as well as having -built-in options to control them and things like static vs shared -libraries, etc. See the `CMake documentation -`_ for more infomation. +which use ``_DIR``). Note that some packages have funny +capitalisation, for example ``NetCDF_ROOT``! Use ``-LH`` to see the +form that each package expects. + +CMake understands the usual environment variables for setting the +compiler, compiler/linking flags, as well as having built-in options +to control them and things like static vs shared libraries, etc. 
See +the `CMake documentation `_ for more +information. A more complicated CMake configuration command might look like:: - $ CC=mpicc CXX=mpic++ cmake .. \ + $ CC=mpicc CXX=mpic++ cmake . -B build \ -DUSE_PETSC=ON -DPETSC_DIR=/path/to/petsc/ \ -DUSE_SLEPC=ON -DSLEPC_DIR=/path/to/slepc/ \ -DUSE_SUNDIALS=ON -DSUNDIALS_ROOT=/path/to/sundials \ @@ -334,6 +339,28 @@ might look like:: -DBUILD_SHARED_LIBS=ON -DCMAKE_INSTALL_PREFIX=/path/to/install/BOUT++ +If you wish to change the configuration after having built ``BOUT++``, +it's wise to delete the ``CMakeCache.txt`` file in the build +directory. The equivalent of ``make distclean`` with CMake is to just +delete the entire build directory and reconfigure. + +Bundled Dependencies +^^^^^^^^^^^^^^^^^^^^ + +BOUT++ bundles some dependencies, currently `mpark.variant +`_, `fmt `_ and +`googletest `_. If you wish to +use an existing installation of ``mpark.variant``, you can set +``-DEXTERNAL_MPARK_VARIANT=ON``, and supply the installation path +using ``mpark_variant_ROOT`` via the command line or environment +variable. Similarly for ``fmt``, using ``-DEXTERNAL_FMT=ON`` and +``fmt_ROOT`` respectively. The recommended way to use ``googletest`` +is to compile it at the same time as your project, therefore there is +no option to use an external installation for that. + +Using CMake with your physics model +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + You can write a CMake configuration file (``CMakeLists.txt``) for your physics model in only four lines: @@ -347,8 +374,7 @@ physics model You just need to give CMake the location where you installed BOUT++ via the ``CMAKE_PREFIX_PATH`` variable:: - $ mkdir build && cd build - $ cmake .. -DCMAKE_PREFIX_PATH=/path/to/install/BOUT++ + $ cmake . -B build -DCMAKE_PREFIX_PATH=/path/to/install/BOUT++ .. _sec-config-nls: From 37d0732349d9b7928dc76d8048d08828b20db31d Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 28 Jan 2020 11:00:39 +0000 Subject: [PATCH 235/428] CMake: Fatal error if bundled dependencies not found This stops the summary getting printed --- CMakeLists.txt | 6 +++++- tests/unit/CMakeLists.txt | 4 ++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7c60cb4d86..199b1e0945 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -322,7 +322,11 @@ if(EXTERNAL_MPARK_VARIANT) else() bout_update_submodules() add_subdirectory(externalpackages/mpark.variant) - message(STATUS "Using mpark.variant submodule") + if(TARGET mpark_variant) + message(STATUS "Using mpark.variant submodule") + else() + message(FATAL_ERROR "mpark_variant not found! Have you disabled the git submodules (GIT_SUBMODULE)?") + endif() endif() target_link_libraries(bout++ PUBLIC mpark_variant) diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 4e17dc9b1d..749b159be0 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -2,6 +2,10 @@ bout_update_submodules() add_subdirectory("${PROJECT_SOURCE_DIR}/externalpackages/googletest" "externalpackages/googletest") +if(NOT TARGET gtest) + message(FATAL_ERROR "googletest not found! 
Have you disabled the git submodules (GIT_SUBMODULE)?") +endif() + mark_as_advanced( BUILD_GMOCK BUILD_GTEST BUILD_SHARED_LIBS gmock_build_tests gtest_build_samples gtest_build_tests From 7ac9ffd20b73c5e4e5d6c90c50081477bfde27d4 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 28 Jan 2020 11:13:57 +0000 Subject: [PATCH 236/428] CMake: Rename EXTERNAL_* options to BOUT_USE_SYSTEM_* --- CMakeLists.txt | 4 ++-- bout++Config.cmake.in | 3 ++- manual/sphinx/user_docs/installing.rst | 12 ++++++------ 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 199b1e0945..72a459251f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -315,8 +315,8 @@ target_compile_definitions(bout++ target_compile_features(bout++ PUBLIC cxx_std_11) set_target_properties(bout++ PROPERTIES CXX_EXTENSIONS OFF) -option(EXTERNAL_MPARK_VARIANT "Use external installation of mpark.variant" OFF) -if(EXTERNAL_MPARK_VARIANT) +option(BOUT_USE_SYSTEM_MPARK_VARIANT "Use external installation of mpark.variant" OFF) +if(BOUT_USE_SYSTEM_MPARK_VARIANT) find_package(mpark_variant REQUIRED) message(STATUS "Using external mpark.variant") else() diff --git a/bout++Config.cmake.in b/bout++Config.cmake.in index 1615b1d2ca..f8c53dd75b 100644 --- a/bout++Config.cmake.in +++ b/bout++Config.cmake.in @@ -45,9 +45,10 @@ if(EXISTS "@NetCDF_ROOT@") set(NetCDF_ROOT "@NetCDF_ROOT@") endif() -if(@EXTERNAL_MPARK_VARIANT@) +if(@BOUT_USE_SYSTEM_MPARK_VARIANT@) set(mpark_variant_ROOT "@mpark_variant_ROOT@") else() + # mpark.variant doesn't use GNUInstallDirs, always installs to lib set(mpark_variant_ROOT "${CMAKE_CURRENT_LIST_DIR}/../../../lib/cmake/mpark_variant") endif() diff --git a/manual/sphinx/user_docs/installing.rst b/manual/sphinx/user_docs/installing.rst index ee01a68bcb..6e2a41450c 100644 --- a/manual/sphinx/user_docs/installing.rst +++ b/manual/sphinx/user_docs/installing.rst @@ -351,12 +351,12 @@ BOUT++ bundles some dependencies, currently `mpark.variant `_, `fmt `_ and `googletest `_. If you wish to use an existing installation of ``mpark.variant``, you can set -``-DEXTERNAL_MPARK_VARIANT=ON``, and supply the installation path -using ``mpark_variant_ROOT`` via the command line or environment -variable. Similarly for ``fmt``, using ``-DEXTERNAL_FMT=ON`` and -``fmt_ROOT`` respectively. The recommended way to use ``googletest`` -is to compile it at the same time as your project, therefore there is -no option to use an external installation for that. +``-DBOUT_USE_SYSTEM_MPARK_VARIANT=ON``, and supply the installation +path using ``mpark_variant_ROOT`` via the command line or environment +variable. Similarly for ``fmt``, using ``-DBOUT_USE_SYSTEM_FMT=ON`` +and ``fmt_ROOT`` respectively. The recommended way to use +``googletest`` is to compile it at the same time as your project, +therefore there is no option to use an external installation for that. 
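For example, a configure invocation using a pre-installed copy of mpark.variant might then look like the following (the install prefix shown is only a placeholder, in the same spirit as the other ``/path/to/...`` examples above)::

    $ cmake . -B build -DBOUT_USE_SYSTEM_MPARK_VARIANT=ON \
          -Dmpark_variant_ROOT=/path/to/mpark.variant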
Using CMake with your physics model ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From df090e7453af7f5535a282d90861dd995b148449 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Tue, 28 Jan 2020 16:01:37 +0000 Subject: [PATCH 237/428] use system library, if GIT_SUBMODULE=OFF --- CMakeLists.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 72a459251f..eb7cd7a369 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -315,7 +315,12 @@ target_compile_definitions(bout++ target_compile_features(bout++ PUBLIC cxx_std_11) set_target_properties(bout++ PROPERTIES CXX_EXTENSIONS OFF) -option(BOUT_USE_SYSTEM_MPARK_VARIANT "Use external installation of mpark.variant" OFF) +if(GIT_SUBMODULE OR (EXISTS externalpackages/mpark.variant/CMakeLists.txt)) + option(BOUT_USE_SYSTEM_MPARK_VARIANT "Use external installation of mpark.variant" OFF) +else() + option(BOUT_USE_SYSTEM_MPARK_VARIANT "Use external installation of mpark.variant" ON) +endif() + if(BOUT_USE_SYSTEM_MPARK_VARIANT) find_package(mpark_variant REQUIRED) message(STATUS "Using external mpark.variant") From 33b007dde6aa7efc217ebc2067b56f1c3eb9b608 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Wed, 11 Mar 2020 10:58:45 +0000 Subject: [PATCH 238/428] mention that *_ROOT is optional --- manual/sphinx/user_docs/installing.rst | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/manual/sphinx/user_docs/installing.rst b/manual/sphinx/user_docs/installing.rst index 6e2a41450c..64207deda6 100644 --- a/manual/sphinx/user_docs/installing.rst +++ b/manual/sphinx/user_docs/installing.rst @@ -352,11 +352,13 @@ BOUT++ bundles some dependencies, currently `mpark.variant `googletest `_. If you wish to use an existing installation of ``mpark.variant``, you can set ``-DBOUT_USE_SYSTEM_MPARK_VARIANT=ON``, and supply the installation -path using ``mpark_variant_ROOT`` via the command line or environment -variable. Similarly for ``fmt``, using ``-DBOUT_USE_SYSTEM_FMT=ON`` +path using ``mpark_variant_ROOT`` via the command line or +environment variable if it is installed in a non-standard +location. Similarly for ``fmt``, using ``-DBOUT_USE_SYSTEM_FMT=ON`` and ``fmt_ROOT`` respectively. The recommended way to use ``googletest`` is to compile it at the same time as your project, -therefore there is no option to use an external installation for that. +therefore there is no option to use an external installation for +that. Using CMake with your physics model ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From c490f132aa4888784c199a4a9e2fc26d9cc5b807 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Thu, 30 Jan 2020 10:28:21 +0000 Subject: [PATCH 239/428] Do not install GTEST by default --- CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index eb7cd7a369..ea6fd557ad 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -21,6 +21,9 @@ find_program(MPIEXEC_EXECUTABLE NAMES mpiexec mpirun) find_package(MPI REQUIRED) +# Override default +option(INSTALL_GTEST "Enable installation of googletest. 
(Projects embedding googletest may want to turn this OFF.)" OFF) + set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) option(GIT_SUBMODULE "Check submodules during build" ON) From 7deb7a28cde5dd30362540751f43b9dd6853cc7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Thu, 30 Jan 2020 10:28:38 +0000 Subject: [PATCH 240/428] Set default checklevel to 2 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ea6fd557ad..d6f75dba32 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -359,7 +359,7 @@ endif() # Compile time features set(CHECK_LEVELS 0 1 2 3 4) -set(CHECK 3 CACHE STRING "Set run-time checking level") +set(CHECK 2 CACHE STRING "Set run-time checking level") set_property(CACHE CHECK PROPERTY STRINGS ${CHECK_LEVELS}) if (NOT CHECK IN_LIST CHECK_LEVELS) message(FATAL_ERROR "CHECK must be one of ${CHECK_LEVELS}") From 444d762111db35e293060a56ceb409e38edcba8c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 14 Feb 2020 17:32:55 +0000 Subject: [PATCH 241/428] CMake: Fix FindPETSc for static builds --- cmake/FindPETSc.cmake | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/cmake/FindPETSc.cmake b/cmake/FindPETSc.cmake index 8910c7b5bb..70c884031d 100644 --- a/cmake/FindPETSc.cmake +++ b/cmake/FindPETSc.cmake @@ -307,7 +307,9 @@ int main(int argc,char *argv[]) { message (STATUS "Minimal PETSc includes and libraries work. This probably means we are building with shared libs.") set (petsc_includes_needed "${petsc_includes_minimal}") else (petsc_works_minimal) # Minimal includes fail, see if just adding full includes fixes it - petsc_test_runs ("${petsc_includes_all}" "${PETSC_LIBRARIES_TS}" petsc_works_allincludes) + petsc_test_runs ("${petsc_includes_all}" + "${PETSC_LIBRARIES_TS};MPI::MPI_${PETSC_LANGUAGE_BINDINGS}" + petsc_works_allincludes) if (petsc_works_allincludes) # It does, we just need all the includes ( message (STATUS "PETSc requires extra include paths, but links correctly with only interface libraries. This is an unexpected configuration (but it seems to work fine).") set (petsc_includes_needed ${petsc_includes_all}) @@ -316,14 +318,18 @@ int main(int argc,char *argv[]) { foreach (pkg SYS VEC MAT DM KSP SNES TS ALL) list (APPEND PETSC_LIBRARIES_${pkg} ${petsc_libraries_external}) endforeach (pkg) - petsc_test_runs ("${petsc_includes_minimal}" "${PETSC_LIBRARIES_TS}" petsc_works_alllibraries) + petsc_test_runs ("${petsc_includes_minimal}" + "${PETSC_LIBRARIES_TS};MPI::MPI_${PETSC_LANGUAGE_BINDINGS}" + petsc_works_alllibraries) if (petsc_works_alllibraries) message (STATUS "PETSc only need minimal includes, but requires explicit linking to all dependencies. This is expected when PETSc is built with static libraries.") set (petsc_includes_needed ${petsc_includes_minimal}) else (petsc_works_alllibraries) # It looks like we really need everything, should have listened to Matt set (petsc_includes_needed ${petsc_includes_all}) - petsc_test_runs ("${petsc_includes_all}" "${PETSC_LIBRARIES_TS}" petsc_works_all) + petsc_test_runs ("${petsc_includes_all}" + "${PETSC_LIBRARIES_TS};MPI::MPI_${PETSC_LANGUAGE_BINDINGS}" + petsc_works_all) if (petsc_works_all) # We fail anyways message (STATUS "PETSc requires extra include paths and explicit linking to all dependencies. 
This probably means you have static libraries and something unexpected in PETSc headers.") else (petsc_works_all) # We fail anyways @@ -367,8 +373,10 @@ find_package_handle_standard_args (PETSc if (PETSC_FOUND) if (NOT TARGET PETSc::PETSc) add_library(PETSc::PETSc UNKNOWN IMPORTED) + list(GET PETSC_LIBRARIES 0 PETSC_LIBRARY) + target_link_libraries(PETSc::PETSc INTERFACE "${PETSC_LIBRARIES}") set_target_properties(PETSc::PETSc PROPERTIES - IMPORTED_LOCATION "${PETSC_LIBRARIES}" + IMPORTED_LOCATION "${PETSC_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${PETSC_INCLUDES}" ) endif() From 06374fc80fbc75919d459c5fe48a80099e8dbe6b Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 17 Feb 2020 14:02:35 +0000 Subject: [PATCH 242/428] CMake: Fix FindPETSc to link/include OpenMP/MPI --- cmake/FindPETSc.cmake | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/cmake/FindPETSc.cmake b/cmake/FindPETSc.cmake index 70c884031d..7a57c9f8ac 100644 --- a/cmake/FindPETSc.cmake +++ b/cmake/FindPETSc.cmake @@ -300,15 +300,23 @@ int main(int argc,char *argv[]) { mark_as_advanced (PETSC_INCLUDE_DIR PETSC_INCLUDE_CONF) set (petsc_includes_minimal ${PETSC_INCLUDE_CONF} ${PETSC_INCLUDE_DIR}) - petsc_test_runs ("${petsc_includes_minimal}" - "${PETSC_LIBRARIES_TS};MPI::MPI_${PETSC_LANGUAGE_BINDINGS}" + file (STRINGS "${PETSC_INCLUDE_CONF}/petscconf.h" PETSC_HAS_OPENMP REGEX "#define PETSC_HAVE_OPENMP 1") + if (PETSC_HAS_OPENMP) + find_package(OpenMP REQUIRED) + set (petsc_openmp_library ";OpenMP::OpenMP_${PETSC_LANGUAGE_BINDINGS}") + endif() + set (petsc_mpi_include_dirs "${MPI_${PETSC_LANGUAGE_BINDINGS}_INCLUDE_DIRS}") + set (petsc_additional_libraries "MPI::MPI_${PETSC_LANGUAGE_BINDINGS}${petsc_openmp_library}") + + petsc_test_runs ("${petsc_includes_minimal};${petsc_mpi_include_dirs}" + "${PETSC_LIBRARIES_TS};${petsc_additional_libraries}" petsc_works_minimal) if (petsc_works_minimal) message (STATUS "Minimal PETSc includes and libraries work. This probably means we are building with shared libs.") set (petsc_includes_needed "${petsc_includes_minimal}") else (petsc_works_minimal) # Minimal includes fail, see if just adding full includes fixes it - petsc_test_runs ("${petsc_includes_all}" - "${PETSC_LIBRARIES_TS};MPI::MPI_${PETSC_LANGUAGE_BINDINGS}" + petsc_test_runs ("${petsc_includes_all};${petsc_mpi_include_dirs}" + "${PETSC_LIBRARIES_TS};${petsc_additional_libraries}" petsc_works_allincludes) if (petsc_works_allincludes) # It does, we just need all the includes ( message (STATUS "PETSc requires extra include paths, but links correctly with only interface libraries. This is an unexpected configuration (but it seems to work fine).") @@ -318,8 +326,8 @@ int main(int argc,char *argv[]) { foreach (pkg SYS VEC MAT DM KSP SNES TS ALL) list (APPEND PETSC_LIBRARIES_${pkg} ${petsc_libraries_external}) endforeach (pkg) - petsc_test_runs ("${petsc_includes_minimal}" - "${PETSC_LIBRARIES_TS};MPI::MPI_${PETSC_LANGUAGE_BINDINGS}" + petsc_test_runs ("${petsc_includes_minimal};${petsc_mpi_include_dirs}" + "${PETSC_LIBRARIES_TS};${petsc_additional_libraries}" petsc_works_alllibraries) if (petsc_works_alllibraries) message (STATUS "PETSc only need minimal includes, but requires explicit linking to all dependencies. 
This is expected when PETSc is built with static libraries.") @@ -327,8 +335,8 @@ int main(int argc,char *argv[]) { else (petsc_works_alllibraries) # It looks like we really need everything, should have listened to Matt set (petsc_includes_needed ${petsc_includes_all}) - petsc_test_runs ("${petsc_includes_all}" - "${PETSC_LIBRARIES_TS};MPI::MPI_${PETSC_LANGUAGE_BINDINGS}" + petsc_test_runs ("${petsc_includes_all};${petsc_mpi_include_dirs}" + "${PETSC_LIBRARIES_TS};${petsc_additional_libraries}" petsc_works_all) if (petsc_works_all) # We fail anyways message (STATUS "PETSc requires extra include paths and explicit linking to all dependencies. This probably means you have static libraries and something unexpected in PETSc headers.") From 5f44f8e83250d0f12a192748234804e9a49b540f Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 17 Feb 2020 10:57:05 +0000 Subject: [PATCH 243/428] CMake: Fix finding/setting NetCDF version Set tweak level to 99 if the note is empty --- cmake/FindNetCDF.cmake | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cmake/FindNetCDF.cmake b/cmake/FindNetCDF.cmake index 08fca8b401..1b1d68e09f 100644 --- a/cmake/FindNetCDF.cmake +++ b/cmake/FindNetCDF.cmake @@ -181,13 +181,17 @@ if (NetCDF_DEBUG) endif() mark_as_advanced(NetCDF_CXX_LIBRARY) -if (NetCDF_CXX_INCLUDE_DIR) - file(STRINGS "${NetCDF_CXX_INCLUDE_DIR}/netcdf_meta.h" _netcdf_version_lines +if (NetCDF_C_INCLUDE_DIR) + file(STRINGS "${NetCDF_C_INCLUDE_DIR}/netcdf_meta.h" _netcdf_version_lines REGEX "#define[ \t]+NC_VERSION_(MAJOR|MINOR|PATCH|NOTE)") string(REGEX REPLACE ".*NC_VERSION_MAJOR *\([0-9]*\).*" "\\1" _netcdf_version_major "${_netcdf_version_lines}") string(REGEX REPLACE ".*NC_VERSION_MINOR *\([0-9]*\).*" "\\1" _netcdf_version_minor "${_netcdf_version_lines}") string(REGEX REPLACE ".*NC_VERSION_PATCH *\([0-9]*\).*" "\\1" _netcdf_version_patch "${_netcdf_version_lines}") string(REGEX REPLACE ".*NC_VERSION_NOTE *\"\([^\"]*\)\".*" "\\1" _netcdf_version_note "${_netcdf_version_lines}") + if (NOT _netcdf_version_note STREQUAL "") + # Make development version compare higher than any patch level + set(_netcdf_version_note ".99") + endif() set(NetCDF_VERSION "${_netcdf_version_major}.${_netcdf_version_minor}.${_netcdf_version_patch}${_netcdf_version_note}") unset(_netcdf_version_major) unset(_netcdf_version_minor) From e5d2a59cf69c9c140935d3b28c72690b3a3e7455 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 17 Feb 2020 14:17:34 +0000 Subject: [PATCH 244/428] CMake: Add missing dependencies to install config file --- bout++Config.cmake.in | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/bout++Config.cmake.in b/bout++Config.cmake.in index f8c53dd75b..21bed28430 100644 --- a/bout++Config.cmake.in +++ b/bout++Config.cmake.in @@ -44,6 +44,12 @@ endif() if(EXISTS "@NetCDF_ROOT@") set(NetCDF_ROOT "@NetCDF_ROOT@") endif() +if(EXISTS "@HDF5F_ROOT@") + set(HDF5F_ROOT "@HDF5F_ROOT@") +endif() +if(EXISTS "@PVODE_ROOT@") + set(PVODE_ROOT "@PVODE_ROOT@") +endif() if(@BOUT_USE_SYSTEM_MPARK_VARIANT@) set(mpark_variant_ROOT "@mpark_variant_ROOT@") @@ -61,6 +67,12 @@ endif() if (BOUT_HAS_NETCDF) find_dependency(NetCDF @NetCDF_VERSION@ EXACT) endif() +if (BOUT_HAS_HDF5) + find_dependency(HDF5 @HDF5_VERSION@) +endif() +if (BOUT_HAS_PVODE) + find_dependency(PVODE @PVODE_VERSION@) +endif() if (BOUT_HAS_FFTW) find_dependency(FFTW) endif() From 9a05b93c6d3c77b8169a4c05b4fbf1a19e96a7dc Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 17 Feb 2020 14:17:56 +0000 Subject: [PATCH 245/428] CMake: 
Don't require exact version matches in install config file --- bout++Config.cmake.in | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/bout++Config.cmake.in b/bout++Config.cmake.in index 21bed28430..d0057816f2 100644 --- a/bout++Config.cmake.in +++ b/bout++Config.cmake.in @@ -65,7 +65,7 @@ if (BOUT_USE_OPENMP) find_dependency(OpenMP) endif() if (BOUT_HAS_NETCDF) - find_dependency(NetCDF @NetCDF_VERSION@ EXACT) + find_dependency(NetCDF @NetCDF_VERSION@) endif() if (BOUT_HAS_HDF5) find_dependency(HDF5 @HDF5_VERSION@) @@ -74,24 +74,24 @@ if (BOUT_HAS_PVODE) find_dependency(PVODE @PVODE_VERSION@) endif() if (BOUT_HAS_FFTW) - find_dependency(FFTW) + find_dependency(FFTW @FFTW_VERSION@) endif() if (BOUT_HAS_LAPACK) - find_dependency(LAPACK) + find_dependency(LAPACK @LAPACK_VERSION@) endif() if (BOUT_HAS_PETSC) - find_dependency(PETSc @PETSC_VERSION@ EXACT) + find_dependency(PETSc @PETSC_VERSION@) endif() if (BOUT_HAS_SUNDIALS) - find_dependency(SUNDIALS @SUNDIALS_VERSION@ EXACT) + find_dependency(SUNDIALS @SUNDIALS_VERSION@) endif() if (BOUT_HAS_GETTEXT) - find_dependency(Gettext) - find_dependency(Intl) + find_dependency(Gettext @Gettext_VERSION@) + find_dependency(Intl @Intl_VERSION@) endif() -find_dependency(mpark_variant) +find_dependency(mpark_variant @mpark_variant_VERSION@) if (BOUT_HAS_SLEPC) - find_dependency(SLEPc @SLEPC_VERSION@ EXACT) + find_dependency(SLEPc @SLEPC_VERSION@) endif() if (BOUT_HAS_SCOREP) find_dependency(ScoreP) From 7c35d99a7d8bcde5fe3b139b120839b606fd35d0 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 6 Mar 2020 14:17:11 +0000 Subject: [PATCH 246/428] Fix (almost) all remaining uses of shell("make") in runtests MMS/advection is a bit weird, using symlinks and nested directories. Probably a better way of doing that --- examples/finite-volume/fluid/runtest | 6 ++---- examples/laplacexy/laplace_perp/runtest | 5 ++--- tests/MMS/GBS/runtest-slab2d | 5 ++--- tests/MMS/GBS/runtest-slab3d | 6 ++---- tests/MMS/diffusion/runtest | 6 ++---- tests/MMS/diffusion2/runtest | 6 ++---- tests/MMS/elm-pb/runtest.broken | 6 ++---- tests/MMS/fieldalign/runtest.broken | 5 ++--- tests/MMS/hw/runtest | 6 ++---- tests/MMS/laplace/runtest | 6 ++---- tests/MMS/spatial/advection/runtest | 5 ++--- tests/MMS/spatial/d2dx2/runtest | 6 ++---- tests/MMS/spatial/d2dz2/runtest | 6 ++---- tests/MMS/spatial/diffusion/runtest | 6 ++---- tests/MMS/spatial/fci/runtest | 5 ++--- tests/MMS/time/runtest | 6 ++---- tests/MMS/tokamak/runtest.broken | 7 ++----- tests/MMS/wave-1d-y/runtest | 6 ++---- tests/MMS/wave-1d/runtest | 6 ++---- .../test-multigrid_laplace/runtest_multiple_grids | 6 ++---- tests/integrated/test-multigrid_laplace/runtest_unsheared | 6 ++---- .../integrated/test-naulin-laplace/runtest_multiple_grids | 7 ++----- tests/integrated/test-naulin-laplace/runtest_unsheared | 7 ++----- tests/integrated/test-region-iterator/runtest | 6 ++---- tests/integrated/test-solver/runtest | 5 ++--- tests/integrated/test-squash/runtest | 4 ++-- tests/integrated/test-stopCheck-file/runtest | 6 ++---- 27 files changed, 54 insertions(+), 103 deletions(-) diff --git a/examples/finite-volume/fluid/runtest b/examples/finite-volume/fluid/runtest index e1b4088c53..aa07749c43 100755 --- a/examples/finite-volume/fluid/runtest +++ b/examples/finite-volume/fluid/runtest @@ -10,15 +10,13 @@ try: except: pass -from boututils.run_wrapper import shell, launch, getmpirun +from boututils.run_wrapper import shell, launch, getmpirun, build_and_log from boutdata.collect import collect from 
numpy import sqrt, max, abs, mean, array, log, concatenate - -print("Making fluid model MMS test") -shell("make > make.log") +build_and_log("Fluid model MMS test") # List of NY values to use nylist = [20, 40, 80, 160, 320, 640, 1280] diff --git a/examples/laplacexy/laplace_perp/runtest b/examples/laplacexy/laplace_perp/runtest index ffaf48bff0..6acdcffe65 100755 --- a/examples/laplacexy/laplace_perp/runtest +++ b/examples/laplacexy/laplace_perp/runtest @@ -2,7 +2,7 @@ from __future__ import print_function -from boututils.run_wrapper import shell, launch +from boututils.run_wrapper import build_and_log, launch from boutdata.collect import collect from sys import stdout, exit @@ -14,8 +14,7 @@ path = "torus" # Set this to False to turn off plotting interactive = True -print("Making test") -shell("make > make.log") +build_and_log("Laplace perp test") print("Running test") s, out = launch("./test -d "+path, nproc=1, pipe=True) diff --git a/tests/MMS/GBS/runtest-slab2d b/tests/MMS/GBS/runtest-slab2d index 037988b0fc..d468bc2534 100755 --- a/tests/MMS/GBS/runtest-slab2d +++ b/tests/MMS/GBS/runtest-slab2d @@ -6,15 +6,14 @@ from __future__ import division from __future__ import print_function from builtins import str -from boututils.run_wrapper import shell, launch_safe +from boututils.run_wrapper import shell, launch_safe, build_and_log from boutdata.collect import collect from numpy import sqrt, max, abs, mean, array, log, concatenate +build_and_log("MMS test") -print("Making MMS test") -shell("make > make.log") # List of NX values to use nxlist = [16, 32, 64, 128, 256] diff --git a/tests/MMS/GBS/runtest-slab3d b/tests/MMS/GBS/runtest-slab3d index ff95aa3f5e..5d73e9a3ff 100755 --- a/tests/MMS/GBS/runtest-slab3d +++ b/tests/MMS/GBS/runtest-slab3d @@ -7,7 +7,7 @@ from __future__ import print_function from builtins import zip from builtins import str -from boututils.run_wrapper import shell, launch_safe +from boututils.run_wrapper import shell, launch_safe, build_and_log from boutdata.collect import collect from numpy import sqrt, max, abs, mean, array, log, concatenate @@ -15,9 +15,7 @@ from numpy import sqrt, max, abs, mean, array, log, concatenate import pickle - -print("Making MMS test") -shell("make > make.log") +build_and_log("MMS test") # List of NX values to use nxlist = [8, 16, 32, 64, 128]#, 256] diff --git a/tests/MMS/diffusion/runtest b/tests/MMS/diffusion/runtest index d19f9b8457..e1c04252c6 100755 --- a/tests/MMS/diffusion/runtest +++ b/tests/MMS/diffusion/runtest @@ -9,15 +9,13 @@ try: except: pass -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect from numpy import sqrt, max, abs, mean, array, log - -print("Making MMS diffusion test") -shell_safe("make > make.log") +build_and_log("MMS diffusion test") # List of NX values to use nxlist = [4, 8, 16, 32, 64, 128] diff --git a/tests/MMS/diffusion2/runtest b/tests/MMS/diffusion2/runtest index 3278974f77..d648345ea0 100755 --- a/tests/MMS/diffusion2/runtest +++ b/tests/MMS/diffusion2/runtest @@ -12,7 +12,7 @@ from __future__ import division from __future__ import print_function from builtins import str -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect from numpy import sqrt, max, abs, mean, array, log @@ -20,9 +20,7 @@ from numpy import sqrt, max, abs, mean, array, log from os.path import join - 
-print("Making MMS diffusion test") -shell_safe("make > make.log") +build_and_log("MMS diffusion test") # List of input directories inputs = [ diff --git a/tests/MMS/elm-pb/runtest.broken b/tests/MMS/elm-pb/runtest.broken index 6890ac0cdc..e62fcac971 100755 --- a/tests/MMS/elm-pb/runtest.broken +++ b/tests/MMS/elm-pb/runtest.broken @@ -12,7 +12,7 @@ from __future__ import print_function from builtins import zip from builtins import str -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, shell_safe, launch_safe, build_and_log from boututils.datafile import DataFile from boutdata.collect import collect @@ -52,9 +52,7 @@ shape.add(J0, "Jpar0") shape.add(bxcvz, "bxcvz") - -print("Making MMS elm-pb test") -shell_safe("make > make.log") +build_and_log("MMS elm-pb test") # List of NX values to use nxlist = [8, 16, 32, 64]#, 128]#, 256] diff --git a/tests/MMS/fieldalign/runtest.broken b/tests/MMS/fieldalign/runtest.broken index 23b9bb7c44..fc5e17ab14 100755 --- a/tests/MMS/fieldalign/runtest.broken +++ b/tests/MMS/fieldalign/runtest.broken @@ -8,7 +8,7 @@ from __future__ import division from __future__ import print_function from builtins import str -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect import pickle @@ -19,8 +19,7 @@ from os.path import join import time -print("Making MMS test") -shell_safe("make > make.log") +build_and_log("MMS test") #nxlist = [256, 128, 64, 32, 16, 8] # do in reverse order to save disk space nxlist = [32, 64] diff --git a/tests/MMS/hw/runtest b/tests/MMS/hw/runtest index 9569532be2..acf04dba6e 100755 --- a/tests/MMS/hw/runtest +++ b/tests/MMS/hw/runtest @@ -8,15 +8,13 @@ from __future__ import division from __future__ import print_function from builtins import str -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect from numpy import sqrt, max, abs, mean, array, log, concatenate - -print("Making MMS test") -shell_safe("make > make.log") +build_and_log("MMS test") # List of NX values to use nxlist = [32, 64] diff --git a/tests/MMS/laplace/runtest b/tests/MMS/laplace/runtest index 78a684e357..0ebb01af60 100755 --- a/tests/MMS/laplace/runtest +++ b/tests/MMS/laplace/runtest @@ -8,15 +8,13 @@ from __future__ import division from __future__ import print_function from builtins import str -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect from numpy import sqrt, max, abs, mean, array, log, concatenate - -print("Making MMS test") -shell_safe("make > make.log") +build_and_log("MMS test") # List of NX values to use nxlist = [16, 32, 64, 128, 256] diff --git a/tests/MMS/spatial/advection/runtest b/tests/MMS/spatial/advection/runtest index 394b586552..a0d1374d59 100755 --- a/tests/MMS/spatial/advection/runtest +++ b/tests/MMS/spatial/advection/runtest @@ -12,7 +12,7 @@ from __future__ import print_function from builtins import str from builtins import range -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boututils import check_scaling from boutdata.collect import collect @@ -29,8 +29,7 @@ parser.add_argument("-n", "--no-show", action="store_false", dest="show", cli_args = 
parser.parse_args() -print("Making MMS steady-state advection test") -shell_safe("make > make.log") +build_and_log("MMS steady-state advection test") # List of options to be passed for each test options = [ diff --git a/tests/MMS/spatial/d2dx2/runtest b/tests/MMS/spatial/d2dx2/runtest index f9c5b508d0..ff6a821234 100755 --- a/tests/MMS/spatial/d2dx2/runtest +++ b/tests/MMS/spatial/d2dx2/runtest @@ -4,7 +4,7 @@ from __future__ import division from __future__ import print_function from builtins import str -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect from numpy import sqrt, max, abs, mean, array, log @@ -17,9 +17,7 @@ except: #requires: all_tests - -print("Making MMS d2dx2 test") -shell_safe("make > make.log") +build_and_log("Making MMS d2dx2 test") nproc = 1 diff --git a/tests/MMS/spatial/d2dz2/runtest b/tests/MMS/spatial/d2dz2/runtest index 3e843785f0..c7e9b33d11 100755 --- a/tests/MMS/spatial/d2dz2/runtest +++ b/tests/MMS/spatial/d2dz2/runtest @@ -4,7 +4,7 @@ from __future__ import division from __future__ import print_function from builtins import str -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect from numpy import sqrt, max, abs, mean, array, log @@ -17,9 +17,7 @@ except: #requires: all_tests - -print("Making MMS d2dz2 test") -shell_safe("make > make.log") +build_and_log("Making MMS d2dz2 test") nproc = 1 diff --git a/tests/MMS/spatial/diffusion/runtest b/tests/MMS/spatial/diffusion/runtest index 0de6897a21..e40f185077 100755 --- a/tests/MMS/spatial/diffusion/runtest +++ b/tests/MMS/spatial/diffusion/runtest @@ -13,7 +13,7 @@ from __future__ import division from __future__ import print_function from builtins import str -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect from numpy import sqrt, max, abs, mean, array, log @@ -23,9 +23,7 @@ from os.path import join import matplotlib.pyplot as plt - -print("Making MMS diffusion test") -shell_safe("make > make.log") +build_and_log("MMS diffusion test") # List of input directories inputs = [ diff --git a/tests/MMS/spatial/fci/runtest b/tests/MMS/spatial/fci/runtest index dfbca4e505..ab4e26bbda 100755 --- a/tests/MMS/spatial/fci/runtest +++ b/tests/MMS/spatial/fci/runtest @@ -5,7 +5,7 @@ from __future__ import division from __future__ import print_function -from boututils.run_wrapper import shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, launch_safe from boutdata.collect import collect from numpy import array, log, polyfit, linspace, arange @@ -42,8 +42,7 @@ yperiodic = True failures = [] -print("Making fci MMS test") -shell_safe("make > make.log") +build_and_log("FCI MMS test") for nslice in nslices: error_2[nslice] = [] diff --git a/tests/MMS/time/runtest b/tests/MMS/time/runtest index 36832936e0..c9e3eff717 100755 --- a/tests/MMS/time/runtest +++ b/tests/MMS/time/runtest @@ -9,7 +9,7 @@ from __future__ import division from __future__ import print_function from builtins import str -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect #requires: all_tests @@ -28,9 +28,7 @@ except: plt=None - -print("Making MMS time integration test") 
-shell_safe("make > make.log") +build_and_log("MMS time integration test") # List of options to be passed for each test if "only_petsc" in sys.argv: diff --git a/tests/MMS/tokamak/runtest.broken b/tests/MMS/tokamak/runtest.broken index 3fb542250f..3f8051d431 100755 --- a/tests/MMS/tokamak/runtest.broken +++ b/tests/MMS/tokamak/runtest.broken @@ -12,7 +12,7 @@ from __future__ import print_function from builtins import zip from builtins import str -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boututils.datafile import DataFile from boutdata.collect import collect @@ -29,10 +29,7 @@ from sys import stdout from boutdata.mms import SimpleTokamak shape = SimpleTokamak() - - -print("Making MMS tokamak geometry test") -shell_safe("make > make.log") +build_and_log("MMS tokamak geometry test") # List of NX values to use nxlist = [4, 8, 16, 32]#, 64]#, 128]#, 256] diff --git a/tests/MMS/wave-1d-y/runtest b/tests/MMS/wave-1d-y/runtest index ea332f79cd..b8894070ee 100755 --- a/tests/MMS/wave-1d-y/runtest +++ b/tests/MMS/wave-1d-y/runtest @@ -10,7 +10,7 @@ try: except: pass -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect import pickle @@ -20,9 +20,7 @@ from sys import stdout from numpy import sqrt, max, abs, mean, array, log, concatenate, pi - -print("Making MMS wave test") -shell_safe("make > make.log") +build_and_log("Making MMS wave test") # List of NX values to use nylist = [8, 16, 32, 64, 128, 256] diff --git a/tests/MMS/wave-1d/runtest b/tests/MMS/wave-1d/runtest index 39278d500a..5122548d92 100755 --- a/tests/MMS/wave-1d/runtest +++ b/tests/MMS/wave-1d/runtest @@ -10,15 +10,13 @@ try: except: pass -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect from numpy import sqrt, max, abs, mean, array, log, concatenate - -print("Making MMS wave test") -shell_safe("make > make.log") +build_and_log("Making MMS wave test") # List of NX values to use nxlist = [8, 12, 20, 36, 68, 132] diff --git a/tests/integrated/test-multigrid_laplace/runtest_multiple_grids b/tests/integrated/test-multigrid_laplace/runtest_multiple_grids index f778718bbb..609f034c5a 100755 --- a/tests/integrated/test-multigrid_laplace/runtest_multiple_grids +++ b/tests/integrated/test-multigrid_laplace/runtest_multiple_grids @@ -13,14 +13,12 @@ except: tol = 2e-6 # Absolute tolerance numTests = 4 # We test 4 different boundary conditions (with slightly different inputs for each) -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect from sys import exit - -print("Making multigrid Laplacian inversion test") -shell_safe("make > make.log") +build_and_log("Multigrid Laplacian inversion test") print("Running multigrid Laplacian inversion test") success = True diff --git a/tests/integrated/test-multigrid_laplace/runtest_unsheared b/tests/integrated/test-multigrid_laplace/runtest_unsheared index 2c1c10553b..f395b9f372 100755 --- a/tests/integrated/test-multigrid_laplace/runtest_unsheared +++ b/tests/integrated/test-multigrid_laplace/runtest_unsheared @@ -13,14 +13,12 @@ except: tol = 1e-9 # Absolute tolerance numTests = 4 # We test 4 different boundary conditions (with slightly different inputs for 
each) -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect from sys import exit - -print("Making multigrid Laplacian inversion test") -shell_safe("make > make.log") +build_and_log("Making multigrid Laplacian inversion test") print("Running multigrid Laplacian inversion test") success = True diff --git a/tests/integrated/test-naulin-laplace/runtest_multiple_grids b/tests/integrated/test-naulin-laplace/runtest_multiple_grids index e3327a43a7..660b277405 100755 --- a/tests/integrated/test-naulin-laplace/runtest_multiple_grids +++ b/tests/integrated/test-naulin-laplace/runtest_multiple_grids @@ -13,15 +13,12 @@ except: tol = 2e-6 # Absolute tolerance numTests = 4 # We test 4 different boundary conditions (with slightly different inputs for each) -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect from sys import exit - -print("Making LaplaceNaulin inversion test") -shell("rm test_naulin_laplace") -shell_safe("make > make.log") +build_and_log("Making LaplaceNaulin inversion test") print("Running LaplaceNaulin inversion test") success = True diff --git a/tests/integrated/test-naulin-laplace/runtest_unsheared b/tests/integrated/test-naulin-laplace/runtest_unsheared index 0478f2e849..5f60e2fd18 100755 --- a/tests/integrated/test-naulin-laplace/runtest_unsheared +++ b/tests/integrated/test-naulin-laplace/runtest_unsheared @@ -13,15 +13,12 @@ except: tol = 1e-9 # Absolute tolerance numTests = 4 # We test 4 different boundary conditions (with slightly different inputs for each) -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, build_and_log, launch_safe from boutdata.collect import collect from sys import exit - -print("Making LaplaceNaulin inversion test") -shell("rm test_naulin_laplace") -shell_safe("make > make.log") +build_and_log("LaplaceNaulin inversion test") print("Running LaplaceNaulin inversion test") success = True diff --git a/tests/integrated/test-region-iterator/runtest b/tests/integrated/test-region-iterator/runtest index 34d651dbd8..fad6cf3f42 100755 --- a/tests/integrated/test-region-iterator/runtest +++ b/tests/integrated/test-region-iterator/runtest @@ -9,14 +9,12 @@ try: from builtins import str except: pass -from boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, launch_safe from boutdata.collect import collect from sys import exit - -print("Making Region Iterator test") -s, out = shell_safe("make > make.log",pipe=True) +build_and_log("Region Iterator test") flags = [""] cmd = "./test_region_iterator" diff --git a/tests/integrated/test-solver/runtest b/tests/integrated/test-solver/runtest index d065bf92c7..607f3bb7ea 100755 --- a/tests/integrated/test-solver/runtest +++ b/tests/integrated/test-solver/runtest @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -from boututils.run_wrapper import shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, launch_safe from sys import exit @@ -8,8 +8,7 @@ nthreads = 1 nproc = 1 -print("Making solver test") -shell_safe("make > make.log") +build_and_log("Solver test") print("Running solver test") status, out = launch_safe("./test_solver", nproc=nproc, mthread=nthreads, pipe=True) diff --git a/tests/integrated/test-squash/runtest b/tests/integrated/test-squash/runtest index 
f01e4be1ec..0ce396084e 100755 --- a/tests/integrated/test-squash/runtest +++ b/tests/integrated/test-squash/runtest @@ -4,7 +4,7 @@ from boututils.datafile import DataFile import itertools import time import numpy as np -from boututils.run_wrapper import launch_safe, shell_safe +from boututils.run_wrapper import launch_safe, shell_safe, build_and_log import re # requires: all_tests @@ -67,7 +67,7 @@ def verify(f1, f2): raise RuntimeError("data mismatch in ", v, err, d1[v], d2[v]) -timed_shell_safe("make") +build_and_log("Squash test") # Run once to get normal data timed_shell_safe("./squash -q -q -q nout=2") diff --git a/tests/integrated/test-stopCheck-file/runtest b/tests/integrated/test-stopCheck-file/runtest index 7e8aa140b9..686f25c130 100755 --- a/tests/integrated/test-stopCheck-file/runtest +++ b/tests/integrated/test-stopCheck-file/runtest @@ -12,7 +12,7 @@ try: except: pass -from boututils.run_wrapper import shell, launch +from boututils.run_wrapper import build_and_log, launch from boutdata.collect import collect import numpy as np from sys import stdout, exit @@ -20,9 +20,7 @@ from sys import stdout, exit nproc = 1 - -print("Making stopCheck test") -shell("make > make.log") +build_and_log("Making stopCheck test") checkVal=[True,False] nstepExpect=[1,11] From 4a5146bc5b82a01de6af5abc747ac515540fe430 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 23 Mar 2020 16:48:04 +0000 Subject: [PATCH 247/428] CMake: Create PVODE config files properly --- externalpackages/PVODE/CMakeLists.txt | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/externalpackages/PVODE/CMakeLists.txt b/externalpackages/PVODE/CMakeLists.txt index 8e1bdfb90c..9c4778d142 100644 --- a/externalpackages/PVODE/CMakeLists.txt +++ b/externalpackages/PVODE/CMakeLists.txt @@ -58,10 +58,30 @@ target_link_libraries(pvpre PUBLIC MPI::MPI_CXX) include(GNUInstallDirs) install(TARGETS pvode pvpre - EXPORT bout++Targets + EXPORT PVODETargets LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}" ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}" INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}" ) install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) + +include(CMakePackageConfigHelpers) +write_basic_package_version_file( + PVODEConfigVersion.cmake + VERSION ${PACKAGE_VERSION} + COMPATIBILITY SameMajorVersion + ) + +install(EXPORT PVODETargets + FILE PVODETargets.cmake + NAMESPACE PVODE:: + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/PVODE" + ) + +export(EXPORT PVODETargets + FILE "${CMAKE_CURRENT_BINARY_DIR}/PVODEConfig.cmake" + NAMESPACE PVODE:: + ) + +export(PACKAGE PVODE) From 78b282c04a445a9a5558401bb78cfbfc91e0a8ca Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 23 Mar 2020 16:48:29 +0000 Subject: [PATCH 248/428] CMake: Enable using the build directory directly --- bout++Config.cmake.in | 12 ++++++++++++ manual/sphinx/user_docs/installing.rst | 6 +++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/bout++Config.cmake.in b/bout++Config.cmake.in index d0057816f2..a213781ec0 100644 --- a/bout++Config.cmake.in +++ b/bout++Config.cmake.in @@ -23,6 +23,18 @@ set(BOUT_HAS_GETTEXT @BOUT_HAS_GETTEXT@) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}") +# Enables the use of the build directory directly, without having to +# specify the paths for the external packages as well +if(EXISTS "@CMAKE_BINARY_DIR@/externalpackages/fmt") + list(APPEND CMAKE_PREFIX_PATH "@CMAKE_BINARY_DIR@/externalpackages/fmt") + list(APPEND 
CMAKE_PREFIX_PATH "@CMAKE_BINARY_DIR@/externalpackages/mpark.variant") +endif() + +# If using the build directory directly, we need the CMake modules too +if(EXISTS "@PROJECT_SOURCE_DIR@/cmake") + list(APPEND CMAKE_MODULE_PATH "@PROJECT_SOURCE_DIR@/cmake") +endif() + if(EXISTS "@PETSC_DIR@") set(PETSC_DIR "@PETSC_DIR@") endif() diff --git a/manual/sphinx/user_docs/installing.rst b/manual/sphinx/user_docs/installing.rst index 64207deda6..37da3c34fa 100644 --- a/manual/sphinx/user_docs/installing.rst +++ b/manual/sphinx/user_docs/installing.rst @@ -373,10 +373,10 @@ physics model in only four lines: add_executable(blob2d blob2d.cxx) target_link_libraries(blob2d PRIVATE bout++::bout++) -You just need to give CMake the location where you installed BOUT++ -via the ``CMAKE_PREFIX_PATH`` variable:: +You just need to give CMake the location where you built or installed +BOUT++ via the ``CMAKE_PREFIX_PATH`` variable:: - $ cmake . -B build -DCMAKE_PREFIX_PATH=/path/to/install/BOUT++ + $ cmake . -B build -DCMAKE_PREFIX_PATH=/path/to/built/BOUT++ .. _sec-config-nls: From 04dc1aeed56eb119ba0d726733bc820d2241a79a Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 23 Mar 2020 17:06:38 +0000 Subject: [PATCH 249/428] CMake: Add instructions for using BOUT++ from a subdirectory --- manual/sphinx/user_docs/installing.rst | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/manual/sphinx/user_docs/installing.rst b/manual/sphinx/user_docs/installing.rst index 37da3c34fa..a0e29114ca 100644 --- a/manual/sphinx/user_docs/installing.rst +++ b/manual/sphinx/user_docs/installing.rst @@ -378,6 +378,22 @@ BOUT++ via the ``CMAKE_PREFIX_PATH`` variable:: $ cmake . -B build -DCMAKE_PREFIX_PATH=/path/to/built/BOUT++ +If you want to modify BOUT++ along with developing your model, you may +instead wish to place the BOUT++ as a subdirectory of your model and +use ``add_subdirectory`` instead of ``find_package`` above: + +.. code-block:: cmake + + project(blob2d LANGUAGES CXX) + add_subdirectory(BOUT++/source) + add_executable(blob2d blob2d.cxx) + target_link_libraries(blob2d PRIVATE bout++::bout++) + +where ``BOUT++/source`` is the subdirectory containing the BOUT++ +source. Doing this has the advantage that any changes you make to +BOUT++ source files will trigger a rebuild of both the BOUT++ library +and your model when you next build your code. + .. 
_sec-config-nls: Natural Language Support From 1bb13168401962493efad2f4afa6ef9d7315a57d Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 16 Mar 2020 09:36:33 +0000 Subject: [PATCH 250/428] CMake: Add CMakeLists.txt for all examples --- CMakeLists.txt | 5 ++ .../2Dturbulence_multigrid/CMakeLists.txt | 10 ++++ examples/6field-simple/CMakeLists.txt | 10 ++++ examples/CMakeLists.txt | 48 +++++++++++++++++++ .../IMEX/advection-diffusion/CMakeLists.txt | 10 ++++ .../IMEX/advection-reaction/CMakeLists.txt | 10 ++++ examples/IMEX/diffusion-nl/CMakeLists.txt | 10 ++++ .../IMEX/drift-wave-constraint/CMakeLists.txt | 10 ++++ examples/IMEX/drift-wave/CMakeLists.txt | 10 ++++ examples/advdiff/CMakeLists.txt | 10 ++++ examples/advdiff2/CMakeLists.txt | 16 +++++++ examples/backtrace/CMakeLists.txt | 10 ++++ examples/blob2d-laplacexz/CMakeLists.txt | 10 ++++ examples/blob2d/CMakeLists.txt | 5 +- .../advection/CMakeLists.txt | 10 ++++ examples/conducting-wall-mode/CMakeLists.txt | 10 ++++ examples/conduction-snb/CMakeLists.txt | 10 ++++ examples/conduction/CMakeLists.txt | 10 ++++ .../constraints/alfven-wave/CMakeLists.txt | 10 ++++ .../constraints/laplace-dae/CMakeLists.txt | 10 ++++ examples/dalf3/CMakeLists.txt | 10 ++++ examples/dalf3/README.md | 14 ++++++ examples/eigen-box/CMakeLists.txt | 10 ++++ examples/elm-pb/CMakeLists.txt | 10 ++++ examples/em-drift/CMakeLists.txt | 10 ++++ examples/fci-wave-logn/CMakeLists.txt | 10 ++++ examples/fci-wave/CMakeLists.txt | 10 ++++ .../finite-volume/diffusion/CMakeLists.txt | 10 ++++ examples/finite-volume/fluid/CMakeLists.txt | 10 ++++ examples/finite-volume/test/CMakeLists.txt | 10 ++++ examples/gas-compress/CMakeLists.txt | 10 ++++ examples/gravity_reduced/CMakeLists.txt | 10 ++++ examples/gyro-gem/CMakeLists.txt | 10 ++++ examples/hasegawa-wakatani/CMakeLists.txt | 10 ++++ examples/invertable_operator/CMakeLists.txt | 13 +++++ examples/jorek-compare/CMakeLists.txt | 10 ++++ examples/lapd-drift/CMakeLists.txt | 10 ++++ examples/laplacexy/alfven-wave/CMakeLists.txt | 10 ++++ .../laplacexy/laplace_perp/CMakeLists.txt | 10 ++++ examples/laplacexy/simple/CMakeLists.txt | 10 ++++ examples/monitor-newapi/CMakeLists.txt | 10 ++++ examples/monitor/CMakeLists.txt | 10 ++++ examples/orszag-tang/CMakeLists.txt | 10 ++++ examples/preconditioning/wave/CMakeLists.txt | 10 ++++ examples/reconnect-2field/CMakeLists.txt | 10 ++++ examples/shear-alfven-wave/CMakeLists.txt | 10 ++++ examples/staggered_grid/CMakeLists.txt | 10 ++++ examples/subsampling/CMakeLists.txt | 10 ++++ examples/tokamak-2fluid/CMakeLists.txt | 10 ++++ examples/uedge-benchmark/CMakeLists.txt | 10 ++++ examples/wave-slab/CMakeLists.txt | 10 ++++ 51 files changed, 550 insertions(+), 1 deletion(-) create mode 100644 examples/2Dturbulence_multigrid/CMakeLists.txt create mode 100644 examples/6field-simple/CMakeLists.txt create mode 100644 examples/CMakeLists.txt create mode 100644 examples/IMEX/advection-diffusion/CMakeLists.txt create mode 100644 examples/IMEX/advection-reaction/CMakeLists.txt create mode 100644 examples/IMEX/diffusion-nl/CMakeLists.txt create mode 100644 examples/IMEX/drift-wave-constraint/CMakeLists.txt create mode 100644 examples/IMEX/drift-wave/CMakeLists.txt create mode 100644 examples/advdiff/CMakeLists.txt create mode 100644 examples/advdiff2/CMakeLists.txt create mode 100644 examples/backtrace/CMakeLists.txt create mode 100644 examples/blob2d-laplacexz/CMakeLists.txt create mode 100644 examples/boundary-conditions/advection/CMakeLists.txt create mode 100644 
examples/conducting-wall-mode/CMakeLists.txt create mode 100644 examples/conduction-snb/CMakeLists.txt create mode 100644 examples/conduction/CMakeLists.txt create mode 100644 examples/constraints/alfven-wave/CMakeLists.txt create mode 100644 examples/constraints/laplace-dae/CMakeLists.txt create mode 100644 examples/dalf3/CMakeLists.txt create mode 100644 examples/dalf3/README.md create mode 100644 examples/eigen-box/CMakeLists.txt create mode 100644 examples/elm-pb/CMakeLists.txt create mode 100644 examples/em-drift/CMakeLists.txt create mode 100644 examples/fci-wave-logn/CMakeLists.txt create mode 100644 examples/fci-wave/CMakeLists.txt create mode 100644 examples/finite-volume/diffusion/CMakeLists.txt create mode 100644 examples/finite-volume/fluid/CMakeLists.txt create mode 100644 examples/finite-volume/test/CMakeLists.txt create mode 100644 examples/gas-compress/CMakeLists.txt create mode 100644 examples/gravity_reduced/CMakeLists.txt create mode 100644 examples/gyro-gem/CMakeLists.txt create mode 100644 examples/hasegawa-wakatani/CMakeLists.txt create mode 100644 examples/invertable_operator/CMakeLists.txt create mode 100644 examples/jorek-compare/CMakeLists.txt create mode 100644 examples/lapd-drift/CMakeLists.txt create mode 100644 examples/laplacexy/alfven-wave/CMakeLists.txt create mode 100644 examples/laplacexy/laplace_perp/CMakeLists.txt create mode 100644 examples/laplacexy/simple/CMakeLists.txt create mode 100644 examples/monitor-newapi/CMakeLists.txt create mode 100644 examples/monitor/CMakeLists.txt create mode 100644 examples/orszag-tang/CMakeLists.txt create mode 100644 examples/preconditioning/wave/CMakeLists.txt create mode 100644 examples/reconnect-2field/CMakeLists.txt create mode 100644 examples/shear-alfven-wave/CMakeLists.txt create mode 100644 examples/staggered_grid/CMakeLists.txt create mode 100644 examples/subsampling/CMakeLists.txt create mode 100644 examples/tokamak-2fluid/CMakeLists.txt create mode 100644 examples/uedge-benchmark/CMakeLists.txt create mode 100644 examples/wave-slab/CMakeLists.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index d6f75dba32..74724ca459 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -771,6 +771,11 @@ if(PACKAGE_TESTS) add_dependencies(check check-unit-tests check-integrated-tests) endif() +option(BOUT_BUILD_EXAMPLES "Build the examples" OFF) +if(BOUT_BUILD_EXAMPLES) + add_subdirectory(examples) +endif() + ################################################## # Installation diff --git a/examples/2Dturbulence_multigrid/CMakeLists.txt b/examples/2Dturbulence_multigrid/CMakeLists.txt new file mode 100644 index 0000000000..9981cc4b7c --- /dev/null +++ b/examples/2Dturbulence_multigrid/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(2Dturbulence_multigrid LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(esel esel.cxx) +target_link_libraries(esel PRIVATE bout++::bout++) diff --git a/examples/6field-simple/CMakeLists.txt b/examples/6field-simple/CMakeLists.txt new file mode 100644 index 0000000000..45b30a3fc7 --- /dev/null +++ b/examples/6field-simple/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(6field-simple LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(elm_6f elm_6f.cxx) +target_link_libraries(elm_6f PRIVATE bout++::bout++) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt new file mode 100644 index 0000000000..0a5f6fb239 --- 
/dev/null +++ b/examples/CMakeLists.txt @@ -0,0 +1,48 @@ +add_subdirectory(2Dturbulence_multigrid) +add_subdirectory(6field-simple) +add_subdirectory(IMEX/advection-diffusion) +add_subdirectory(IMEX/advection-reaction) +add_subdirectory(IMEX/diffusion-nl) +add_subdirectory(IMEX/drift-wave) +add_subdirectory(IMEX/drift-wave-constraint) +add_subdirectory(advdiff) +add_subdirectory(advdiff2) +add_subdirectory(backtrace) +add_subdirectory(blob2d) +add_subdirectory(blob2d-laplacexz) +add_subdirectory(boundary-conditions/advection) +add_subdirectory(conducting-wall-mode) +add_subdirectory(conduction) +add_subdirectory(conduction-snb) +add_subdirectory(constraints/alfven-wave) +add_subdirectory(constraints/laplace-dae) +add_subdirectory(dalf3) +add_subdirectory(eigen-box) +add_subdirectory(elm-pb) +add_subdirectory(em-drift) +add_subdirectory(fci-wave) +add_subdirectory(fci-wave-logn) +add_subdirectory(finite-volume/diffusion) +add_subdirectory(finite-volume/fluid) +add_subdirectory(finite-volume/test) +add_subdirectory(gas-compress) +add_subdirectory(gravity_reduced) +add_subdirectory(gyro-gem) +add_subdirectory(hasegawa-wakatani) +add_subdirectory(invertable_operator) +add_subdirectory(jorek-compare) +add_subdirectory(lapd-drift) +add_subdirectory(laplacexy/alfven-wave) +add_subdirectory(laplacexy/laplace_perp) +add_subdirectory(laplacexy/simple) +add_subdirectory(monitor) +add_subdirectory(monitor-newapi) +add_subdirectory(orszag-tang) +add_subdirectory(preconditioning/wave) +add_subdirectory(reconnect-2field) +add_subdirectory(shear-alfven-wave) +add_subdirectory(staggered_grid) +add_subdirectory(subsampling) +add_subdirectory(tokamak-2fluid) +add_subdirectory(uedge-benchmark) +add_subdirectory(wave-slab) diff --git a/examples/IMEX/advection-diffusion/CMakeLists.txt b/examples/IMEX/advection-diffusion/CMakeLists.txt new file mode 100644 index 0000000000..7570292fdd --- /dev/null +++ b/examples/IMEX/advection-diffusion/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(advection-diffusion LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(imex imex.cxx) +target_link_libraries(imex PRIVATE bout++::bout++) diff --git a/examples/IMEX/advection-reaction/CMakeLists.txt b/examples/IMEX/advection-reaction/CMakeLists.txt new file mode 100644 index 0000000000..0c0d2ec57f --- /dev/null +++ b/examples/IMEX/advection-reaction/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(advection-reaction LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(split_operator split_operator.cxx) +target_link_libraries(split_operator PRIVATE bout++::bout++) diff --git a/examples/IMEX/diffusion-nl/CMakeLists.txt b/examples/IMEX/diffusion-nl/CMakeLists.txt new file mode 100644 index 0000000000..757c1d29bc --- /dev/null +++ b/examples/IMEX/diffusion-nl/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(diffusion-nl LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(diffusion-nl diffusion-nl.cxx) +target_link_libraries(diffusion-nl PRIVATE bout++::bout++) diff --git a/examples/IMEX/drift-wave-constraint/CMakeLists.txt b/examples/IMEX/drift-wave-constraint/CMakeLists.txt new file mode 100644 index 0000000000..3ac0a9f45d --- /dev/null +++ b/examples/IMEX/drift-wave-constraint/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + 
+project(drift-wave-constraint LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(drift-wave-constraint test-drift.cxx) +target_link_libraries(drift-wave-constraint PRIVATE bout++::bout++) diff --git a/examples/IMEX/drift-wave/CMakeLists.txt b/examples/IMEX/drift-wave/CMakeLists.txt new file mode 100644 index 0000000000..dbc469cf12 --- /dev/null +++ b/examples/IMEX/drift-wave/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(drift-wave LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(test-drift test-drift.cxx) +target_link_libraries(test-drift PRIVATE bout++::bout++) diff --git a/examples/advdiff/CMakeLists.txt b/examples/advdiff/CMakeLists.txt new file mode 100644 index 0000000000..04049fff69 --- /dev/null +++ b/examples/advdiff/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(advdiff LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(advdiff advdiff.cxx) +target_link_libraries(advdiff PRIVATE bout++::bout++) diff --git a/examples/advdiff2/CMakeLists.txt b/examples/advdiff2/CMakeLists.txt new file mode 100644 index 0000000000..c8f85bc21f --- /dev/null +++ b/examples/advdiff2/CMakeLists.txt @@ -0,0 +1,16 @@ +cmake_minimum_required(VERSION 3.13) + +project(advdiff2 LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(advdiff2 + advdiff.cxx + globals.cxx + globals.hxx + header.hxx + init.cxx + run.cxx) +target_link_libraries(advdiff2 PRIVATE bout++::bout++) diff --git a/examples/backtrace/CMakeLists.txt b/examples/backtrace/CMakeLists.txt new file mode 100644 index 0000000000..c52fde2647 --- /dev/null +++ b/examples/backtrace/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(backtrace LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(backtrace backtrace.cxx) +target_link_libraries(backtrace PRIVATE bout++::bout++) diff --git a/examples/blob2d-laplacexz/CMakeLists.txt b/examples/blob2d-laplacexz/CMakeLists.txt new file mode 100644 index 0000000000..0e83085d30 --- /dev/null +++ b/examples/blob2d-laplacexz/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(blob2d-laplacexz LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(blob2d-laplacexz blob2d.cxx) +target_link_libraries(blob2d-laplacexz PRIVATE bout++::bout++) diff --git a/examples/blob2d/CMakeLists.txt b/examples/blob2d/CMakeLists.txt index dee5d08e41..17b24ec1d6 100644 --- a/examples/blob2d/CMakeLists.txt +++ b/examples/blob2d/CMakeLists.txt @@ -2,6 +2,9 @@ cmake_minimum_required(VERSION 3.13) project(blob2d LANGUAGES CXX) -find_package(bout++ REQUIRED) +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + add_executable(blob2d blob2d.cxx) target_link_libraries(blob2d PRIVATE bout++::bout++) diff --git a/examples/boundary-conditions/advection/CMakeLists.txt b/examples/boundary-conditions/advection/CMakeLists.txt new file mode 100644 index 0000000000..b18808df26 --- /dev/null +++ b/examples/boundary-conditions/advection/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(advection LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(advection advection.cxx) 
+target_link_libraries(advection PRIVATE bout++::bout++) diff --git a/examples/conducting-wall-mode/CMakeLists.txt b/examples/conducting-wall-mode/CMakeLists.txt new file mode 100644 index 0000000000..f6d7e6b341 --- /dev/null +++ b/examples/conducting-wall-mode/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(conducting-wall-mode LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(conducting-wall-mode cwm.cxx) +target_link_libraries(conducting-wall-mode PRIVATE bout++::bout++) diff --git a/examples/conduction-snb/CMakeLists.txt b/examples/conduction-snb/CMakeLists.txt new file mode 100644 index 0000000000..9e86818a04 --- /dev/null +++ b/examples/conduction-snb/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(conduction-snb LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(conduction-snb conduction-snb.cxx) +target_link_libraries(conduction-snb PRIVATE bout++::bout++) diff --git a/examples/conduction/CMakeLists.txt b/examples/conduction/CMakeLists.txt new file mode 100644 index 0000000000..ad653172f2 --- /dev/null +++ b/examples/conduction/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(conduction LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(conduction conduction.cxx) +target_link_libraries(conduction PRIVATE bout++::bout++) diff --git a/examples/constraints/alfven-wave/CMakeLists.txt b/examples/constraints/alfven-wave/CMakeLists.txt new file mode 100644 index 0000000000..c979a04faa --- /dev/null +++ b/examples/constraints/alfven-wave/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(constraints-alfven-wave LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(constraints-alfven-wave alfven.cxx) +target_link_libraries(constraints-alfven-wave PRIVATE bout++::bout++) diff --git a/examples/constraints/laplace-dae/CMakeLists.txt b/examples/constraints/laplace-dae/CMakeLists.txt new file mode 100644 index 0000000000..42257f1652 --- /dev/null +++ b/examples/constraints/laplace-dae/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(constraints-laplace-dae LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(constraints-laplace-dae laplace_dae.cxx) +target_link_libraries(constraints-laplace-dae PRIVATE bout++::bout++) diff --git a/examples/dalf3/CMakeLists.txt b/examples/dalf3/CMakeLists.txt new file mode 100644 index 0000000000..3d6a19d174 --- /dev/null +++ b/examples/dalf3/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(dalf3 LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(dalf3 dalf3.cxx) +target_link_libraries(dalf3 PRIVATE bout++::bout++) diff --git a/examples/dalf3/README.md b/examples/dalf3/README.md new file mode 100644 index 0000000000..a3751c0ef1 --- /dev/null +++ b/examples/dalf3/README.md @@ -0,0 +1,14 @@ +DALF3 model +=========== + +Four-field model for electron pressure, vorticity, A|| and parallel +velocity + +References: +- B.Scott, Plasma Phys. Contr. 
Fusion 39 (1997) 1635 +- B.Scott, "Drift Wave versus Interchange Turbulence in Tokamak + Geometry: Linear versus Nonlinear Mode Structure" + arXiv:physics/0207126 Feb 2001 + +NOTE: The normalisation used here is different to in the above +papers. See manual in doc/ subdirectory for details diff --git a/examples/eigen-box/CMakeLists.txt b/examples/eigen-box/CMakeLists.txt new file mode 100644 index 0000000000..9f14bf0ec6 --- /dev/null +++ b/examples/eigen-box/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(eigen-box LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(eigen-box eigen-box.cxx) +target_link_libraries(eigen-box PRIVATE bout++::bout++) diff --git a/examples/elm-pb/CMakeLists.txt b/examples/elm-pb/CMakeLists.txt new file mode 100644 index 0000000000..bd43246a32 --- /dev/null +++ b/examples/elm-pb/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(elm_pb LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(elm_pb elm_pb.cxx) +target_link_libraries(elm_pb PRIVATE bout++::bout++) diff --git a/examples/em-drift/CMakeLists.txt b/examples/em-drift/CMakeLists.txt new file mode 100644 index 0000000000..c9f3e0fb86 --- /dev/null +++ b/examples/em-drift/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(em-drift LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(em-drift 2fluid.cxx) +target_link_libraries(em-drift PRIVATE bout++::bout++) diff --git a/examples/fci-wave-logn/CMakeLists.txt b/examples/fci-wave-logn/CMakeLists.txt new file mode 100644 index 0000000000..5fd558c51d --- /dev/null +++ b/examples/fci-wave-logn/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(fci-wave-logn LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(fci-wave-logn fci-wave.cxx) +target_link_libraries(fci-wave-logn PRIVATE bout++::bout++) diff --git a/examples/fci-wave/CMakeLists.txt b/examples/fci-wave/CMakeLists.txt new file mode 100644 index 0000000000..d6ac77d0a0 --- /dev/null +++ b/examples/fci-wave/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(fci-wave LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(fci-wave fci-wave.cxx) +target_link_libraries(fci-wave PRIVATE bout++::bout++) diff --git a/examples/finite-volume/diffusion/CMakeLists.txt b/examples/finite-volume/diffusion/CMakeLists.txt new file mode 100644 index 0000000000..0092fee0c6 --- /dev/null +++ b/examples/finite-volume/diffusion/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(finite-volume-diffusion LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(finite-volume-diffusion diffusion.cxx) +target_link_libraries(finite-volume-diffusion PRIVATE bout++::bout++) diff --git a/examples/finite-volume/fluid/CMakeLists.txt b/examples/finite-volume/fluid/CMakeLists.txt new file mode 100644 index 0000000000..5164b830e1 --- /dev/null +++ b/examples/finite-volume/fluid/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(finite-volume-fluid LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(finite-volume-fluid fluid.cxx) 
+target_link_libraries(finite-volume-fluid PRIVATE bout++::bout++) diff --git a/examples/finite-volume/test/CMakeLists.txt b/examples/finite-volume/test/CMakeLists.txt new file mode 100644 index 0000000000..f73b809b51 --- /dev/null +++ b/examples/finite-volume/test/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(finite-volume-test LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(finite-volume-test finite_volume.cxx) +target_link_libraries(finite-volume-test PRIVATE bout++::bout++) diff --git a/examples/gas-compress/CMakeLists.txt b/examples/gas-compress/CMakeLists.txt new file mode 100644 index 0000000000..7246303fd0 --- /dev/null +++ b/examples/gas-compress/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(gas-compress LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(gas-compress gas_compress.cxx gas_compress.hxx) +target_link_libraries(gas-compress PRIVATE bout++::bout++) diff --git a/examples/gravity_reduced/CMakeLists.txt b/examples/gravity_reduced/CMakeLists.txt new file mode 100644 index 0000000000..18f4aa95df --- /dev/null +++ b/examples/gravity_reduced/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(gravity_reduced LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(gravity_reduced gravity_reduced.cxx) +target_link_libraries(gravity_reduced PRIVATE bout++::bout++) diff --git a/examples/gyro-gem/CMakeLists.txt b/examples/gyro-gem/CMakeLists.txt new file mode 100644 index 0000000000..6176aa2b52 --- /dev/null +++ b/examples/gyro-gem/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(gyro-gem LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(gyro-gem gem.cxx) +target_link_libraries(gyro-gem PRIVATE bout++::bout++) diff --git a/examples/hasegawa-wakatani/CMakeLists.txt b/examples/hasegawa-wakatani/CMakeLists.txt new file mode 100644 index 0000000000..2617b51c28 --- /dev/null +++ b/examples/hasegawa-wakatani/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(hasegama-wakatani LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(hasegama-wakatani hw.cxx) +target_link_libraries(hasegama-wakatani PRIVATE bout++::bout++) diff --git a/examples/invertable_operator/CMakeLists.txt b/examples/invertable_operator/CMakeLists.txt new file mode 100644 index 0000000000..23f4ed9056 --- /dev/null +++ b/examples/invertable_operator/CMakeLists.txt @@ -0,0 +1,13 @@ +cmake_minimum_required(VERSION 3.13) + +project(invertable_operator LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +if(NOT BOUT_HAS_PETSC) + message(FATAL_ERROR "This example requires PETSc. 
Please compile BOUT++ with PETSc") +endif() +add_executable(invertable_operator invertable_operator.cxx) +target_link_libraries(invertable_operator PRIVATE bout++::bout++) diff --git a/examples/jorek-compare/CMakeLists.txt b/examples/jorek-compare/CMakeLists.txt new file mode 100644 index 0000000000..b0c4766adb --- /dev/null +++ b/examples/jorek-compare/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(jorek-compare LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(jorek-compare jorek_compare.cxx) +target_link_libraries(jorek-compare PRIVATE bout++::bout++) diff --git a/examples/lapd-drift/CMakeLists.txt b/examples/lapd-drift/CMakeLists.txt new file mode 100644 index 0000000000..940be3c681 --- /dev/null +++ b/examples/lapd-drift/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(lapd-drift LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(lapd-drift lapd_drift.cxx) +target_link_libraries(lapd-drift PRIVATE bout++::bout++) diff --git a/examples/laplacexy/alfven-wave/CMakeLists.txt b/examples/laplacexy/alfven-wave/CMakeLists.txt new file mode 100644 index 0000000000..f2444ac0c7 --- /dev/null +++ b/examples/laplacexy/alfven-wave/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(laplacexy-alfven-wave LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(laplacexy-alfven-wave alfven.cxx) +target_link_libraries(laplacexy-alfven-wave PRIVATE bout++::bout++) diff --git a/examples/laplacexy/laplace_perp/CMakeLists.txt b/examples/laplacexy/laplace_perp/CMakeLists.txt new file mode 100644 index 0000000000..d29bc8d3c8 --- /dev/null +++ b/examples/laplacexy/laplace_perp/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(laplacexy-laplace_perp LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(laplacexy-laplace_perp test.cxx) +target_link_libraries(laplacexy-laplace_perp PRIVATE bout++::bout++) diff --git a/examples/laplacexy/simple/CMakeLists.txt b/examples/laplacexy/simple/CMakeLists.txt new file mode 100644 index 0000000000..08dd40ef01 --- /dev/null +++ b/examples/laplacexy/simple/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(laplacexy-simple LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(laplacexy-simple test-laplacexy.cxx) +target_link_libraries(laplacexy-simple PRIVATE bout++::bout++) diff --git a/examples/monitor-newapi/CMakeLists.txt b/examples/monitor-newapi/CMakeLists.txt new file mode 100644 index 0000000000..481f1efe87 --- /dev/null +++ b/examples/monitor-newapi/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(monitor-newapi LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(monitor-newapi monitor.cxx) +target_link_libraries(monitor-newapi PRIVATE bout++::bout++) diff --git a/examples/monitor/CMakeLists.txt b/examples/monitor/CMakeLists.txt new file mode 100644 index 0000000000..a5dc0af31d --- /dev/null +++ b/examples/monitor/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(monitor LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(monitor monitor.cxx) 
+target_link_libraries(monitor PRIVATE bout++::bout++) diff --git a/examples/orszag-tang/CMakeLists.txt b/examples/orszag-tang/CMakeLists.txt new file mode 100644 index 0000000000..9aa39b8a16 --- /dev/null +++ b/examples/orszag-tang/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(orszag-tang LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(orszag-tang mhd.cxx) +target_link_libraries(orszag-tang PRIVATE bout++::bout++) diff --git a/examples/preconditioning/wave/CMakeLists.txt b/examples/preconditioning/wave/CMakeLists.txt new file mode 100644 index 0000000000..93a9530ad7 --- /dev/null +++ b/examples/preconditioning/wave/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(preconditioning-wave LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(preconditioning-wave test_precon.cxx) +target_link_libraries(preconditioning-wave PRIVATE bout++::bout++) diff --git a/examples/reconnect-2field/CMakeLists.txt b/examples/reconnect-2field/CMakeLists.txt new file mode 100644 index 0000000000..08145414ab --- /dev/null +++ b/examples/reconnect-2field/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(reconnect-2field LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(reconnect-2field 2field.cxx) +target_link_libraries(reconnect-2field PRIVATE bout++::bout++) diff --git a/examples/shear-alfven-wave/CMakeLists.txt b/examples/shear-alfven-wave/CMakeLists.txt new file mode 100644 index 0000000000..a9295db3d5 --- /dev/null +++ b/examples/shear-alfven-wave/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(shear-alfven-wave LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(shear-alfven-wave 2fluid.cxx) +target_link_libraries(shear-alfven-wave PRIVATE bout++::bout++) diff --git a/examples/staggered_grid/CMakeLists.txt b/examples/staggered_grid/CMakeLists.txt new file mode 100644 index 0000000000..3554d9536d --- /dev/null +++ b/examples/staggered_grid/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(staggered_grid LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(staggered_grid test_staggered.cxx) +target_link_libraries(staggered_grid PRIVATE bout++::bout++) diff --git a/examples/subsampling/CMakeLists.txt b/examples/subsampling/CMakeLists.txt new file mode 100644 index 0000000000..6316837429 --- /dev/null +++ b/examples/subsampling/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(subsampling LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(subsampling monitor.cxx) +target_link_libraries(subsampling PRIVATE bout++::bout++) diff --git a/examples/tokamak-2fluid/CMakeLists.txt b/examples/tokamak-2fluid/CMakeLists.txt new file mode 100644 index 0000000000..246ca61dab --- /dev/null +++ b/examples/tokamak-2fluid/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(tokamak-2fluid LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(tokamak-2fluid 2fluid.cxx) +target_link_libraries(tokamak-2fluid PRIVATE bout++::bout++) diff --git a/examples/uedge-benchmark/CMakeLists.txt b/examples/uedge-benchmark/CMakeLists.txt new 
file mode 100644 index 0000000000..1fc82e580d --- /dev/null +++ b/examples/uedge-benchmark/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(uedge-benchmark LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(uedge-benchmark ue_bmark.cxx) +target_link_libraries(uedge-benchmark PRIVATE bout++::bout++) diff --git a/examples/wave-slab/CMakeLists.txt b/examples/wave-slab/CMakeLists.txt new file mode 100644 index 0000000000..f5d5037341 --- /dev/null +++ b/examples/wave-slab/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.13) + +project(wave-slab LANGUAGES CXX) + +if (NOT TARGET bout++::bout++) + find_package(bout++ REQUIRED) +endif() + +add_executable(wave-slab wave_slab.cxx) +target_link_libraries(wave-slab PRIVATE bout++::bout++) From 9773bbffaae66e3bc806149d1cca6bb7afcf44b3 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 24 Mar 2020 11:27:36 +0000 Subject: [PATCH 251/428] Merge [laplace] sections in input file --- examples/conducting-wall-mode/data/BOUT.inp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/examples/conducting-wall-mode/data/BOUT.inp b/examples/conducting-wall-mode/data/BOUT.inp index 30902575a2..700e0264df 100644 --- a/examples/conducting-wall-mode/data/BOUT.inp +++ b/examples/conducting-wall-mode/data/BOUT.inp @@ -52,6 +52,9 @@ upwind = C4 all_terms = true laplace_nonuniform = true +inner_boundary_flags = 2 # INVERT_AC_GRAD +outer_boundary_flags = 2 # INVERT_AC_GRAD + ################################################## # Solver settings @@ -80,10 +83,6 @@ bout_exb = true # Use the BOUT-06 subset of ExB terms filter_z = true # Filter in Z filter_z_mode = 1 # Keep this Z harmonic -[laplace] -inner_boundary_flags = 2 # INVERT_AC_GRAD -outer_boundary_flags = 2 # INVERT_AC_GRAD - ################################################## # settings for individual variables # The section "All" defines default settings for all variables From 067d2370f50dcc8abcb24f3d6f14c5b17ca47f75 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 15 Apr 2020 16:08:11 +0100 Subject: [PATCH 252/428] Minor fix for laplace-dae: use references for Options --- examples/constraints/laplace-dae/laplace_dae.cxx | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/examples/constraints/laplace-dae/laplace_dae.cxx b/examples/constraints/laplace-dae/laplace_dae.cxx index 6dc93839ed..ec268286c9 100644 --- a/examples/constraints/laplace-dae/laplace_dae.cxx +++ b/examples/constraints/laplace-dae/laplace_dae.cxx @@ -28,14 +28,13 @@ int physics_init(bool UNUSED(restarting)) { // Give the solver two RHS functions // Get options - auto globalOptions = Options::root(); - auto options = globalOptions["dae"]; + auto& globalOptions = Options::root(); + auto& options = globalOptions["dae"]; constraint = options["constraint"].withDefault(true); // Create a solver for the Laplacian phiSolver = Laplacian::create(); - // Just solving one variable, U SOLVE_FOR2(U, Apar); if(constraint) { From 16b67cca5cb74a84bad1a86e605b1666c9bc0fe0 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 15 Apr 2020 16:08:43 +0100 Subject: [PATCH 253/428] Minor LaTeX fix for finite volume docs, plus add links --- manual/sphinx/user_docs/differential_operators.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/manual/sphinx/user_docs/differential_operators.rst b/manual/sphinx/user_docs/differential_operators.rst index bab4c35262..19e19bd8a5 100644 --- 
a/manual/sphinx/user_docs/differential_operators.rst +++ b/manual/sphinx/user_docs/differential_operators.rst @@ -527,7 +527,8 @@ cell is added to another. There are several caveats to this: * There will always be a small rounding error, even with double precision. -The methods can be used by including the header:: +The methods can be used by including the +:doc:`header<../_breathe_autogen/file/fv__ops_8cxx>`:: #include "bout/fv_ops.hxx" @@ -535,7 +536,7 @@ The methods can be used by including the header:: **Note** The methods are defined in a namespace ``FV``. Some methods (those with templates) are defined in the header, but others -are defined in ``src/mesh/fv_ops.cxx``. +are defined in :doc:`src/mesh/fv_ops.cxx<../_breathe_autogen/file/fv__ops_8cxx>`. Parallel divergence ``Div_par`` @@ -615,7 +616,7 @@ The parallel diffusion operator calculates :math:`\nabla_{||}\left[k\partial_||\ bool bndry_flux=true); -This is done by calculating the flux :math:`k\partial_||\left(f\right)` on cell boundaries +This is done by calculating the flux :math:`k\partial_{||}\left(f\right)` on cell boundaries using central differencing. From a9f25326692039ad59edc575295994e27414099c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 15 Apr 2020 17:02:13 +0100 Subject: [PATCH 254/428] Remove unused example source file --- examples/advdiff2/CMakeLists.txt | 1 - examples/advdiff2/advdiff.cxx | 57 -------------------------------- 2 files changed, 58 deletions(-) delete mode 100644 examples/advdiff2/advdiff.cxx diff --git a/examples/advdiff2/CMakeLists.txt b/examples/advdiff2/CMakeLists.txt index c8f85bc21f..c429127b1a 100644 --- a/examples/advdiff2/CMakeLists.txt +++ b/examples/advdiff2/CMakeLists.txt @@ -7,7 +7,6 @@ if (NOT TARGET bout++::bout++) endif() add_executable(advdiff2 - advdiff.cxx globals.cxx globals.hxx header.hxx diff --git a/examples/advdiff2/advdiff.cxx b/examples/advdiff2/advdiff.cxx deleted file mode 100644 index f01581779f..0000000000 --- a/examples/advdiff2/advdiff.cxx +++ /dev/null @@ -1,57 +0,0 @@ -/******************************************************************* - * Advection-Diffusion Example - * - * MVU 19-july-2011 - *******************************************************************/ - -#include -#include -#include - -// Evolving variables -Field3D V; - - -int physics_init(bool restarting) -{ - // 2D initial profiles - Field2D V0; - - - // Read initial conditions - - mesh->get(V0, "V0"); - mesh->get(mesh->dx, "dx"); - mesh->get(mesh->dy, "dy"); - - - // read options - - - // Set evolving variables - bout_solve(V, "V"); - - - if(!restarting) { - // Set variables to these values (+ the initial perturbation) - // NOTE: This must be after the calls to bout_solve - V += V0; - } - - return 0; -} - - - -int physics_run(BoutReal t) -{ - // Run communications - mesh->communicate(V); - - - //ddt(V) = D2DX2(V) + 0.5*DDX(V) + D2DY2(V); - ddt(V) = DDX(V); - - - return 0; -} From e53a4c3d99ebb198c87148109a5d95823ba2c067 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 15 Apr 2020 16:07:51 +0100 Subject: [PATCH 255/428] Add READMEs for remaining examples --- examples/2Dturbulence_multigrid/README.md | 7 ++++ examples/6field-simple/README.md | 7 ++++ examples/IMEX/advection-reaction/README.md | 17 ++++++++ examples/backtrace/README.md | 42 +++++++++++++++++++ examples/blob2d-laplacexz/README.md | 12 ++++++ examples/blob2d/README | 12 ------ examples/blob2d/README.md | 15 +++++++ examples/conduction/README.md | 8 ++++ examples/constraints/alfven-wave/README.md | 22 ++++++++++ 
examples/constraints/laplace-dae/README.md | 5 +++ examples/finite-volume/README.md | 8 ++++ examples/finite-volume/diffusion/README.md | 7 ++++ examples/finite-volume/fluid/README.md | 7 +++- examples/gravity_reduced/README.md | 8 ++++ examples/gyro-gem/README.md | 9 ++++ examples/hasegawa-wakatani/README.md | 9 ++++ examples/invertable_operator/README.md | 7 ++++ examples/jorek-compare/README.md | 4 ++ .../lapd-drift/{AREADME.txt => README.md} | 3 ++ examples/laplacexy/README.md | 4 ++ examples/laplacexy/alfven-wave/README.md | 21 +++++----- examples/laplacexy/simple/README.md | 39 +++++++++-------- examples/monitor-newapi/README.md | 4 ++ examples/monitor/README.md | 4 ++ examples/preconditioning/wave/README.md | 4 ++ examples/reconnect-2field/README.md | 4 ++ examples/tokamak-2fluid/{README => README.md} | 13 ++---- examples/uedge-benchmark/README.md | 10 +++++ examples/wave-slab/README.md | 8 ++++ examples/zoidberg/README.md | 16 +++++++ 30 files changed, 282 insertions(+), 54 deletions(-) create mode 100644 examples/2Dturbulence_multigrid/README.md create mode 100644 examples/6field-simple/README.md create mode 100644 examples/IMEX/advection-reaction/README.md create mode 100644 examples/backtrace/README.md create mode 100644 examples/blob2d-laplacexz/README.md delete mode 100644 examples/blob2d/README create mode 100644 examples/blob2d/README.md create mode 100644 examples/conduction/README.md create mode 100644 examples/constraints/alfven-wave/README.md create mode 100644 examples/constraints/laplace-dae/README.md create mode 100644 examples/finite-volume/README.md create mode 100644 examples/finite-volume/diffusion/README.md create mode 100644 examples/gravity_reduced/README.md create mode 100644 examples/gyro-gem/README.md create mode 100644 examples/hasegawa-wakatani/README.md create mode 100644 examples/invertable_operator/README.md create mode 100644 examples/jorek-compare/README.md rename examples/lapd-drift/{AREADME.txt => README.md} (96%) create mode 100644 examples/laplacexy/README.md create mode 100644 examples/monitor-newapi/README.md create mode 100644 examples/monitor/README.md create mode 100644 examples/preconditioning/wave/README.md create mode 100644 examples/reconnect-2field/README.md rename examples/tokamak-2fluid/{README => README.md} (69%) create mode 100644 examples/uedge-benchmark/README.md create mode 100644 examples/wave-slab/README.md create mode 100644 examples/zoidberg/README.md diff --git a/examples/2Dturbulence_multigrid/README.md b/examples/2Dturbulence_multigrid/README.md new file mode 100644 index 0000000000..5edc4076ea --- /dev/null +++ b/examples/2Dturbulence_multigrid/README.md @@ -0,0 +1,7 @@ +2D Turbulence using multigrid +============================= + +This examples demonstrates the use of the multigrid Laplacian inversion solver +for a 2D turbulence based on the physics model implemented in `ESEL` +(Interchange turbulence in the TCV scrape-off layer, Garcia et al, PPCF 2006, +DOI: [10.1088/0741-3335/48/1/L01](https://doi.org/10.1088/0741-3335/48/1/L01)) diff --git a/examples/6field-simple/README.md b/examples/6field-simple/README.md new file mode 100644 index 0000000000..d70bff2eb0 --- /dev/null +++ b/examples/6field-simple/README.md @@ -0,0 +1,7 @@ +6-Field Simple +============== + +High-Beta Flute-Reduced MHD with 6-field of (`N_i`, `T_e`, `T_i`, `U`, `Psi`, +`Vipar`). This is a simple model for the BOUT++ workshop 2013 hand-on +exercise. Basically the same as Hazeltine-Meiss but different +normalisations. 
`diffusion_par` can turn on the parallel thermal conductivity diff --git a/examples/IMEX/advection-reaction/README.md b/examples/IMEX/advection-reaction/README.md new file mode 100644 index 0000000000..10791c995d --- /dev/null +++ b/examples/IMEX/advection-reaction/README.md @@ -0,0 +1,17 @@ +Advection-Reaction equation +=========================== + +Split into advective and reaction parts. Can be simulated using unsplit methods +(the two parts are just combined), but intended for testing split schemes. + +Currently one of the RHS functions has to be called `physics_run`, so here +`physics_run` contains advection term only. + +Grid file simple_xz.nc contains: +- `nx = 68` +- `ny = 5` +- `dx = 1. / 64` so X domain has length 1 + +In `data/BOUT.inp`: +- Domain is set to periodic in X +- The Z domain is set to size 1 (`1 / 2*pi`-th of a torus) diff --git a/examples/backtrace/README.md b/examples/backtrace/README.md new file mode 100644 index 0000000000..6901ec15c2 --- /dev/null +++ b/examples/backtrace/README.md @@ -0,0 +1,42 @@ +Backtrace Example +================= + +This demonstrates what the exception backtrace looks like when something goes +wrong in a physics model or in BOUT++. Requires both backtrace to be enabled +(done by default) and debug symbols (`--enable-debug` with `configure` or +`-DCMAKE_BUILD_TYPE=Debug` with `CMake` in both BOUT++ _and_ this example). + +The output should look something like: + +``` +... +c is inf +Error encountered +====== Exception path ====== +[bt] #11 ./build/backtrace() [0x42ee5e] +_start at ??:? +[bt] #10 /lib64/libc.so.6(__libc_start_main+0xf3) [0x7f7d8bfc11a3] +__libc_start_main at ??:? +[bt] #9 ./build/backtrace() [0x42f08f] +main at /path/to/BOUT++/include/boutmain.hxx:91 (discriminator 9) +[bt] #8 /path/to/BOUT++/build/libbout++.so(_ZN6Solver8setModelEP12PhysicsModel+0xb5) [0x7f7d8d1ce121] +Solver::setModel(PhysicsModel*) at /path/to/BOUT++/build/../src/solver/solver.cxx:92 +[bt] #7 /path/to/BOUT++/build/libbout++.so(_ZN12PhysicsModel10initialiseEP6Solver+0xbe) [0x7f7d8d1d6bf6] +PhysicsModel::initialise(Solver*) at /path/to/BOUT++/build/../include/bout/physicsmodel.hxx:79 (discriminator 5) +[bt] #6 ./build/backtrace() [0x433ceb] +LegacyModel::init(bool) at /path/to/BOUT++/include/boutmain.hxx:63 +[bt] #5 ./build/backtrace() [0x42f2f6] +physics_init(bool) at /path/to/BOUT++/examples/backtrace/backtrace.cxx:28 +[bt] #4 ./build/backtrace() [0x42f2dd] +f3() at /path/to/BOUT++/examples/backtrace/backtrace.cxx:22 +[bt] #3 ./build/backtrace() [0x42f2cc] +f2(int) at /path/to/BOUT++/examples/backtrace/backtrace.cxx:18 +[bt] #2 ./build/backtrace() [0x42f294] +f1() at /path/to/BOUT++/examples/backtrace/backtrace.cxx:14 (discriminator 2) +[bt] #1 ./build/backtrace(_ZN13BoutExceptionC1IA19_cJEEERKT_DpRKT0_+0x37) [0x4343c5] +BoutException::BoutException(char const (&) [19]) at /path/to/BOUT++/include/boutexception.hxx:26 (discriminator 2) +====== Back trace ====== + +====== Exception thrown ====== +Tomatoes are red? +``` diff --git a/examples/blob2d-laplacexz/README.md b/examples/blob2d-laplacexz/README.md new file mode 100644 index 0000000000..1482b4cccb --- /dev/null +++ b/examples/blob2d-laplacexz/README.md @@ -0,0 +1,12 @@ +blob2d using `LaplaceXZ` +======================== + +This is very similar to the 2D drift-reduced [blob2d +model](../examples/README.md), except that the perpendicular Laplacian inversion +solver for `phi` uses [`LaplaceXZ`][laplacexz] instead of +[`Laplace`][laplace]. 
See the linked documentation for details on the +differences + + +[laplacexz]: https://bout-dev.readthedocs.io/en/latest/user_docs/laplacian.html#laplacexz +[laplace]: https://bout-dev.readthedocs.io/en/latest/user_docs/laplacian.html#laplacian-inversion diff --git a/examples/blob2d/README b/examples/blob2d/README deleted file mode 100644 index 453ad45f67..0000000000 --- a/examples/blob2d/README +++ /dev/null @@ -1,12 +0,0 @@ -This module allows 2D simulations of blobs in a slab geometry with curvature - -The model is gyro-bohm normalized (length scales normalized to rho_s, time normalized to Omega_i) - -Input test cases -================ - -delta_0.25 - Blob with size 0.25 * delta_* -delta_1 - Blob with size delta_* -delta_10 - Blob with size 10 * delta_* - - diff --git a/examples/blob2d/README.md b/examples/blob2d/README.md new file mode 100644 index 0000000000..b454df35be --- /dev/null +++ b/examples/blob2d/README.md @@ -0,0 +1,15 @@ +blob2d +====== + +This module allows 2D simulations of drift-reduced blobs in a slab geometry with +curvature. + +The model is gyro-bohm normalized (length scales normalized to `rho_s`, time +normalized to `Omega_i`) + +Input test cases +---------------- + +- `delta_0.25` - Blob with size `0.25 * delta_*` +- `delta_1` - Blob with size `delta_*` +- `delta_10` - Blob with size `10 * delta_*` diff --git a/examples/conduction/README.md b/examples/conduction/README.md new file mode 100644 index 0000000000..47e5a9d0d5 --- /dev/null +++ b/examples/conduction/README.md @@ -0,0 +1,8 @@ +Temperature conduction +====================== + +This example solves the 1D temperature conduction problem. See the +[documentation][conduction] for a line-by-line walkthrough of this model. + + +[conduction]: https://bout-dev.readthedocs.io/en/latest/user_docs/physics_models.html#heat-conduction diff --git a/examples/constraints/alfven-wave/README.md b/examples/constraints/alfven-wave/README.md new file mode 100644 index 0000000000..cedff75d78 --- /dev/null +++ b/examples/constraints/alfven-wave/README.md @@ -0,0 +1,22 @@ +Alfvén wave +=========== + +This is a simple model of an Alfvén wave, evolving the vorticity and parallel +electromagnetic potential, which calculates the electrostatic potential by +constraining `Laplace_perp(phi) - vorticity` to be zero: + +``` + // Calculate parallel current from Apar + jpar = Delp2(Apar / (0.5*beta_e)); + + // Electrostatic potential, calculate by constraining + // Laplace_perp(phi) - Vort + // to be zero + ddt(phi) = Delp2(phi) - Vort; + + // Vorticity equation + ddt(Vort) = Div_par(jpar); + + // Parallel electric field + ddt(Apar) = Grad_par(phi); +``` diff --git a/examples/constraints/laplace-dae/README.md b/examples/constraints/laplace-dae/README.md new file mode 100644 index 0000000000..11bedf56ad --- /dev/null +++ b/examples/constraints/laplace-dae/README.md @@ -0,0 +1,5 @@ +Laplace DAE +=========== + +Demonstrates a simple Laplacian inversion using either constraints or +the usual `Laplacian`. Use `dae:constraint` to toggle the method used. diff --git a/examples/finite-volume/README.md b/examples/finite-volume/README.md new file mode 100644 index 0000000000..5a1b20d9c5 --- /dev/null +++ b/examples/finite-volume/README.md @@ -0,0 +1,8 @@ +Finite Volume methods +===================== + +The different models here demonstrate various uses of the finite volume +differencing operators. See [the finite volume documentation][fvdocs] for more +information.
+ +[fvdocs]: https://bout-dev.readthedocs.io/en/latest/user_docs/differential_operators.html#finite-volume-conservative-finite-difference-methods diff --git a/examples/finite-volume/diffusion/README.md b/examples/finite-volume/diffusion/README.md new file mode 100644 index 0000000000..b7813a7e80 --- /dev/null +++ b/examples/finite-volume/diffusion/README.md @@ -0,0 +1,7 @@ +Finite Volume parallel diffusion +================================ + +This is a simple example showing how to use the finite volume [parallel +diffusion][diffusion] operator. + +[diffusion]: https://bout-dev.readthedocs.io/en/latest/user_docs/differential_operators.html#parallel-diffusion diff --git a/examples/finite-volume/fluid/README.md b/examples/finite-volume/fluid/README.md index 5eae3fc8b9..d112da3039 100644 --- a/examples/finite-volume/fluid/README.md +++ b/examples/finite-volume/fluid/README.md @@ -17,9 +17,12 @@ Momentum equation: d(nv)/dt + Div(nv v) = -Grad(p) The advection terms (divergences on the left) are solved using -the FV::Div_par function in `bout/fv_ops.hxx`. This uses the MC +the `FV::Div_par` function in `bout/fv_ops.hxx`. This uses the MC slope limiter, together with a Lax flux at the local sound speed -to provide dissipation and minimise unphysical oscillations. +to provide dissipation and minimise unphysical oscillations. + +See also: +https://bout-dev.readthedocs.io/en/latest/user_docs/differential_operators.html#parallel-divergence-div-par MMS test -------- diff --git a/examples/gravity_reduced/README.md b/examples/gravity_reduced/README.md new file mode 100644 index 0000000000..53c1a322b1 --- /dev/null +++ b/examples/gravity_reduced/README.md @@ -0,0 +1,8 @@ +Flute-Reduced with Gravity +========================== + +Flute-Reduced MHD - including gravity term instead of curvature. Basically the +same as Hazeltine-Meiss but different normalisations and have gravity instead of +curvature. Evolving Vorticity U, Parallel electric field Psi, Parallel velocity +Vpar, Pressure p, and density rho. Have included compressional terms in Vpar and +in pressure and density evolution equations. diff --git a/examples/gyro-gem/README.md b/examples/gyro-gem/README.md new file mode 100644 index 0000000000..23d04a45ef --- /dev/null +++ b/examples/gyro-gem/README.md @@ -0,0 +1,9 @@ +GEM Gyro-fluid model +==================== + +6 moments for each species + +"GEM - An Energy Conserving Electromagnetic Gyrofluid Model" +by Bruce D Scott. arXiv:physics/0501124v1 23 Jan 2005 + +This version uses global parameters for collisionality etc. diff --git a/examples/hasegawa-wakatani/README.md b/examples/hasegawa-wakatani/README.md new file mode 100644 index 0000000000..c9b270cbd6 --- /dev/null +++ b/examples/hasegawa-wakatani/README.md @@ -0,0 +1,9 @@ +Hasegawa-Wakatani +================= + +The Hasegawa-Wakatani equations, implemented using Poisson brackets `[]`: + + ddt(n) = -[phi, n] + alpha*(phi - n) - kappa*DDZ(phi) + ddt(vorticity) = -[phi, vorticity] + alpha*(phi - n) + +where `alpha`, `kappa` are constants. diff --git a/examples/invertable_operator/README.md b/examples/invertable_operator/README.md new file mode 100644 index 0000000000..de767868dd --- /dev/null +++ b/examples/invertable_operator/README.md @@ -0,0 +1,7 @@ +Invertable Operators +==================== + +Demonstrates usage of the `InvertableOperator`. See the [documentation][docs] +for more information.
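For reference, the Hasegawa-Wakatani system shown a little further up translates almost term-for-term into BOUT++ operators. A minimal, hypothetical version of that right-hand side is sketched below (names and default parameter values are assumptions, not the shipped `hw.cxx`):

```cpp
// Sketch only: Hasegawa-Wakatani right-hand side, with phi obtained by
// inverting Delp2(phi) = vort with the standard Laplacian solver.
#include <bout/physicsmodel.hxx>
#include <derivs.hxx>
#include <difops.hxx>
#include <invert_laplace.hxx>

class MiniHW : public PhysicsModel {
  Field3D n, vort, phi;
  Laplacian* phiSolver{nullptr};
  BoutReal alpha, kappa;

protected:
  int init(bool) override {
    auto& opt = Options::root()["hw"];
    alpha = opt["alpha"].withDefault(1.0); // adiabaticity parameter
    kappa = opt["kappa"].withDefault(0.1); // background density gradient drive

    phiSolver = Laplacian::create();
    SOLVE_FOR(n);
    SOLVE_FOR(vort);
    SAVE_REPEAT(phi);
    phi = 0.0;
    return 0;
  }

  int rhs(BoutReal) override {
    phi = phiSolver->solve(vort, phi);   // invert Delp2(phi) = vort
    mesh->communicate(n, vort, phi);

    ddt(n) = -bracket(phi, n, BRACKET_ARAKAWA) + alpha * (phi - n) - kappa * DDZ(phi);
    ddt(vort) = -bracket(phi, vort, BRACKET_ARAKAWA) + alpha * (phi - n);
    return 0;
  }
};

BOUTMAIN(MiniHW);
```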
+ +[docs]: https://bout-dev.readthedocs.io/en/latest/user_docs/invertable_operator.html diff --git a/examples/jorek-compare/README.md b/examples/jorek-compare/README.md new file mode 100644 index 0000000000..2866f4536f --- /dev/null +++ b/examples/jorek-compare/README.md @@ -0,0 +1,4 @@ +JOREK Comparison +================ + +Implements a similar set of equations to [JOREK](https://www.jorek.eu) diff --git a/examples/lapd-drift/AREADME.txt b/examples/lapd-drift/README.md similarity index 96% rename from examples/lapd-drift/AREADME.txt rename to examples/lapd-drift/README.md index f1ec85486b..a29fcf1a63 100644 --- a/examples/lapd-drift/AREADME.txt +++ b/examples/lapd-drift/README.md @@ -1,3 +1,6 @@ +2-fluid Turbulence in a Linear device +===================================== + Benchmark cases for comparisons of BOUT 06 vs BOUT++ results Electrostatic drift wave diff --git a/examples/laplacexy/README.md b/examples/laplacexy/README.md new file mode 100644 index 0000000000..a172083e38 --- /dev/null +++ b/examples/laplacexy/README.md @@ -0,0 +1,4 @@ +`LaplaceXY` Examples +==================== + +These examples demonstrate various uses of the `LaplaceXY` inversion operator diff --git a/examples/laplacexy/alfven-wave/README.md b/examples/laplacexy/alfven-wave/README.md index 8d3b09ec7e..eff9c6c65d 100644 --- a/examples/laplacexy/alfven-wave/README.md +++ b/examples/laplacexy/alfven-wave/README.md @@ -3,20 +3,20 @@ Alfven wave test case Solves equations for the vorticity and the parallel electric field: -d/dt (Vort) = Div_par(jpar) -d/dt (A||) = Grad_par(phi) + d/dt (Vort) = Div_par(jpar) + d/dt (A||) = Grad_par(phi) -where jpar = Laplace_perp(A||) and Vort = Laplace_perp(phi) +where `jpar` = `Laplace_perp(A||)` and `Vort = Laplace_perp(phi)` Switches in the input file change between forms of these operators: -* split_n0 If true, the axisymmetric component (n=0) is solved - separately, using LaplaceXY. This includes the poloidal (Y) - derivatives in the vorticity inversion. -* newXZsolver If true, the LaplaceXZ solver is used for the n!=0 components - rather than the older Laplacian solver -* laplace_perp If true, Laplace_perp is used to calculate jpar from A||. - otherwise Delp2 is used. +* `split_n0` If true, the axisymmetric component (n=0) is solved + separately, using `LaplaceXY`. This includes the poloidal + (Y) derivatives in the vorticity inversion. +* `newXZsolver`: If true, the LaplaceXZ solver is used for the `n != 0` + components rather than the older Laplacian solver +* `laplace_perp`: If true, `Laplace_perp` is used to calculate `jpar` from + `A||`, otherwise `Delp2` is used. 
Test cases ---------- @@ -28,4 +28,3 @@ Circular cross-section, large aspect ratio plasma: Tokamak X-point geometry $ mpirun -np 8 ./alfven -d data - diff --git a/examples/laplacexy/simple/README.md b/examples/laplacexy/simple/README.md index 1a98acec4e..ce7cbebf00 100644 --- a/examples/laplacexy/simple/README.md +++ b/examples/laplacexy/simple/README.md @@ -12,23 +12,22 @@ Run with which should print the KSP norms from PETSc: - 0 KSP Residual norm 5.656854249492e+00 - 1 KSP Residual norm 4.732163974221e+00 - 2 KSP Residual norm 4.084280618934e+00 - 3 KSP Residual norm 3.390335900434e+00 - 4 KSP Residual norm 2.980304269384e+00 - 5 KSP Residual norm 2.583427730146e+00 - 6 KSP Residual norm 2.320399960793e+00 - 7 KSP Residual norm 2.059145598820e+00 - 8 KSP Residual norm 1.832451815744e+00 - 9 KSP Residual norm 1.674179696341e+00 - 10 KSP Residual norm 1.589376411329e+00 - 11 KSP Residual norm 1.549055878503e+00 - 12 KSP Residual norm 1.517041587794e+00 - 13 KSP Residual norm 1.473466938498e+00 - 14 KSP Residual norm 1.382770759212e+00 - 15 KSP Residual norm 1.080408049371e+00 - 16 KSP Residual norm 4.309526296050e-01 - 17 KSP Residual norm 1.115269396077e-01 - 18 KSP Residual norm 4.334487475743e-13 - + 0 KSP Residual norm 5.656854249492e+00 + 1 KSP Residual norm 4.732163974221e+00 + 2 KSP Residual norm 4.084280618934e+00 + 3 KSP Residual norm 3.390335900434e+00 + 4 KSP Residual norm 2.980304269384e+00 + 5 KSP Residual norm 2.583427730146e+00 + 6 KSP Residual norm 2.320399960793e+00 + 7 KSP Residual norm 2.059145598820e+00 + 8 KSP Residual norm 1.832451815744e+00 + 9 KSP Residual norm 1.674179696341e+00 + 10 KSP Residual norm 1.589376411329e+00 + 11 KSP Residual norm 1.549055878503e+00 + 12 KSP Residual norm 1.517041587794e+00 + 13 KSP Residual norm 1.473466938498e+00 + 14 KSP Residual norm 1.382770759212e+00 + 15 KSP Residual norm 1.080408049371e+00 + 16 KSP Residual norm 4.309526296050e-01 + 17 KSP Residual norm 1.115269396077e-01 + 18 KSP Residual norm 4.334487475743e-13 diff --git a/examples/monitor-newapi/README.md b/examples/monitor-newapi/README.md new file mode 100644 index 0000000000..a56d11363c --- /dev/null +++ b/examples/monitor-newapi/README.md @@ -0,0 +1,4 @@ +Monitors inside a `PhysicsModel` +================================ + +Demonstrates how to override the virtual monitor functions in a `PhysicsModel` diff --git a/examples/monitor/README.md b/examples/monitor/README.md new file mode 100644 index 0000000000..b92a6755ae --- /dev/null +++ b/examples/monitor/README.md @@ -0,0 +1,4 @@ +Monitor example +=============== + +Demonstrates how to add monitors to a `Solver` diff --git a/examples/preconditioning/wave/README.md b/examples/preconditioning/wave/README.md new file mode 100644 index 0000000000..4fc18ea6d1 --- /dev/null +++ b/examples/preconditioning/wave/README.md @@ -0,0 +1,4 @@ +Implicit Preconditioning +======================== + +A simple demonstration of implicit preconditioning using the PETSc time solver diff --git a/examples/reconnect-2field/README.md b/examples/reconnect-2field/README.md new file mode 100644 index 0000000000..e631079e91 --- /dev/null +++ b/examples/reconnect-2field/README.md @@ -0,0 +1,4 @@ +Magnetic Reconnection +===================== + +2 field (Apar, vorticity) model for benchmarking simple slab reconnection model diff --git a/examples/tokamak-2fluid/README b/examples/tokamak-2fluid/README.md similarity index 69% rename from examples/tokamak-2fluid/README rename to examples/tokamak-2fluid/README.md index 0af22dac3f..cb281be529 100644 --- 
a/examples/tokamak-2fluid/README +++ b/examples/tokamak-2fluid/README.md @@ -1,20 +1,15 @@ - Tokamak edge turbulence case - +Tokamak edge turbulence +======================= Equilibrium from DIII-D tokamak, discharge 129131 - - Running the case ---------------- To set up the case, run the following in this directory -make + make Then run the 2fluid executable on >= 16 processors -e.g. - -mpirun -np 16 ./2fluid - + mpirun -np 16 ./2fluid diff --git a/examples/uedge-benchmark/README.md b/examples/uedge-benchmark/README.md new file mode 100644 index 0000000000..6afb7b2f27 --- /dev/null +++ b/examples/uedge-benchmark/README.md @@ -0,0 +1,10 @@ +UEDGE benchmark case +==================== + +Solves equations for +* density `Ni` +* parallel ion velocity `Vi` +* electron and ion temperatures `Te`, `Ti` + +Intended to be run for `nz = 1` (i.e. `X` and `Y` only) for comparison with +[`UEDGE`](https://github.com/LLNL/UEDGE) diff --git a/examples/wave-slab/README.md b/examples/wave-slab/README.md new file mode 100644 index 0000000000..fb68aa8b0e --- /dev/null +++ b/examples/wave-slab/README.md @@ -0,0 +1,8 @@ +Simple wave test in a sheared slab domain +========================================= + +Uses the same field-aligned Clebsch coordinate system as most BOUT++ tokamak +simulations. See the coordinates manual for details. + +Note: Here the only components of the coordinate system which are tested are +`g_22` (for `Grad_par`), and the twist shift angle. diff --git a/examples/zoidberg/README.md b/examples/zoidberg/README.md new file mode 100644 index 0000000000..b500c09b01 --- /dev/null +++ b/examples/zoidberg/README.md @@ -0,0 +1,16 @@ +Example Zoidberg grid generation +================================ + +Here are several examples of generating grids using Zoidberg for simulations +using the FCI parallel transform: + +- `example-straight-stellarator.py`: A "straight stellarator", a rotating + ellipse stellarator without curvature +- `straight-stellarator-curvilinear.py`: A straight stellarator but based on + curvilinear grids +- `tokamak.py`: A tokamak grid generated from an EFIT "geqdsk" file + +The remaining files demonstrate some of the plotting capabilities of Zoidberg: + +- `field_line.py`: 3D plots of magnetic field lines +- `poincare.py`: A Poincaré plot of a straight stellarator From fc944fda6305c6cc4f5db35d3fbb682d4598b453 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 27 Jul 2020 13:36:30 +0100 Subject: [PATCH 256/428] CMake: Add bout_add_example function Copies input files to build directory when building examples as part of library --- CMakeLists.txt | 15 +--- bout++Config.cmake.in | 2 + cmake/BOUT++functions.cmake | 84 +++++++++++++++++++ .../2Dturbulence_multigrid/CMakeLists.txt | 3 +- examples/6field-simple/CMakeLists.txt | 5 +- .../IMEX/advection-diffusion/CMakeLists.txt | 3 +- .../IMEX/advection-reaction/CMakeLists.txt | 5 +- examples/IMEX/diffusion-nl/CMakeLists.txt | 3 +- .../IMEX/drift-wave-constraint/CMakeLists.txt | 3 +- examples/IMEX/drift-wave/CMakeLists.txt | 3 +- examples/advdiff/CMakeLists.txt | 5 +- examples/advdiff2/CMakeLists.txt | 14 ++-- examples/backtrace/CMakeLists.txt | 3 +- examples/blob2d-laplacexz/CMakeLists.txt | 3 +- examples/blob2d/CMakeLists.txt | 5 +- .../advection/CMakeLists.txt | 8 +- examples/conducting-wall-mode/CMakeLists.txt | 3 +- examples/conduction-snb/CMakeLists.txt | 6 +- examples/conduction-snb/sinusoid.py | 4 +- examples/conduction-snb/step.py | 4 +- examples/conduction/CMakeLists.txt | 6 +- .../constraints/alfven-wave/CMakeLists.txt | 7 
+- .../constraints/laplace-dae/CMakeLists.txt | 5 +- examples/dalf3/CMakeLists.txt | 6 +- examples/eigen-box/CMakeLists.txt | 5 +- examples/elm-pb/CMakeLists.txt | 5 +- examples/em-drift/CMakeLists.txt | 5 +- examples/fci-wave-logn/CMakeLists.txt | 5 +- examples/fci-wave/CMakeLists.txt | 6 +- .../finite-volume/diffusion/CMakeLists.txt | 5 +- examples/finite-volume/fluid/CMakeLists.txt | 6 +- examples/finite-volume/test/CMakeLists.txt | 3 +- examples/gas-compress/CMakeLists.txt | 6 +- examples/gravity_reduced/CMakeLists.txt | 5 +- examples/gyro-gem/CMakeLists.txt | 5 +- examples/hasegawa-wakatani/CMakeLists.txt | 3 +- examples/invertable_operator/CMakeLists.txt | 4 +- examples/jorek-compare/CMakeLists.txt | 5 +- examples/lapd-drift/CMakeLists.txt | 11 ++- examples/laplacexy/alfven-wave/CMakeLists.txt | 6 +- .../laplacexy/laplace_perp/CMakeLists.txt | 6 +- examples/laplacexy/simple/CMakeLists.txt | 3 +- examples/monitor-newapi/CMakeLists.txt | 3 +- examples/monitor/CMakeLists.txt | 3 +- examples/orszag-tang/CMakeLists.txt | 5 +- examples/preconditioning/wave/CMakeLists.txt | 3 +- examples/reconnect-2field/CMakeLists.txt | 5 +- examples/shear-alfven-wave/CMakeLists.txt | 5 +- examples/staggered_grid/CMakeLists.txt | 6 +- examples/staggered_grid/runandplot | 6 +- examples/subsampling/CMakeLists.txt | 6 +- examples/tokamak-2fluid/CMakeLists.txt | 10 ++- examples/uedge-benchmark/CMakeLists.txt | 5 +- examples/wave-slab/CMakeLists.txt | 3 +- 54 files changed, 240 insertions(+), 119 deletions(-) create mode 100644 cmake/BOUT++functions.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 74724ca459..a4d60f08af 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -25,6 +25,7 @@ find_package(MPI REQUIRED) option(INSTALL_GTEST "Enable installation of googletest. (Projects embedding googletest may want to turn this OFF.)" OFF) set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) +include(BOUT++functions) option(GIT_SUBMODULE "Check submodules during build" ON) # Adapted from https://cliutils.gitlab.io/modern-cmake/chapters/projects/submodule.html @@ -650,14 +651,6 @@ target_compile_definitions(bout++ ################################################## # Tests -# Copy FILENAME from source directory to build directory -macro(bout_test_copy_file FILENAME) - configure_file( - ${CMAKE_CURRENT_SOURCE_DIR}/${FILENAME} - ${CMAKE_CURRENT_BINARY_DIR}/${FILENAME} - COPYONLY) -endmacro() - # Add a new integrated test. By default, the executable is named like # the first source, stripped of its file extension. 
# @@ -718,20 +711,20 @@ function(bout_add_integrated_test TESTNAME) # Set the actual test command if (BOUT_TEST_OPTIONS_USE_RUNTEST) add_test(NAME ${TESTNAME} COMMAND ./runtest) - bout_test_copy_file(runtest) + bout_copy_file(runtest) else() add_test(NAME ${TESTNAME} COMMAND ${TESTNAME}) endif() # Copy the input file if needed if (BOUT_TEST_OPTIONS_USE_DATA_BOUT_INP) - bout_test_copy_file(data/BOUT.inp) + bout_copy_file(data/BOUT.inp) endif() # Copy any other needed files if (BOUT_TEST_OPTIONS_EXTRA_FILES) foreach (FILE ${BOUT_TEST_OPTIONS_EXTRA_FILES}) - bout_test_copy_file("${FILE}") + bout_copy_file("${FILE}") endforeach() endif() diff --git a/bout++Config.cmake.in b/bout++Config.cmake.in index a213781ec0..cf12d9089e 100644 --- a/bout++Config.cmake.in +++ b/bout++Config.cmake.in @@ -35,6 +35,8 @@ if(EXISTS "@PROJECT_SOURCE_DIR@/cmake") list(APPEND CMAKE_MODULE_PATH "@PROJECT_SOURCE_DIR@/cmake") endif() +include(BOUT++functions) + if(EXISTS "@PETSC_DIR@") set(PETSC_DIR "@PETSC_DIR@") endif() diff --git a/cmake/BOUT++functions.cmake b/cmake/BOUT++functions.cmake new file mode 100644 index 0000000000..f283c0db7c --- /dev/null +++ b/cmake/BOUT++functions.cmake @@ -0,0 +1,84 @@ +# Helper functions for building BOUT++ models + +# Copy FILENAME from source directory to build directory +macro(bout_copy_file FILENAME) + configure_file( + ${CMAKE_CURRENT_SOURCE_DIR}/${FILENAME} + ${CMAKE_CURRENT_BINARY_DIR}/${FILENAME} + COPYONLY) +endmacro() + + +# Build a BOUT++ physics model +# +# This is basically just a simple wrapper around 'add_executable' and +# 'target_link_libraries'. +# +# Arguments: +# - MODEL: Name of the executable +# - SOURCES: List of source files to compile +function(bout_add_model MODEL) + cmake_parse_arguments(BOUT_MODEL_OPTIONS "" "" "SOURCES" ${ARGN}) + + if (NOT BOUT_MODEL_OPTIONS_SOURCES) + message(FATAL_ERROR "Required argument SOURCES missing from 'bout_add_model'") + endif() + + if ("SOURCES" IN_LIST BOUT_MODEL_OPTIONS_KEYWORDS_MISSING_VALUES) + message(FATAL_ERROR "SOURCES missing values from 'bout_add_model'") + endif() + + add_executable(${MODEL} ${BOUT_MODEL_OPTIONS_SOURCES}) + target_link_libraries(${MODEL} bout++::bout++) + target_include_directories(${MODEL} PRIVATE $) +endfunction() + + +# Build a BOUT++ example +# +# If called from a standalone project, just builds the example as a +# normal model. If called when building the BOUT++ library itself, +# also copy input files and optionally other files, like grid files, +# to the library build directory. +# +# Arguments: +# - EXAMPENAME: Name of the executable +# - SOURCES: List of source files to compile +# - DATA_DIRS: List of data directories to copy (default: 'data') +# - EXTRA_FILES: List of other files to copy +function(bout_add_example EXAMPLENAME) + set(multiValueArgs SOURCES DATA_DIRS EXTRA_FILES) + cmake_parse_arguments(BOUT_EXAMPLE_OPTIONS "" "" "${multiValueArgs}" ${ARGN}) + + bout_add_model(${EXAMPLENAME} SOURCES ${BOUT_EXAMPLE_OPTIONS_SOURCES}) + + # If this is a standalone project, we can stop here. 
Otherwise, we + # need to copy the various input files to the build directory + get_directory_property(HAS_PARENT PARENT_DIRECTORY) + if (NOT HAS_PARENT) + return() + endif() + + # Copy the documentation if it exists + if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/README.md) + bout_copy_file(README.md) + endif() + + # Copy the input file + if (NOT BOUT_EXAMPLE_OPTIONS_DATA_DIRS) + bout_copy_file(data/BOUT.inp) + else() + foreach (DATA_DIR IN LISTS BOUT_EXAMPLE_OPTIONS_DATA_DIRS) + bout_copy_file(${DATA_DIR}/BOUT.inp) + endforeach() + endif() + + # Copy any other needed files + if (BOUT_EXAMPLE_OPTIONS_EXTRA_FILES) + foreach (FILE ${BOUT_EXAMPLE_OPTIONS_EXTRA_FILES}) + bout_copy_file("${FILE}") + endforeach() + endif() + + set_target_properties(${EXAMPLENAME} PROPERTIES FOLDER examples) +endfunction() diff --git a/examples/2Dturbulence_multigrid/CMakeLists.txt b/examples/2Dturbulence_multigrid/CMakeLists.txt index 9981cc4b7c..369d2bf5bc 100644 --- a/examples/2Dturbulence_multigrid/CMakeLists.txt +++ b/examples/2Dturbulence_multigrid/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(esel esel.cxx) -target_link_libraries(esel PRIVATE bout++::bout++) +bout_add_example(esel SOURCES esel.cxx) diff --git a/examples/6field-simple/CMakeLists.txt b/examples/6field-simple/CMakeLists.txt index 45b30a3fc7..6a51327cda 100644 --- a/examples/6field-simple/CMakeLists.txt +++ b/examples/6field-simple/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(elm_6f elm_6f.cxx) -target_link_libraries(elm_6f PRIVATE bout++::bout++) +bout_add_example(elm_6f + SOURCES elm_6f.cxx + EXTRA_FILES cbm18_dens8.grid_nx68ny64.nc) diff --git a/examples/IMEX/advection-diffusion/CMakeLists.txt b/examples/IMEX/advection-diffusion/CMakeLists.txt index 7570292fdd..334d48d767 100644 --- a/examples/IMEX/advection-diffusion/CMakeLists.txt +++ b/examples/IMEX/advection-diffusion/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(imex imex.cxx) -target_link_libraries(imex PRIVATE bout++::bout++) +bout_add_example(imex SOURCES imex.cxx) diff --git a/examples/IMEX/advection-reaction/CMakeLists.txt b/examples/IMEX/advection-reaction/CMakeLists.txt index 0c0d2ec57f..03e8686371 100644 --- a/examples/IMEX/advection-reaction/CMakeLists.txt +++ b/examples/IMEX/advection-reaction/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(split_operator split_operator.cxx) -target_link_libraries(split_operator PRIVATE bout++::bout++) +bout_add_example(split_operator + SOURCES split_operator.cxx + EXTRA_FILES simple_xz.nc) diff --git a/examples/IMEX/diffusion-nl/CMakeLists.txt b/examples/IMEX/diffusion-nl/CMakeLists.txt index 757c1d29bc..664d16e042 100644 --- a/examples/IMEX/diffusion-nl/CMakeLists.txt +++ b/examples/IMEX/diffusion-nl/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(diffusion-nl diffusion-nl.cxx) -target_link_libraries(diffusion-nl PRIVATE bout++::bout++) +bout_add_example(diffusion-nl SOURCES diffusion-nl.cxx) diff --git a/examples/IMEX/drift-wave-constraint/CMakeLists.txt b/examples/IMEX/drift-wave-constraint/CMakeLists.txt index 3ac0a9f45d..5680b5367e 100644 --- a/examples/IMEX/drift-wave-constraint/CMakeLists.txt +++ b/examples/IMEX/drift-wave-constraint/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) 
find_package(bout++ REQUIRED) endif() -add_executable(drift-wave-constraint test-drift.cxx) -target_link_libraries(drift-wave-constraint PRIVATE bout++::bout++) +bout_add_example(drift-wave-constraint SOURCES test-drift.cxx) diff --git a/examples/IMEX/drift-wave/CMakeLists.txt b/examples/IMEX/drift-wave/CMakeLists.txt index dbc469cf12..e3e2a1b8ee 100644 --- a/examples/IMEX/drift-wave/CMakeLists.txt +++ b/examples/IMEX/drift-wave/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(test-drift test-drift.cxx) -target_link_libraries(test-drift PRIVATE bout++::bout++) +bout_add_example(test-drift SOURCES test-drift.cxx) diff --git a/examples/advdiff/CMakeLists.txt b/examples/advdiff/CMakeLists.txt index 04049fff69..531cfb743a 100644 --- a/examples/advdiff/CMakeLists.txt +++ b/examples/advdiff/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(advdiff advdiff.cxx) -target_link_libraries(advdiff PRIVATE bout++::bout++) +bout_add_example(advdiff + SOURCES advdiff.cxx + EXTRA_FILES slab.grd.nc) diff --git a/examples/advdiff2/CMakeLists.txt b/examples/advdiff2/CMakeLists.txt index c429127b1a..92fcbe543b 100644 --- a/examples/advdiff2/CMakeLists.txt +++ b/examples/advdiff2/CMakeLists.txt @@ -6,10 +6,10 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(advdiff2 - globals.cxx - globals.hxx - header.hxx - init.cxx - run.cxx) -target_link_libraries(advdiff2 PRIVATE bout++::bout++) +bout_add_example(advdiff2 + SOURCES globals.cxx + globals.hxx + header.hxx + init.cxx + run.cxx + EXTRA_FILES slab.grd.nc) diff --git a/examples/backtrace/CMakeLists.txt b/examples/backtrace/CMakeLists.txt index c52fde2647..4eac38e51a 100644 --- a/examples/backtrace/CMakeLists.txt +++ b/examples/backtrace/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(backtrace backtrace.cxx) -target_link_libraries(backtrace PRIVATE bout++::bout++) +bout_add_example(backtrace SOURCES backtrace.cxx) diff --git a/examples/blob2d-laplacexz/CMakeLists.txt b/examples/blob2d-laplacexz/CMakeLists.txt index 0e83085d30..17f9ebe97a 100644 --- a/examples/blob2d-laplacexz/CMakeLists.txt +++ b/examples/blob2d-laplacexz/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(blob2d-laplacexz blob2d.cxx) -target_link_libraries(blob2d-laplacexz PRIVATE bout++::bout++) +bout_add_example(blob2d-laplacexz SOURCES blob2d.cxx) diff --git a/examples/blob2d/CMakeLists.txt b/examples/blob2d/CMakeLists.txt index 17b24ec1d6..41803df112 100644 --- a/examples/blob2d/CMakeLists.txt +++ b/examples/blob2d/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(blob2d blob2d.cxx) -target_link_libraries(blob2d PRIVATE bout++::bout++) +bout_add_example(blob2d + SOURCES blob2d.cxx + DATA_DIRS delta_0.25 delta_1 delta_10 two_blobs) diff --git a/examples/boundary-conditions/advection/CMakeLists.txt b/examples/boundary-conditions/advection/CMakeLists.txt index b18808df26..a4ab73a24d 100644 --- a/examples/boundary-conditions/advection/CMakeLists.txt +++ b/examples/boundary-conditions/advection/CMakeLists.txt @@ -6,5 +6,9 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(advection advection.cxx) -target_link_libraries(advection PRIVATE bout++::bout++) +bout_add_example(advection + SOURCES 
advection.cxx + DATA_DIRS central-dirichlet + central-free + central-free-o3 + upwind) diff --git a/examples/conducting-wall-mode/CMakeLists.txt b/examples/conducting-wall-mode/CMakeLists.txt index f6d7e6b341..10e94e32fc 100644 --- a/examples/conducting-wall-mode/CMakeLists.txt +++ b/examples/conducting-wall-mode/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(conducting-wall-mode cwm.cxx) -target_link_libraries(conducting-wall-mode PRIVATE bout++::bout++) +bout_add_example(conducting-wall-mode SOURCES cwm.cxx) diff --git a/examples/conduction-snb/CMakeLists.txt b/examples/conduction-snb/CMakeLists.txt index 9e86818a04..45072dbe59 100644 --- a/examples/conduction-snb/CMakeLists.txt +++ b/examples/conduction-snb/CMakeLists.txt @@ -6,5 +6,7 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(conduction-snb conduction-snb.cxx) -target_link_libraries(conduction-snb PRIVATE bout++::bout++) +bout_add_example(conduction-snb + SOURCES conduction-snb.cxx + EXTRA_FILES fit_temperature.py sinusoid.py snb.csv spitzer-harm.csv step.py temperature.csv vfp.csv + DATA_DIRS data step) diff --git a/examples/conduction-snb/sinusoid.py b/examples/conduction-snb/sinusoid.py index 209d6de85e..10c81923a3 100644 --- a/examples/conduction-snb/sinusoid.py +++ b/examples/conduction-snb/sinusoid.py @@ -5,10 +5,10 @@ import numpy as np import matplotlib.pyplot as plt -from boututils.run_wrapper import shell_safe, launch_safe +from boututils.run_wrapper import launch_safe, build_and_log from boutdata.collect import collect -shell_safe("make > make.log") +build_and_log("Sinusoidal SNB") # Electron temperature in eV Telist = 10 ** np.linspace(0,3,20) diff --git a/examples/conduction-snb/step.py b/examples/conduction-snb/step.py index 3e49a58038..1f8933e66b 100644 --- a/examples/conduction-snb/step.py +++ b/examples/conduction-snb/step.py @@ -10,12 +10,12 @@ import numpy as np import matplotlib.pyplot as plt -from boututils.run_wrapper import shell_safe, launch_safe +from boututils.run_wrapper import build_and_log, launch_safe from boutdata.collect import collect path = "step" -shell_safe("make > make.log") +build_and_log("Step SNB") # Run the case s, out = launch_safe("./conduction-snb -d " + path, nproc=1, mthread=1, pipe=True) diff --git a/examples/conduction/CMakeLists.txt b/examples/conduction/CMakeLists.txt index ad653172f2..f26b838621 100644 --- a/examples/conduction/CMakeLists.txt +++ b/examples/conduction/CMakeLists.txt @@ -6,5 +6,7 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(conduction conduction.cxx) -target_link_libraries(conduction PRIVATE bout++::bout++) +bout_add_example(conduction + SOURCES conduction.cxx + DATA_DIRS data fromfile + EXTRA_FILES generate.py) diff --git a/examples/constraints/alfven-wave/CMakeLists.txt b/examples/constraints/alfven-wave/CMakeLists.txt index c979a04faa..a95ace4086 100644 --- a/examples/constraints/alfven-wave/CMakeLists.txt +++ b/examples/constraints/alfven-wave/CMakeLists.txt @@ -6,5 +6,8 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(constraints-alfven-wave alfven.cxx) -target_link_libraries(constraints-alfven-wave PRIVATE bout++::bout++) +bout_add_example(constraints-alfven-wave + SOURCES alfven.cxx + DATA_DIRS cbm18 data + EXTRA_FILES cbm18_dens8.grid_nx68ny64.nc d3d_119919.nc) + diff --git a/examples/constraints/laplace-dae/CMakeLists.txt 
b/examples/constraints/laplace-dae/CMakeLists.txt index 42257f1652..e487bd6a0a 100644 --- a/examples/constraints/laplace-dae/CMakeLists.txt +++ b/examples/constraints/laplace-dae/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(constraints-laplace-dae laplace_dae.cxx) -target_link_libraries(constraints-laplace-dae PRIVATE bout++::bout++) +bout_add_example(constraints-laplace-dae + SOURCES laplace_dae.cxx + EXTRA_FILES simple_xz.nc) diff --git a/examples/dalf3/CMakeLists.txt b/examples/dalf3/CMakeLists.txt index 3d6a19d174..5f5b3d701d 100644 --- a/examples/dalf3/CMakeLists.txt +++ b/examples/dalf3/CMakeLists.txt @@ -6,5 +6,7 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(dalf3 dalf3.cxx) -target_link_libraries(dalf3 PRIVATE bout++::bout++) + +bout_add_example(dalf3 + SOURCES dalf3.cxx + EXTRA_FILES cbm18_8_y064_x516_090309.nc) diff --git a/examples/eigen-box/CMakeLists.txt b/examples/eigen-box/CMakeLists.txt index 9f14bf0ec6..76af4fbaa6 100644 --- a/examples/eigen-box/CMakeLists.txt +++ b/examples/eigen-box/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(eigen-box eigen-box.cxx) -target_link_libraries(eigen-box PRIVATE bout++::bout++) +bout_add_example(eigen-box + SOURCES eigen-box.cxx + EXTRA_FILES eigenvals.py) diff --git a/examples/elm-pb/CMakeLists.txt b/examples/elm-pb/CMakeLists.txt index bd43246a32..fb65a8e7a2 100644 --- a/examples/elm-pb/CMakeLists.txt +++ b/examples/elm-pb/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(elm_pb elm_pb.cxx) -target_link_libraries(elm_pb PRIVATE bout++::bout++) +bout_add_example(elm_pb + SOURCES elm_pb.cxx + EXTRA_FILES cbm18_dens8.grid_nx68ny64.nc) diff --git a/examples/em-drift/CMakeLists.txt b/examples/em-drift/CMakeLists.txt index c9f3e0fb86..5a5fc8fe51 100644 --- a/examples/em-drift/CMakeLists.txt +++ b/examples/em-drift/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(em-drift 2fluid.cxx) -target_link_libraries(em-drift PRIVATE bout++::bout++) +bout_add_example(em-drift + SOURCES 2fluid.cxx + EXTRA_FILES uedge.grd_beta.nc) diff --git a/examples/fci-wave-logn/CMakeLists.txt b/examples/fci-wave-logn/CMakeLists.txt index 5fd558c51d..b49a070427 100644 --- a/examples/fci-wave-logn/CMakeLists.txt +++ b/examples/fci-wave-logn/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(fci-wave-logn fci-wave.cxx) -target_link_libraries(fci-wave-logn PRIVATE bout++::bout++) +bout_add_example(fci-wave-logn + SOURCES fci-wave.cxx + DATA_DIRS boundary div-integrate expanded) diff --git a/examples/fci-wave/CMakeLists.txt b/examples/fci-wave/CMakeLists.txt index d6ac77d0a0..2680b1310e 100644 --- a/examples/fci-wave/CMakeLists.txt +++ b/examples/fci-wave/CMakeLists.txt @@ -6,5 +6,7 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(fci-wave fci-wave.cxx) -target_link_libraries(fci-wave PRIVATE bout++::bout++) +bout_add_example(fci-wave + SOURCES fci-wave.cxx + DATA_DIRS div div-integrate logn + EXTRA_FILES compare-density.py) diff --git a/examples/finite-volume/diffusion/CMakeLists.txt b/examples/finite-volume/diffusion/CMakeLists.txt index 0092fee0c6..0dd7d220f6 100644 --- a/examples/finite-volume/diffusion/CMakeLists.txt +++ 
b/examples/finite-volume/diffusion/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(finite-volume-diffusion diffusion.cxx) -target_link_libraries(finite-volume-diffusion PRIVATE bout++::bout++) +bout_add_example(finite-volume-diffusion + SOURCES diffusion.cxx + EXTRA_FILES mms.py) diff --git a/examples/finite-volume/fluid/CMakeLists.txt b/examples/finite-volume/fluid/CMakeLists.txt index 5164b830e1..e9028459ec 100644 --- a/examples/finite-volume/fluid/CMakeLists.txt +++ b/examples/finite-volume/fluid/CMakeLists.txt @@ -6,5 +6,7 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(finite-volume-fluid fluid.cxx) -target_link_libraries(finite-volume-fluid PRIVATE bout++::bout++) +bout_add_example(finite-volume-fluid + SOURCES fluid.cxx + DATA_DIRS data mms + EXTRA_FILES mms.py) diff --git a/examples/finite-volume/test/CMakeLists.txt b/examples/finite-volume/test/CMakeLists.txt index f73b809b51..73fe99f960 100644 --- a/examples/finite-volume/test/CMakeLists.txt +++ b/examples/finite-volume/test/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(finite-volume-test finite_volume.cxx) -target_link_libraries(finite-volume-test PRIVATE bout++::bout++) +bout_add_example(finite-volume-test SOURCES finite_volume.cxx) diff --git a/examples/gas-compress/CMakeLists.txt b/examples/gas-compress/CMakeLists.txt index 7246303fd0..1b4416d32b 100644 --- a/examples/gas-compress/CMakeLists.txt +++ b/examples/gas-compress/CMakeLists.txt @@ -6,5 +6,7 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(gas-compress gas_compress.cxx gas_compress.hxx) -target_link_libraries(gas-compress PRIVATE bout++::bout++) +bout_add_example(gas-compress + SOURCES gas_compress.cxx gas_compress.hxx + DATA_DIRS rayleigh-taylor sod-shock + EXTRA_FILES rt.grd.nc sod.grd.nc) diff --git a/examples/gravity_reduced/CMakeLists.txt b/examples/gravity_reduced/CMakeLists.txt index 18f4aa95df..967472d4e6 100644 --- a/examples/gravity_reduced/CMakeLists.txt +++ b/examples/gravity_reduced/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(gravity_reduced gravity_reduced.cxx) -target_link_libraries(gravity_reduced PRIVATE bout++::bout++) +bout_add_example(gravity_reduced + SOURCES gravity_reduced.cxx + EXTRA_FILES slab_grid.nc) diff --git a/examples/gyro-gem/CMakeLists.txt b/examples/gyro-gem/CMakeLists.txt index 6176aa2b52..7189bb06b8 100644 --- a/examples/gyro-gem/CMakeLists.txt +++ b/examples/gyro-gem/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(gyro-gem gem.cxx) -target_link_libraries(gyro-gem PRIVATE bout++::bout++) +bout_add_example(gyro-gem + SOURCES gem.cxx + EXTRA_FILES cyclone_68x32.nc) diff --git a/examples/hasegawa-wakatani/CMakeLists.txt b/examples/hasegawa-wakatani/CMakeLists.txt index 2617b51c28..4d27772ad4 100644 --- a/examples/hasegawa-wakatani/CMakeLists.txt +++ b/examples/hasegawa-wakatani/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(hasegama-wakatani hw.cxx) -target_link_libraries(hasegama-wakatani PRIVATE bout++::bout++) +bout_add_example(hasegama-wakatani SOURCES hw.cxx) diff --git a/examples/invertable_operator/CMakeLists.txt b/examples/invertable_operator/CMakeLists.txt index 23f4ed9056..83f7c74e7a 100644 --- 
a/examples/invertable_operator/CMakeLists.txt +++ b/examples/invertable_operator/CMakeLists.txt @@ -9,5 +9,5 @@ endif() if(NOT BOUT_HAS_PETSC) message(FATAL_ERROR "This example requires PETSc. Please compile BOUT++ with PETSc") endif() -add_executable(invertable_operator invertable_operator.cxx) -target_link_libraries(invertable_operator PRIVATE bout++::bout++) + +bout_add_example(invertable_operator SOURCES invertable_operator.cxx) diff --git a/examples/jorek-compare/CMakeLists.txt b/examples/jorek-compare/CMakeLists.txt index b0c4766adb..fb9112a7a0 100644 --- a/examples/jorek-compare/CMakeLists.txt +++ b/examples/jorek-compare/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(jorek-compare jorek_compare.cxx) -target_link_libraries(jorek-compare PRIVATE bout++::bout++) +bout_add_example(jorek-compare + SOURCES jorek_compare.cxx + EXTRA_FILES d3d_119919.nc) diff --git a/examples/lapd-drift/CMakeLists.txt b/examples/lapd-drift/CMakeLists.txt index 940be3c681..250752303f 100644 --- a/examples/lapd-drift/CMakeLists.txt +++ b/examples/lapd-drift/CMakeLists.txt @@ -6,5 +6,12 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(lapd-drift lapd_drift.cxx) -target_link_libraries(lapd-drift PRIVATE bout++::bout++) +bout_add_example(lapd-drift + SOURCES lapd_drift.cxx + DATA_DIRS lapd + data + pisces + EXTRA_FILES BOUT.inp + BOUT.inp.nn + BOUT.inp.nn_zem + uedge.grd.nc) diff --git a/examples/laplacexy/alfven-wave/CMakeLists.txt b/examples/laplacexy/alfven-wave/CMakeLists.txt index f2444ac0c7..2423400519 100644 --- a/examples/laplacexy/alfven-wave/CMakeLists.txt +++ b/examples/laplacexy/alfven-wave/CMakeLists.txt @@ -6,5 +6,7 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(laplacexy-alfven-wave alfven.cxx) -target_link_libraries(laplacexy-alfven-wave PRIVATE bout++::bout++) +bout_add_example(laplacexy-alfven-wave + SOURCES alfven.cxx + DATA_DIRS cbm18 data + EXTRA_FILES cbm18_dens8.grid_nx68ny64.nc d3d_119919.nc) diff --git a/examples/laplacexy/laplace_perp/CMakeLists.txt b/examples/laplacexy/laplace_perp/CMakeLists.txt index d29bc8d3c8..388513b044 100644 --- a/examples/laplacexy/laplace_perp/CMakeLists.txt +++ b/examples/laplacexy/laplace_perp/CMakeLists.txt @@ -6,5 +6,7 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(laplacexy-laplace_perp test.cxx) -target_link_libraries(laplacexy-laplace_perp PRIVATE bout++::bout++) +bout_add_example(laplacexy-laplace_perp + SOURCES test.cxx + EXTRA_FILES cbm18_dens8.grid_nx68ny64.nc + DATA_DIRS square torus) diff --git a/examples/laplacexy/simple/CMakeLists.txt b/examples/laplacexy/simple/CMakeLists.txt index 08dd40ef01..ec5e7d2492 100644 --- a/examples/laplacexy/simple/CMakeLists.txt +++ b/examples/laplacexy/simple/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(laplacexy-simple test-laplacexy.cxx) -target_link_libraries(laplacexy-simple PRIVATE bout++::bout++) +bout_add_example(laplacexy-simple SOURCES test-laplacexy.cxx) diff --git a/examples/monitor-newapi/CMakeLists.txt b/examples/monitor-newapi/CMakeLists.txt index 481f1efe87..0ee3ee7f85 100644 --- a/examples/monitor-newapi/CMakeLists.txt +++ b/examples/monitor-newapi/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(monitor-newapi monitor.cxx) -target_link_libraries(monitor-newapi PRIVATE 
bout++::bout++) +bout_add_example(monitor-newapi SOURCES monitor.cxx) diff --git a/examples/monitor/CMakeLists.txt b/examples/monitor/CMakeLists.txt index a5dc0af31d..7316023256 100644 --- a/examples/monitor/CMakeLists.txt +++ b/examples/monitor/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(monitor monitor.cxx) -target_link_libraries(monitor PRIVATE bout++::bout++) +bout_add_example(monitor SOURCES monitor.cxx) diff --git a/examples/orszag-tang/CMakeLists.txt b/examples/orszag-tang/CMakeLists.txt index 9aa39b8a16..9ac8fd8d1c 100644 --- a/examples/orszag-tang/CMakeLists.txt +++ b/examples/orszag-tang/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(orszag-tang mhd.cxx) -target_link_libraries(orszag-tang PRIVATE bout++::bout++) +bout_add_example(orszag-tang + SOURCES mhd.cxx + EXTRA_FILES data/otv.grd.nc) diff --git a/examples/preconditioning/wave/CMakeLists.txt b/examples/preconditioning/wave/CMakeLists.txt index 93a9530ad7..437f39fe3a 100644 --- a/examples/preconditioning/wave/CMakeLists.txt +++ b/examples/preconditioning/wave/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(preconditioning-wave test_precon.cxx) -target_link_libraries(preconditioning-wave PRIVATE bout++::bout++) +bout_add_example(preconditioning-wave SOURCES test_precon.cxx) diff --git a/examples/reconnect-2field/CMakeLists.txt b/examples/reconnect-2field/CMakeLists.txt index 08145414ab..8c6f40da80 100644 --- a/examples/reconnect-2field/CMakeLists.txt +++ b/examples/reconnect-2field/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(reconnect-2field 2field.cxx) -target_link_libraries(reconnect-2field PRIVATE bout++::bout++) +bout_add_example(reconnect-2field + SOURCES 2field.cxx + EXTRA_FILES slab_68x32.nc) diff --git a/examples/shear-alfven-wave/CMakeLists.txt b/examples/shear-alfven-wave/CMakeLists.txt index a9295db3d5..03491f8b33 100644 --- a/examples/shear-alfven-wave/CMakeLists.txt +++ b/examples/shear-alfven-wave/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(shear-alfven-wave 2fluid.cxx) -target_link_libraries(shear-alfven-wave PRIVATE bout++::bout++) +bout_add_example(shear-alfven-wave + SOURCES 2fluid.cxx + EXTRA_FILES uedge.grd_Te_10.nc) diff --git a/examples/staggered_grid/CMakeLists.txt b/examples/staggered_grid/CMakeLists.txt index 3554d9536d..2f25f2ed06 100644 --- a/examples/staggered_grid/CMakeLists.txt +++ b/examples/staggered_grid/CMakeLists.txt @@ -6,5 +6,7 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(staggered_grid test_staggered.cxx) -target_link_libraries(staggered_grid PRIVATE bout++::bout++) +bout_add_example(staggered_grid + SOURCES test_staggered.cxx + EXTRA_FILES generate.py runandplot test-staggered.nc + DATA_DIRS data test) diff --git a/examples/staggered_grid/runandplot b/examples/staggered_grid/runandplot index ce60720276..1acac945e2 100755 --- a/examples/staggered_grid/runandplot +++ b/examples/staggered_grid/runandplot @@ -16,7 +16,7 @@ from boutdata.collect import collect from numpy.fft import rfftn from numpy import argmax -from boututils.run_wrapper import shell, launch +from boututils.run_wrapper import build_and_log, launch def analyse(path="test"): """ @@ -45,8 +45,8 @@ def analyse(path="test"): 
#################################################### -print("Making I/O test") -shell("make > make.log") + +build_and_log("Staggered wave") # Run with and without staggered grids print("Running with staggered grids") diff --git a/examples/subsampling/CMakeLists.txt b/examples/subsampling/CMakeLists.txt index 6316837429..86f71d98f5 100644 --- a/examples/subsampling/CMakeLists.txt +++ b/examples/subsampling/CMakeLists.txt @@ -6,5 +6,7 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(subsampling monitor.cxx) -target_link_libraries(subsampling PRIVATE bout++::bout++) +bout_add_example(subsampling + SOURCES monitor.cxx + EXTRA_FILES show.py) + diff --git a/examples/tokamak-2fluid/CMakeLists.txt b/examples/tokamak-2fluid/CMakeLists.txt index 246ca61dab..8998706373 100644 --- a/examples/tokamak-2fluid/CMakeLists.txt +++ b/examples/tokamak-2fluid/CMakeLists.txt @@ -6,5 +6,11 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(tokamak-2fluid 2fluid.cxx) -target_link_libraries(tokamak-2fluid PRIVATE bout++::bout++) +bout_add_example(tokamak-2fluid + SOURCES 2fluid.cxx + DATA_DIRS d3d-119919 + d3d-129131 + data + EXTRA_FILES uedge.grd.nc + uedge.grd_129131_newpproc.nc + data/uedge.grd.nc) diff --git a/examples/uedge-benchmark/CMakeLists.txt b/examples/uedge-benchmark/CMakeLists.txt index 1fc82e580d..7b2648ff2d 100644 --- a/examples/uedge-benchmark/CMakeLists.txt +++ b/examples/uedge-benchmark/CMakeLists.txt @@ -6,5 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(uedge-benchmark ue_bmark.cxx) -target_link_libraries(uedge-benchmark PRIVATE bout++::bout++) +bout_add_example(uedge-benchmark + SOURCES ue_bmark.cxx + EXTRA_FILES uedge.grd_Up_Ni_Tei_2d.nc) diff --git a/examples/wave-slab/CMakeLists.txt b/examples/wave-slab/CMakeLists.txt index f5d5037341..aa94834764 100644 --- a/examples/wave-slab/CMakeLists.txt +++ b/examples/wave-slab/CMakeLists.txt @@ -6,5 +6,4 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -add_executable(wave-slab wave_slab.cxx) -target_link_libraries(wave-slab PRIVATE bout++::bout++) +bout_add_example(wave-slab SOURCES wave_slab.cxx) From 0f7672a256bcc24db43ae503145af588c70eae0a Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 27 Jul 2020 13:40:03 +0100 Subject: [PATCH 257/428] Add gitignores for some examples --- examples/conduction-snb/.gitignore | 1 + examples/laplace-petsc3d/.gitignore | 1 + examples/make-script/.gitignore | 1 + examples/performance/communications/.gitignore | 1 + 4 files changed, 4 insertions(+) create mode 100644 examples/conduction-snb/.gitignore create mode 100644 examples/laplace-petsc3d/.gitignore create mode 100644 examples/make-script/.gitignore create mode 100644 examples/performance/communications/.gitignore diff --git a/examples/conduction-snb/.gitignore b/examples/conduction-snb/.gitignore new file mode 100644 index 0000000000..26ba429e8c --- /dev/null +++ b/examples/conduction-snb/.gitignore @@ -0,0 +1 @@ +conduction-snb diff --git a/examples/laplace-petsc3d/.gitignore b/examples/laplace-petsc3d/.gitignore new file mode 100644 index 0000000000..548062eebf --- /dev/null +++ b/examples/laplace-petsc3d/.gitignore @@ -0,0 +1 @@ +test-laplace3d diff --git a/examples/make-script/.gitignore b/examples/make-script/.gitignore new file mode 100644 index 0000000000..9daeafb986 --- /dev/null +++ b/examples/make-script/.gitignore @@ -0,0 +1 @@ +test diff --git a/examples/performance/communications/.gitignore 
b/examples/performance/communications/.gitignore new file mode 100644 index 0000000000..21bf1bdcce --- /dev/null +++ b/examples/performance/communications/.gitignore @@ -0,0 +1 @@ +communications From ffe29249cd07cca67abc64da494c48ac9e835023 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 6 Nov 2020 13:46:01 +0000 Subject: [PATCH 258/428] CMake: Add option to download and build netCDF C++ API --- CMakeLists.txt | 21 +++++++++++++++++---- cmake/FindNetCDF.cmake | 31 ++++++++----------------------- 2 files changed, 25 insertions(+), 27 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a4d60f08af..9fbdc960e2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -466,13 +466,26 @@ endif() message(STATUS "PVODE support: ${USE_PVODE}") set(BOUT_HAS_PVODE ${USE_PVODE}) -option(USE_NETCDF "Enable support for NetCDF output" ON) -if (USE_NETCDF) - find_package(NetCDF REQUIRED) +option(BOUT_USE_NETCDF "Enable support for NetCDF output" ON) +option(BOUT_DOWNLOAD_NETCDF_CXX4 "Download and build netCDF-cxx4" OFF) +if (BOUT_USE_NETCDF) + if (BOUT_DOWNLOAD_NETCDF_CXX4) + include(FetchContent) + FetchContent_Declare( + netcdf-cxx4 + GIT_REPOSITORY https://github.com/ZedThree/netcdf-cxx4 + GIT_TAG "ad3e50953190615cb69dcc8a4652f9a88a8499cf" + ) + # Don't build the netcdf tests, they have lots of warnings + set(NCXX_ENABLE_TESTS OFF CACHE BOOL "" FORCE) + FetchContent_MakeAvailable(netcdf-cxx4) + else() + find_package(NetCDF REQUIRED) + endif() + target_link_libraries(bout++ PUBLIC netCDF::netcdf-cxx4) target_compile_definitions(bout++ PUBLIC "NCDF4" PUBLIC "BOUT_HAS_NETCDF") - target_link_libraries(bout++ PUBLIC NetCDF::NetCDF_CXX) endif() message(STATUS "NetCDF support: ${USE_NETCDF}") set(BOUT_HAS_NETCDF ${USE_NETCDF}) diff --git a/cmake/FindNetCDF.cmake b/cmake/FindNetCDF.cmake index 1b1d68e09f..b406a37b54 100644 --- a/cmake/FindNetCDF.cmake +++ b/cmake/FindNetCDF.cmake @@ -16,8 +16,8 @@ # NetCDF_INCLUDE_DIRS - Location of the NetCDF includes # NetCDF_LIBRARIES - Required libraries # -# This module will also export ``NetCDF::NetCDF_C`` and -# ``NetCDF::NetCDF_CXX`` targets. +# This module will also export ``netCDF::netcdf`` and +# ``netCDF::netcdf-cxx4`` targets. 
# # You can also set the following variables: # @@ -27,23 +27,8 @@ # ``NetCDF_DEBUG`` # Set to TRUE to get extra debugging output - -# Taken from https://github.com/conan-io/conan/issues/2125#issuecomment-351176653 -# This is needed so we can make a clone of the NetCDF C++ target which -# has the name "netcdf-cxx4" by default -function(add_cloned_imported_target dst src) - add_library(${dst} INTERFACE IMPORTED) - foreach(name INTERFACE_LINK_LIBRARIES INTERFACE_INCLUDE_DIRECTORIES INTERFACE_COMPILE_DEFINITIONS INTERFACE_COMPILE_OPTIONS) - get_property(value TARGET ${src} PROPERTY ${name} ) - set_property(TARGET ${dst} PROPERTY ${name} ${value}) - endforeach() -endfunction() - find_package(netCDFCxx QUIET) if (netCDFCxx_FOUND) - if(NOT TARGET NetCDF::NetCDF_CXX) - add_cloned_imported_target(NetCDF::NetCDF_CXX netCDF::netcdf-cxx4) - endif() set(NetCDF_FOUND TRUE) return() endif() @@ -209,15 +194,15 @@ if (NetCDF_FOUND) set(NetCDF_INCLUDE_DIRS "${NetCDF_CXX_INCLUDE_DIR}" "${NetCDF_C_INCLUDE_DIR}") set(NetCDF_LIBRARIES "${NetCDF_CXX_LIBRARY}" "${NetCDF_LIBRARY}") - if (NOT TARGET NetCDF::NetCDF) - add_library(NetCDF::NetCDF_C UNKNOWN IMPORTED) - set_target_properties(NetCDF::NetCDF_C PROPERTIES + if (NOT TARGET netCDF::netcdf) + add_library(netCDF::netcdf UNKNOWN IMPORTED) + set_target_properties(netCDF::netcdf PROPERTIES IMPORTED_LOCATION "${NetCDF_C_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${NetCDF_C_INCLUDE_DIR}" ) - add_library(NetCDF::NetCDF_CXX UNKNOWN IMPORTED) - set_target_properties(NetCDF::NetCDF_CXX PROPERTIES - IMPORTED_LINK_INTERFACE_LIBRARIES NetCDF::NetCDF_C + add_library(netCDF::netcdf-cxx4 UNKNOWN IMPORTED) + set_target_properties(netCDF::netcdf-cxx4 PROPERTIES + IMPORTED_LINK_INTERFACE_LIBRARIES netCDF::netcdf IMPORTED_LOCATION "${NetCDF_CXX_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${NetCDF_CXX_INCLUDE_DIR}") endif () From fdd6f8ae3d2383d6064cb1f5da7b08a013898a8e Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 6 Nov 2020 17:01:11 +0000 Subject: [PATCH 259/428] CMake: Add better search for netCDF C when downloading C++ API Split FindNetCDF.cmake into two files so we can reuse the C bit if we're not downloading netcdf-cxx4 --- CMakeLists.txt | 4 +- cmake/FindNetCDF.cmake | 209 -------------------------------------- cmake/FindnetCDF.cmake | 143 ++++++++++++++++++++++++++ cmake/FindnetCDFCxx.cmake | 135 ++++++++++++++++++++++++ 4 files changed, 281 insertions(+), 210 deletions(-) delete mode 100644 cmake/FindNetCDF.cmake create mode 100644 cmake/FindnetCDF.cmake create mode 100644 cmake/FindnetCDFCxx.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 9fbdc960e2..429488c276 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -478,9 +478,11 @@ if (BOUT_USE_NETCDF) ) # Don't build the netcdf tests, they have lots of warnings set(NCXX_ENABLE_TESTS OFF CACHE BOOL "" FORCE) + # Use our own FindnetCDF module which uses nc-config + find_package(netCDF REQUIRED) FetchContent_MakeAvailable(netcdf-cxx4) else() - find_package(NetCDF REQUIRED) + find_package(netCDFCxx REQUIRED) endif() target_link_libraries(bout++ PUBLIC netCDF::netcdf-cxx4) target_compile_definitions(bout++ diff --git a/cmake/FindNetCDF.cmake b/cmake/FindNetCDF.cmake deleted file mode 100644 index b406a37b54..0000000000 --- a/cmake/FindNetCDF.cmake +++ /dev/null @@ -1,209 +0,0 @@ -# FindNetCDF -# ---------- -# -# Find the NetCDF IO library -# -# This module uses the ``nc-config`` and ``ncxx4-config`` helper scripts -# as hints for the location of the NetCDF libraries. They should be in -# your PATH. 
-# -# This module will define the following variables: -# -# :: -# -# NetCDF_FOUND - true if NetCDF was found -# NetCDF_VERSION - NetCDF version in format Major.Minor.Release -# NetCDF_INCLUDE_DIRS - Location of the NetCDF includes -# NetCDF_LIBRARIES - Required libraries -# -# This module will also export ``netCDF::netcdf`` and -# ``netCDF::netcdf-cxx4`` targets. -# -# You can also set the following variables: -# -# ``NetCDF_ROOT`` -# Specify the path to the NetCDF installation to use -# -# ``NetCDF_DEBUG`` -# Set to TRUE to get extra debugging output - -find_package(netCDFCxx QUIET) -if (netCDFCxx_FOUND) - set(NetCDF_FOUND TRUE) - return() -endif() - -# A function to call nx-config with an argument, and append the resulting path to a list -# Taken from https://github.com/LiamBindle/geos-chem/blob/feature/CMake/CMakeScripts/FindNetCDF.cmake -function(inspect_netcdf_config VAR NX_CONFIG ARG) - execute_process( - COMMAND ${NX_CONFIG} ${ARG} - OUTPUT_VARIABLE NX_CONFIG_OUTPUT - OUTPUT_STRIP_TRAILING_WHITESPACE - ) - if(EXISTS "${NX_CONFIG_OUTPUT}") - set(${VAR} ${NX_CONFIG_OUTPUT} PARENT_SCOPE) - endif() -endfunction() - -find_program(NC_CONFIG "nc-config" - PATHS "${NetCDF_ROOT}" - PATH_SUFFIXES bin - DOC "Path to NetCDF C config helper" - NO_DEFAULT_PATH - ) - -find_program(NC_CONFIG "nc-config" - DOC "Path to NetCDF C config helper" - ) - -get_filename_component(NC_CONFIG_TMP "${NC_CONFIG}" DIRECTORY) -get_filename_component(NC_CONFIG_LOCATION "${NC_CONFIG_TMP}" DIRECTORY) -if (NetCDF_DEBUG) - message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " - " NC_CONFIG_LOCATION = ${NC_CONFIG_LOCATION}" - " NetCDF_ROOT = ${NetCDF_ROOT}") -endif() - -find_program(NCXX4_CONFIG "ncxx4-config" - PATHS "${NetCDF_ROOT}" - PATH_SUFFIXES bin - DOC "Path to NetCDF C++ config helper" - NO_DEFAULT_PATH - ) - -find_program(NCXX4_CONFIG "ncxx4-config" - DOC "Path to NetCDF C++ config helper" - ) - -get_filename_component(NCXX4_CONFIG_TMP "${NCXX4_CONFIG}" DIRECTORY) -get_filename_component(NCXX4_CONFIG_LOCATION "${NCXX4_CONFIG_TMP}" DIRECTORY) -if (NetCDF_DEBUG) - message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " - " NCXX4_CONFIG_LOCATION = ${NCXX4_CONFIG_LOCATION}") -endif() - -inspect_netcdf_config(NC_HINTS_INCLUDE_DIR "${NC_CONFIG}" "--includedir") -inspect_netcdf_config(NC_HINTS_PREFIX "${NC_CONFIG}" "--prefix") - -find_path(NetCDF_C_INCLUDE_DIR - NAMES netcdf.h - DOC "NetCDF C include directories" - HINTS - "${NC_HINTS_INCLUDE_DIR}" - "${NC_HINTS_PREFIX}" - "${NC_CONFIG_LOCATION}" - PATH_SUFFIXES - "include" - ) -if (NetCDF_DEBUG) - message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " - " NetCDF_C_INCLUDE_DIR = ${NetCDF_C_INCLUDE_DIR}" - " NC_HINTS_INCLUDE_DIR = ${NC_HINTS_INCLUDE_DIR}" - " NC_HINTS_PREFIX = ${NC_HINTS_PREFIX}" - ) -endif() -mark_as_advanced(NetCDF_C_INCLUDE_DIR) - -find_library(NetCDF_C_LIBRARY - NAMES netcdf - DOC "NetCDF C library" - HINTS - "${NC_HINTS_INCLUDE_DIR}" - "${NC_HINTS_PREFIX}" - "${NC_CONFIG_LOCATION}" - PATH_SUFFIXES - "lib" "lib64" - ) -if (NetCDF_DEBUG) - message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " - " NetCDF_C_LIBRARY = ${NetCDF_C_LIBRARY}" - " NC_HINTS_INCLUDE_DIR = ${NC_HINTS_INCLUDE_DIR}" - " NC_HINTS_PREFIX = ${NC_HINTS_PREFIX}" - ) -endif() -mark_as_advanced(NetCDF_C_LIBRARY) - -inspect_netcdf_config(NCXX4_HINTS_INCLUDE_DIR "${NCXX4_CONFIG}" "--includedir") -inspect_netcdf_config(NCXX4_HINTS_PREFIX "${NCXX4_CONFIG}" "--prefix") - -find_path(NetCDF_CXX_INCLUDE_DIR 
- NAMES netcdf - DOC "NetCDF C++ include directories" - HINTS - "${NetCDF_C_INCLUDE_DIR}" - "${NCXX4_HINTS_INCLUDE_DIR}" - "${NCXX4_HINTS_PREFIX}" - "${NCXX4_CONFIG_LOCATION}" - PATH_SUFFIXES - "include" - ) -if (NetCDF_DEBUG) - message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " - " NetCDF_CXX_INCLUDE_DIR = ${NetCDF_CXX_INCLUDE_DIR}" - " NCXX4_HINTS_INCLUDE_DIR = ${NCXX4_HINTS_INCLUDE_DIR}" - " NCXX4_HINTS_PREFIX = ${NCXX4_HINTS_PREFIX}" - ) -endif() -mark_as_advanced(NetCDF_CXX_INCLUDE_DIR) - -find_library(NetCDF_CXX_LIBRARY - NAMES netcdf_c++4 netcdf-cxx4 - DOC "NetCDF C++ library" - HINTS - "${NCXX4_HINTS_INCLUDE_DIR}" - "${NCXX4_HINTS_PREFIX}" - "${NCXX4_CONFIG_LOCATION}" - PATH_SUFFIXES - "lib" "lib64" - ) -if (NetCDF_DEBUG) - message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " - " NetCDF_CXX_LIBRARY = ${NetCDF_CXX_LIBRARY}" - " NCXX4_HINTS_INCLUDE_DIR = ${NCXX4_HINTS_INCLUDE_DIR}" - " NCXX4_HINTS_PREFIX = ${NCXX4_HINTS_PREFIX}" - ) -endif() -mark_as_advanced(NetCDF_CXX_LIBRARY) - -if (NetCDF_C_INCLUDE_DIR) - file(STRINGS "${NetCDF_C_INCLUDE_DIR}/netcdf_meta.h" _netcdf_version_lines - REGEX "#define[ \t]+NC_VERSION_(MAJOR|MINOR|PATCH|NOTE)") - string(REGEX REPLACE ".*NC_VERSION_MAJOR *\([0-9]*\).*" "\\1" _netcdf_version_major "${_netcdf_version_lines}") - string(REGEX REPLACE ".*NC_VERSION_MINOR *\([0-9]*\).*" "\\1" _netcdf_version_minor "${_netcdf_version_lines}") - string(REGEX REPLACE ".*NC_VERSION_PATCH *\([0-9]*\).*" "\\1" _netcdf_version_patch "${_netcdf_version_lines}") - string(REGEX REPLACE ".*NC_VERSION_NOTE *\"\([^\"]*\)\".*" "\\1" _netcdf_version_note "${_netcdf_version_lines}") - if (NOT _netcdf_version_note STREQUAL "") - # Make development version compare higher than any patch level - set(_netcdf_version_note ".99") - endif() - set(NetCDF_VERSION "${_netcdf_version_major}.${_netcdf_version_minor}.${_netcdf_version_patch}${_netcdf_version_note}") - unset(_netcdf_version_major) - unset(_netcdf_version_minor) - unset(_netcdf_version_patch) - unset(_netcdf_version_note) - unset(_netcdf_version_lines) -endif () - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(NetCDF - REQUIRED_VARS NetCDF_C_LIBRARY NetCDF_C_INCLUDE_DIR NetCDF_CXX_LIBRARY NetCDF_CXX_INCLUDE_DIR - VERSION_VAR NetCDF_VERSION) - -if (NetCDF_FOUND) - set(NetCDF_INCLUDE_DIRS "${NetCDF_CXX_INCLUDE_DIR}" "${NetCDF_C_INCLUDE_DIR}") - set(NetCDF_LIBRARIES "${NetCDF_CXX_LIBRARY}" "${NetCDF_LIBRARY}") - - if (NOT TARGET netCDF::netcdf) - add_library(netCDF::netcdf UNKNOWN IMPORTED) - set_target_properties(netCDF::netcdf PROPERTIES - IMPORTED_LOCATION "${NetCDF_C_LIBRARY}" - INTERFACE_INCLUDE_DIRECTORIES "${NetCDF_C_INCLUDE_DIR}" - ) - add_library(netCDF::netcdf-cxx4 UNKNOWN IMPORTED) - set_target_properties(netCDF::netcdf-cxx4 PROPERTIES - IMPORTED_LINK_INTERFACE_LIBRARIES netCDF::netcdf - IMPORTED_LOCATION "${NetCDF_CXX_LIBRARY}" - INTERFACE_INCLUDE_DIRECTORIES "${NetCDF_CXX_INCLUDE_DIR}") - endif () -endif () diff --git a/cmake/FindnetCDF.cmake b/cmake/FindnetCDF.cmake new file mode 100644 index 0000000000..60098b041e --- /dev/null +++ b/cmake/FindnetCDF.cmake @@ -0,0 +1,143 @@ +# FindnetCDF +# ---------- +# +# Find the netCDF IO library +# +# This module uses the ``nc-config`` helper script as a hint for the +# location of the netCDF libraries. It should be in your PATH. 
+# +# This module will define the following variables: +# +# :: +# +# netCDF_FOUND - true if netCDF was found +# netCDF_VERSION - netCDF version in format Major.Minor.Release +# netCDF_INCLUDE_DIRS - Location of the netCDF includes +# netCDF_LIBRARIES - Required libraries +# +# This module will also export the ``netCDF::netcdf`` target. +# +# You can also set the following variables: +# +# ``netCDF_ROOT`` +# Specify the path to the netCDF installation to use +# +# ``netCDF_DEBUG`` +# Set to TRUE to get extra debugging output + +find_package(netCDF QUIET CONFIG) +if (netCDF_FOUND) + set(netCDF_FOUND TRUE) + return() +endif() + +# A function to call nx-config with an argument, and append the resulting path to a list +# Taken from https://github.com/LiamBindle/geos-chem/blob/feature/CMake/CMakeScripts/FindNetCDF.cmake +function(inspect_netcdf_config VAR NX_CONFIG ARG) + execute_process( + COMMAND ${NX_CONFIG} ${ARG} + OUTPUT_VARIABLE NX_CONFIG_OUTPUT + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + if(EXISTS "${NX_CONFIG_OUTPUT}") + set(${VAR} ${NX_CONFIG_OUTPUT} PARENT_SCOPE) + endif() +endfunction() + +find_program(NC_CONFIG "nc-config" + PATHS "${netCDF_ROOT}" + PATH_SUFFIXES bin + DOC "Path to netCDF C config helper" + NO_DEFAULT_PATH + ) + +find_program(NC_CONFIG "nc-config" + DOC "Path to netCDF C config helper" + ) + +get_filename_component(NC_CONFIG_TMP "${NC_CONFIG}" DIRECTORY) +get_filename_component(NC_CONFIG_LOCATION "${NC_CONFIG_TMP}" DIRECTORY) +if (netCDF_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + " NC_CONFIG_LOCATION = ${NC_CONFIG_LOCATION}" + " netCDF_ROOT = ${netCDF_ROOT}") +endif() + +inspect_netcdf_config(NC_HINTS_INCLUDE_DIR "${NC_CONFIG}" "--includedir") +inspect_netcdf_config(NC_HINTS_PREFIX "${NC_CONFIG}" "--prefix") + +find_path(netCDF_C_INCLUDE_DIR + NAMES netcdf.h + DOC "netCDF C include directories" + HINTS + "${NC_HINTS_INCLUDE_DIR}" + "${NC_HINTS_PREFIX}" + "${NC_CONFIG_LOCATION}" + PATH_SUFFIXES + "include" + ) +if (netCDF_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + " netCDF_C_INCLUDE_DIR = ${netCDF_C_INCLUDE_DIR}" + " NC_HINTS_INCLUDE_DIR = ${NC_HINTS_INCLUDE_DIR}" + " NC_HINTS_PREFIX = ${NC_HINTS_PREFIX}" + ) +endif() +mark_as_advanced(netCDF_C_INCLUDE_DIR) + +find_library(netCDF_C_LIBRARY + NAMES netcdf + DOC "netCDF C library" + HINTS + "${NC_HINTS_INCLUDE_DIR}" + "${NC_HINTS_PREFIX}" + "${NC_CONFIG_LOCATION}" + PATH_SUFFIXES + "lib" "lib64" + ) +if (netCDF_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + " netCDF_C_LIBRARY = ${netCDF_C_LIBRARY}" + " NC_HINTS_INCLUDE_DIR = ${NC_HINTS_INCLUDE_DIR}" + " NC_HINTS_PREFIX = ${NC_HINTS_PREFIX}" + ) +endif() +mark_as_advanced(netCDF_C_LIBRARY) + +if (netCDF_C_INCLUDE_DIR) + file(STRINGS "${netCDF_C_INCLUDE_DIR}/netcdf_meta.h" _netcdf_version_lines + REGEX "#define[ \t]+NC_VERSION_(MAJOR|MINOR|PATCH|NOTE)") + string(REGEX REPLACE ".*NC_VERSION_MAJOR *\([0-9]*\).*" "\\1" _netcdf_version_major "${_netcdf_version_lines}") + string(REGEX REPLACE ".*NC_VERSION_MINOR *\([0-9]*\).*" "\\1" _netcdf_version_minor "${_netcdf_version_lines}") + string(REGEX REPLACE ".*NC_VERSION_PATCH *\([0-9]*\).*" "\\1" _netcdf_version_patch "${_netcdf_version_lines}") + string(REGEX REPLACE ".*NC_VERSION_NOTE *\"\([^\"]*\)\".*" "\\1" _netcdf_version_note "${_netcdf_version_lines}") + if (NOT _netcdf_version_note STREQUAL "") + # Make development version compare higher than any patch level + set(_netcdf_version_note ".99") + 
endif() + set(netCDF_VERSION "${_netcdf_version_major}.${_netcdf_version_minor}.${_netcdf_version_patch}${_netcdf_version_note}") + unset(_netcdf_version_major) + unset(_netcdf_version_minor) + unset(_netcdf_version_patch) + unset(_netcdf_version_note) + unset(_netcdf_version_lines) +endif () + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(netCDF + REQUIRED_VARS netCDF_C_LIBRARY netCDF_C_INCLUDE_DIR + VERSION_VAR netCDF_VERSION) + +if (netCDF_FOUND) + set(netCDF_INCLUDE_DIR "${netCDF_C_INCLUDE_DIR}") + set(netCDF_INCLUDE_DIRS "${netCDF_C_INCLUDE_DIR}") + set(netCDF_LIBRARIES "${netCDF_C_LIBRARY}") + + if (NOT TARGET netCDF::netcdf) + add_library(netCDF::netcdf UNKNOWN IMPORTED) + set_target_properties(netCDF::netcdf PROPERTIES + IMPORTED_LOCATION "${netCDF_C_LIBRARY}" + INTERFACE_INCLUDE_DIRECTORIES "${netCDF_C_INCLUDE_DIR}" + ) + endif () +endif () diff --git a/cmake/FindnetCDFCxx.cmake b/cmake/FindnetCDFCxx.cmake new file mode 100644 index 0000000000..8514ea5a25 --- /dev/null +++ b/cmake/FindnetCDFCxx.cmake @@ -0,0 +1,135 @@ +# FindnetCDFCxx +# ---------- +# +# Find the netCDF C++ API +# +# This module uses the ``ncxx4-config`` helper script as a hint for +# the location of the NetCDF C++ library. It should be in your PATH. +# +# This module will define the following variables: +# +# :: +# +# netCDFCxx_FOUND - true if netCDFCxx was found +# netCDFCxx_VERSION - netCDFCxx version in format Major.Minor.Release +# netCDFCxx_INCLUDE_DIRS - Location of the netCDFCxx includes +# netCDFCxx_LIBRARIES - Required libraries +# +# This module will also export the ``netCDF::netcdf-cxx4`` target. +# +# You can also set the following variables: +# +# ``netCDFCxx_ROOT`` +# Specify the path to the netCDF C++ installation to use +# +# ``netCDFCxx_DEBUG`` +# Set to TRUE to get extra debugging output + +find_package(netCDFCxx QUIET CONFIG) +if (netCDFCxx_FOUND) + set(netCDFCxx_FOUND TRUE) + return() +endif() + +find_package(netCDF REQUIRED) + +# A function to call nx-config with an argument, and append the resulting path to a list +# Taken from https://github.com/LiamBindle/geos-chem/blob/feature/CMake/CMakeScripts/FindNetCDF.cmake +function(inspect_netcdf_config VAR NX_CONFIG ARG) + execute_process( + COMMAND ${NX_CONFIG} ${ARG} + OUTPUT_VARIABLE NX_CONFIG_OUTPUT + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + if(EXISTS "${NX_CONFIG_OUTPUT}") + set(${VAR} ${NX_CONFIG_OUTPUT} PARENT_SCOPE) + endif() +endfunction() + +find_program(NCXX4_CONFIG "ncxx4-config" + PATHS "${netCDFCxx_ROOT}" + PATH_SUFFIXES bin + DOC "Path to netCDF C++ config helper" + NO_DEFAULT_PATH + ) + +find_program(NCXX4_CONFIG "ncxx4-config" + DOC "Path to netCDF C++ config helper" + ) + +get_filename_component(NCXX4_CONFIG_TMP "${NCXX4_CONFIG}" DIRECTORY) +get_filename_component(NCXX4_CONFIG_LOCATION "${NCXX4_CONFIG_TMP}" DIRECTORY) +if (netCDFCxx_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + " NCXX4_CONFIG_LOCATION = ${NCXX4_CONFIG_LOCATION}") +endif() + +inspect_netcdf_config(NCXX4_HINTS_INCLUDE_DIR "${NCXX4_CONFIG}" "--includedir") +inspect_netcdf_config(NCXX4_HINTS_PREFIX "${NCXX4_CONFIG}" "--prefix") + +find_path(netCDF_CXX_INCLUDE_DIR + NAMES netcdf + DOC "netCDF C++ include directories" + HINTS + "${netCDF_C_INCLUDE_DIR}" + "${NCXX4_HINTS_INCLUDE_DIR}" + "${NCXX4_HINTS_PREFIX}" + "${NCXX4_CONFIG_LOCATION}" + PATH_SUFFIXES + "include" + ) +if (netCDFCxx_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + " netCDF_CXX_INCLUDE_DIR = 
${netCDF_CXX_INCLUDE_DIR}" + " NCXX4_HINTS_INCLUDE_DIR = ${NCXX4_HINTS_INCLUDE_DIR}" + " NCXX4_HINTS_PREFIX = ${NCXX4_HINTS_PREFIX}" + ) +endif() +mark_as_advanced(netCDF_CXX_INCLUDE_DIR) + +find_library(netCDF_CXX_LIBRARY + NAMES netcdf_c++4 netcdf-cxx4 + DOC "netCDF C++ library" + HINTS + "${NCXX4_HINTS_INCLUDE_DIR}" + "${NCXX4_HINTS_PREFIX}" + "${NCXX4_CONFIG_LOCATION}" + PATH_SUFFIXES + "lib" "lib64" + ) +if (netCDFCxx_DEBUG) + message(STATUS "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ] " + " netCDF_CXX_LIBRARY = ${netCDF_CXX_LIBRARY}" + " NCXX4_HINTS_INCLUDE_DIR = ${NCXX4_HINTS_INCLUDE_DIR}" + " NCXX4_HINTS_PREFIX = ${NCXX4_HINTS_PREFIX}" + ) +endif() +mark_as_advanced(netCDF_CXX_LIBRARY) + +inspect_netcdf_config(_ncxx4_version "${NCXX4_CONFIG}" "--version") +string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\1" _netcdfcxx_version_major "${_ncxx4_version}") +string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\2" _netcdfcxx_version_minor "${_ncxx4_version}") +string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\3" _netcdfcxx_version_patch "${_ncxx4_version}") +set(netCDFCxx_VERSION "${_netcdf_version_major}.${_netcdf_version_minor}.${_netcdf_version_patch}${_netcdf_version_note}") +unset(_ncxx4_version) +unset(_netcdf_version_major) +unset(_netcdf_version_minor) +unset(_netcdf_version_patch) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(netCDF + REQUIRED_VARS netCDF_CXX_LIBRARY netCDF_CXX_INCLUDE_DIR + VERSION_VAR netCDFCxx_VERSION) + +if (netCDF_FOUND) + set(netCDFCxx_INCLUDE_DIRS "${netCDF_CXX_INCLUDE_DIR}") + set(netCDFCxx_LIBRARIES "${netCDF_CXX_LIBRARY}") + + if (NOT TARGET netCDF::netcdf-cxx4) + add_library(netCDF::netcdf-cxx4 UNKNOWN IMPORTED) + set_target_properties(netCDF::netcdf-cxx4 PROPERTIES + IMPORTED_LINK_INTERFACE_LIBRARIES netCDF::netcdf + IMPORTED_LOCATION "${netCDF_CXX_LIBRARY}" + INTERFACE_INCLUDE_DIRECTORIES "${netCDF_CXX_INCLUDE_DIR}") + endif () +endif () From bfec073634c29736223402088fde54096cc7aba3 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 9 Nov 2020 09:30:25 +0000 Subject: [PATCH 260/428] CMake: Fix typo in FindnetCDFCxx module --- cmake/FindnetCDFCxx.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/FindnetCDFCxx.cmake b/cmake/FindnetCDFCxx.cmake index 8514ea5a25..076a5dba45 100644 --- a/cmake/FindnetCDFCxx.cmake +++ b/cmake/FindnetCDFCxx.cmake @@ -117,11 +117,11 @@ unset(_netcdf_version_minor) unset(_netcdf_version_patch) include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(netCDF +find_package_handle_standard_args(netCDFCxx REQUIRED_VARS netCDF_CXX_LIBRARY netCDF_CXX_INCLUDE_DIR VERSION_VAR netCDFCxx_VERSION) -if (netCDF_FOUND) +if (netCDFCxx_FOUND) set(netCDFCxx_INCLUDE_DIRS "${netCDF_CXX_INCLUDE_DIR}") set(netCDFCxx_LIBRARIES "${netCDF_CXX_LIBRARY}") From 2561bc4af651808bce85aeb8733a85a0086c1ce7 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 9 Nov 2020 10:26:06 +0000 Subject: [PATCH 261/428] CMake: Move a couple of functions to a common module - bout_add_library_alias: workaround for add_library(ALIAS) in CMake < 3.18 - bout_inspect_netcdf_config: get details from nx-config --- cmake/BOUT++functions.cmake | 26 ++++++++++++++++++++++++++ cmake/FindnetCDF.cmake | 19 ++++--------------- cmake/FindnetCDFCxx.cmake | 21 +++++---------------- 3 files changed, 35 insertions(+), 31 deletions(-) diff --git a/cmake/BOUT++functions.cmake b/cmake/BOUT++functions.cmake index 
f283c0db7c..9c36948277 100644 --- a/cmake/BOUT++functions.cmake +++ b/cmake/BOUT++functions.cmake @@ -82,3 +82,29 @@ function(bout_add_example EXAMPLENAME) set_target_properties(${EXAMPLENAME} PROPERTIES FOLDER examples) endfunction() + + +# Add an alias for an imported target +# Workaround for CMAke < 3.18 +# Taken from https://github.com/conan-io/conan/issues/2125#issuecomment-351176653 +function(bout_add_library_alias dst src) + add_library(${dst} INTERFACE IMPORTED) + foreach(name INTERFACE_LINK_LIBRARIES INTERFACE_INCLUDE_DIRECTORIES INTERFACE_COMPILE_DEFINITIONS INTERFACE_COMPILE_OPTIONS) + get_property(value TARGET ${src} PROPERTY ${name} ) + set_property(TARGET ${dst} PROPERTY ${name} ${value}) + endforeach() +endfunction() + + +# Call nx-config with an argument, and append the resulting path to a list +# Taken from https://github.com/LiamBindle/geos-chem/blob/feature/CMake/CMakeScripts/FindNetCDF.cmake +function(bout_inspect_netcdf_config VAR NX_CONFIG ARG) + execute_process( + COMMAND ${NX_CONFIG} ${ARG} + OUTPUT_VARIABLE NX_CONFIG_OUTPUT + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + if(EXISTS "${NX_CONFIG_OUTPUT}") + set(${VAR} ${NX_CONFIG_OUTPUT} PARENT_SCOPE) + endif() +endfunction() diff --git a/cmake/FindnetCDF.cmake b/cmake/FindnetCDF.cmake index 60098b041e..47906bf2f1 100644 --- a/cmake/FindnetCDF.cmake +++ b/cmake/FindnetCDF.cmake @@ -25,25 +25,14 @@ # ``netCDF_DEBUG`` # Set to TRUE to get extra debugging output +include(BOUT++functions) + find_package(netCDF QUIET CONFIG) if (netCDF_FOUND) set(netCDF_FOUND TRUE) return() endif() -# A function to call nx-config with an argument, and append the resulting path to a list -# Taken from https://github.com/LiamBindle/geos-chem/blob/feature/CMake/CMakeScripts/FindNetCDF.cmake -function(inspect_netcdf_config VAR NX_CONFIG ARG) - execute_process( - COMMAND ${NX_CONFIG} ${ARG} - OUTPUT_VARIABLE NX_CONFIG_OUTPUT - OUTPUT_STRIP_TRAILING_WHITESPACE - ) - if(EXISTS "${NX_CONFIG_OUTPUT}") - set(${VAR} ${NX_CONFIG_OUTPUT} PARENT_SCOPE) - endif() -endfunction() - find_program(NC_CONFIG "nc-config" PATHS "${netCDF_ROOT}" PATH_SUFFIXES bin @@ -63,8 +52,8 @@ if (netCDF_DEBUG) " netCDF_ROOT = ${netCDF_ROOT}") endif() -inspect_netcdf_config(NC_HINTS_INCLUDE_DIR "${NC_CONFIG}" "--includedir") -inspect_netcdf_config(NC_HINTS_PREFIX "${NC_CONFIG}" "--prefix") +bout_inspect_netcdf_config(NC_HINTS_INCLUDE_DIR "${NC_CONFIG}" "--includedir") +bout_inspect_netcdf_config(NC_HINTS_PREFIX "${NC_CONFIG}" "--prefix") find_path(netCDF_C_INCLUDE_DIR NAMES netcdf.h diff --git a/cmake/FindnetCDFCxx.cmake b/cmake/FindnetCDFCxx.cmake index 076a5dba45..f2779dd70c 100644 --- a/cmake/FindnetCDFCxx.cmake +++ b/cmake/FindnetCDFCxx.cmake @@ -25,6 +25,8 @@ # ``netCDFCxx_DEBUG`` # Set to TRUE to get extra debugging output +include(BOUT++functions) + find_package(netCDFCxx QUIET CONFIG) if (netCDFCxx_FOUND) set(netCDFCxx_FOUND TRUE) @@ -33,19 +35,6 @@ endif() find_package(netCDF REQUIRED) -# A function to call nx-config with an argument, and append the resulting path to a list -# Taken from https://github.com/LiamBindle/geos-chem/blob/feature/CMake/CMakeScripts/FindNetCDF.cmake -function(inspect_netcdf_config VAR NX_CONFIG ARG) - execute_process( - COMMAND ${NX_CONFIG} ${ARG} - OUTPUT_VARIABLE NX_CONFIG_OUTPUT - OUTPUT_STRIP_TRAILING_WHITESPACE - ) - if(EXISTS "${NX_CONFIG_OUTPUT}") - set(${VAR} ${NX_CONFIG_OUTPUT} PARENT_SCOPE) - endif() -endfunction() - find_program(NCXX4_CONFIG "ncxx4-config" PATHS "${netCDFCxx_ROOT}" PATH_SUFFIXES bin @@ -64,8 +53,8 @@ if 
(netCDFCxx_DEBUG) " NCXX4_CONFIG_LOCATION = ${NCXX4_CONFIG_LOCATION}") endif() -inspect_netcdf_config(NCXX4_HINTS_INCLUDE_DIR "${NCXX4_CONFIG}" "--includedir") -inspect_netcdf_config(NCXX4_HINTS_PREFIX "${NCXX4_CONFIG}" "--prefix") +bout_inspect_netcdf_config(NCXX4_HINTS_INCLUDE_DIR "${NCXX4_CONFIG}" "--includedir") +bout_inspect_netcdf_config(NCXX4_HINTS_PREFIX "${NCXX4_CONFIG}" "--prefix") find_path(netCDF_CXX_INCLUDE_DIR NAMES netcdf @@ -106,7 +95,7 @@ if (netCDFCxx_DEBUG) endif() mark_as_advanced(netCDF_CXX_LIBRARY) -inspect_netcdf_config(_ncxx4_version "${NCXX4_CONFIG}" "--version") +bout_inspect_netcdf_config(_ncxx4_version "${NCXX4_CONFIG}" "--version") string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\1" _netcdfcxx_version_major "${_ncxx4_version}") string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\2" _netcdfcxx_version_minor "${_ncxx4_version}") string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\3" _netcdfcxx_version_patch "${_ncxx4_version}") From 7ad77f68595df718921696e187769105d78d733e Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 9 Nov 2020 11:24:39 +0000 Subject: [PATCH 262/428] CMake: Ensure namespaced targets exist for netCDF --- cmake/FindnetCDF.cmake | 3 +++ cmake/FindnetCDFCxx.cmake | 3 +++ 2 files changed, 6 insertions(+) diff --git a/cmake/FindnetCDF.cmake b/cmake/FindnetCDF.cmake index 47906bf2f1..198fbb920b 100644 --- a/cmake/FindnetCDF.cmake +++ b/cmake/FindnetCDF.cmake @@ -30,6 +30,9 @@ include(BOUT++functions) find_package(netCDF QUIET CONFIG) if (netCDF_FOUND) set(netCDF_FOUND TRUE) + if (NOT TARGET netCDF::netcdf) + bout_add_library_alias(netCDF::netcdf netcdf) + endif() return() endif() diff --git a/cmake/FindnetCDFCxx.cmake b/cmake/FindnetCDFCxx.cmake index f2779dd70c..e0e4d313f4 100644 --- a/cmake/FindnetCDFCxx.cmake +++ b/cmake/FindnetCDFCxx.cmake @@ -30,6 +30,9 @@ include(BOUT++functions) find_package(netCDFCxx QUIET CONFIG) if (netCDFCxx_FOUND) set(netCDFCxx_FOUND TRUE) + if (NOT TARGET netCDF::netcdf-cxx4) + bout_add_library_alias(netCDF::netcdf-cxx4 netcdf-cxx4) + endif() return() endif() From d4ddb8496e0ceaf5a3d6e1307b774f63eee579a7 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 1 Dec 2020 15:24:58 +0000 Subject: [PATCH 263/428] Fix CMake config file following change to netCDF cmake module Renamed the FindNetCDF module to FindnetCDFCxx, but didn't update the config file --- bout++Config.cmake.in | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bout++Config.cmake.in b/bout++Config.cmake.in index cf12d9089e..fa7446211c 100644 --- a/bout++Config.cmake.in +++ b/bout++Config.cmake.in @@ -55,8 +55,8 @@ endif() if(EXISTS "@ScoreP_ROOT@") set(ScoreP_ROOT "@ScoreP_ROOT@") endif() -if(EXISTS "@NetCDF_ROOT@") - set(NetCDF_ROOT "@NetCDF_ROOT@") +if(EXISTS "@netCDFCxx_ROOT@") + set(netCDFCxx_ROOT "@netCDFCxx_ROOT@") endif() if(EXISTS "@HDF5F_ROOT@") set(HDF5F_ROOT "@HDF5F_ROOT@") @@ -79,7 +79,7 @@ if (BOUT_USE_OPENMP) find_dependency(OpenMP) endif() if (BOUT_HAS_NETCDF) - find_dependency(NetCDF @NetCDF_VERSION@) + find_dependency(netCDFCxx @netCDFCxx_VERSION@) endif() if (BOUT_HAS_HDF5) find_dependency(HDF5 @HDF5_VERSION@) From 986496d5db729f43186c67e43c7d83721d501be4 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Dec 2020 14:57:09 +0000 Subject: [PATCH 264/428] CMake: Set PYTHONPATH for tests Fixes #1905 --- CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 429488c276..8820c221e3 100644 --- 
a/CMakeLists.txt +++ b/CMakeLists.txt @@ -726,6 +726,9 @@ function(bout_add_integrated_test TESTNAME) # Set the actual test command if (BOUT_TEST_OPTIONS_USE_RUNTEST) add_test(NAME ${TESTNAME} COMMAND ./runtest) + set_tests_properties(${TESTNAME} PROPERTIES + ENVIRONMENT PYTHONPATH=${BOUT_PYTHONPATH}:$ENV{PYTHONPATH} + ) bout_copy_file(runtest) else() add_test(NAME ${TESTNAME} COMMAND ${TESTNAME}) From 4b6a1f9d6759bf7f418f6f1501f5abc1114aba9c Mon Sep 17 00:00:00 2001 From: Ben Dudson Date: Wed, 7 Apr 2021 21:47:18 +0100 Subject: [PATCH 265/428] CMake: Check if _ncxx4_version is set If for some reason the call to bout_inspect_netcdf_config fails to set the version string, don't set `netCDFCxx_VERSION`. I don't know why `_ncxx4_version` is not being set in this case: `ncxx4-config` is in the `PATH`, and `ncxx4-config --version` returns the expected string. --- cmake/FindnetCDFCxx.cmake | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/cmake/FindnetCDFCxx.cmake b/cmake/FindnetCDFCxx.cmake index e0e4d313f4..5721953c8f 100644 --- a/cmake/FindnetCDFCxx.cmake +++ b/cmake/FindnetCDFCxx.cmake @@ -99,10 +99,18 @@ endif() mark_as_advanced(netCDF_CXX_LIBRARY) bout_inspect_netcdf_config(_ncxx4_version "${NCXX4_CONFIG}" "--version") -string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\1" _netcdfcxx_version_major "${_ncxx4_version}") -string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\2" _netcdfcxx_version_minor "${_ncxx4_version}") -string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\3" _netcdfcxx_version_patch "${_ncxx4_version}") -set(netCDFCxx_VERSION "${_netcdf_version_major}.${_netcdf_version_minor}.${_netcdf_version_patch}${_netcdf_version_note}") +if (${_ncxx4_version}) + string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\1" _netcdfcxx_version_major "${_ncxx4_version}") + string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\2" _netcdfcxx_version_minor "${_ncxx4_version}") + string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\3" _netcdfcxx_version_patch "${_ncxx4_version}") + + message("${_ncxx4_version}") + set(netCDFCxx_VERSION "${_netcdf_version_major}.${_netcdf_version_minor}.${_netcdf_version_patch}${_netcdf_version_note}") + +else () + message(WARNING "Couldn't get NetCDF version") +endif() + unset(_ncxx4_version) unset(_netcdf_version_major) unset(_netcdf_version_minor) From d73daa91f65db579b731b21d5b0a86dad741f80e Mon Sep 17 00:00:00 2001 From: Ben Dudson Date: Wed, 7 Apr 2021 21:49:57 +0100 Subject: [PATCH 266/428] CMake: Find the Libuuid dependency If `BOUT_USE_UUID_SYSTEM_GENERATOR` is true, then the Libuuid dependency is needed to build the examples. 
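For reference, a downstream project that links against an installed BOUT++ should not need any changes of its own: `find_package(bout++)` loads `bout++Config.cmake`, which with this change also resolves Libuuid through `find_dependency()` whenever the library was configured with `BOUT_USE_UUID_SYSTEM_GENERATOR`. A minimal consumer sketch (the CMake version, the `myapp` project name and source file are purely illustrative):

    cmake_minimum_required(VERSION 3.13)
    project(myapp LANGUAGES CXX)

    # Loads bout++Config.cmake; with this change it also calls
    # find_dependency(Libuuid) when BOUT_USE_UUID_SYSTEM_GENERATOR is TRUE
    find_package(bout++ REQUIRED)

    add_executable(myapp myapp.cxx)
    target_link_libraries(myapp PRIVATE bout++::bout++)
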
--- bout++Config.cmake.in | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bout++Config.cmake.in b/bout++Config.cmake.in index fa7446211c..e9579161f8 100644 --- a/bout++Config.cmake.in +++ b/bout++Config.cmake.in @@ -18,6 +18,7 @@ set(BOUT_HAS_LAPACK @BOUT_HAS_LAPACK@) set(BOUT_HAS_PETSC @BOUT_HAS_PETSC@) set(BOUT_HAS_SLEPC @BOUT_HAS_SLEPC@) set(BOUT_HAS_SCOREP @BOUT_HAS_SCOREP@) +set(BOUT_USE_UUID_SYSTEM_GENERATOR @BOUT_USE_UUID_SYSTEM_GENERATOR@) set(BOUT_HAS_SUNDIALS @BOUT_HAS_SUNDIALS@) set(BOUT_HAS_GETTEXT @BOUT_HAS_GETTEXT@) @@ -110,5 +111,8 @@ endif() if (BOUT_HAS_SCOREP) find_dependency(ScoreP) endif() +if (BOUT_USE_UUID_SYSTEM_GENERATOR) + find_dependency(Libuuid) +endif() include("${CMAKE_CURRENT_LIST_DIR}/bout++Targets.cmake") From d07cc65db7215e4eed9b0d748693a5960f2daed5 Mon Sep 17 00:00:00 2001 From: Ben Dudson Date: Thu, 8 Apr 2021 09:24:31 +0100 Subject: [PATCH 267/428] Update cmake/FindnetCDFCxx.cmake Co-authored-by: Peter Hill --- cmake/FindnetCDFCxx.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/FindnetCDFCxx.cmake b/cmake/FindnetCDFCxx.cmake index 5721953c8f..e67c85f549 100644 --- a/cmake/FindnetCDFCxx.cmake +++ b/cmake/FindnetCDFCxx.cmake @@ -104,7 +104,7 @@ if (${_ncxx4_version}) string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\2" _netcdfcxx_version_minor "${_ncxx4_version}") string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\3" _netcdfcxx_version_patch "${_ncxx4_version}") - message("${_ncxx4_version}") + message(STATUS "Found netCDFCxx version ${_ncxx4_version}") set(netCDFCxx_VERSION "${_netcdf_version_major}.${_netcdf_version_minor}.${_netcdf_version_patch}${_netcdf_version_note}") else () From e1872599c0128ce12f90684758ffcb8f07a06bf3 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 8 Apr 2021 10:18:47 +0100 Subject: [PATCH 268/428] CMake: Fix netCDFCxx version parsing - `bout_inspect_netcdf_config` had wrong conditional - used wrong variables when reconstructing the full version number No need to get the separate version parts and recombine them, when we can just get the full version to begin with --- cmake/BOUT++functions.cmake | 2 +- cmake/FindnetCDFCxx.cmake | 11 +++-------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/cmake/BOUT++functions.cmake b/cmake/BOUT++functions.cmake index 9c36948277..5e7afaac58 100644 --- a/cmake/BOUT++functions.cmake +++ b/cmake/BOUT++functions.cmake @@ -104,7 +104,7 @@ function(bout_inspect_netcdf_config VAR NX_CONFIG ARG) OUTPUT_VARIABLE NX_CONFIG_OUTPUT OUTPUT_STRIP_TRAILING_WHITESPACE ) - if(EXISTS "${NX_CONFIG_OUTPUT}") + if (NX_CONFIG_OUTPUT) set(${VAR} ${NX_CONFIG_OUTPUT} PARENT_SCOPE) endif() endfunction() diff --git a/cmake/FindnetCDFCxx.cmake b/cmake/FindnetCDFCxx.cmake index e67c85f549..25642a80a2 100644 --- a/cmake/FindnetCDFCxx.cmake +++ b/cmake/FindnetCDFCxx.cmake @@ -99,14 +99,9 @@ endif() mark_as_advanced(netCDF_CXX_LIBRARY) bout_inspect_netcdf_config(_ncxx4_version "${NCXX4_CONFIG}" "--version") -if (${_ncxx4_version}) - string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\1" _netcdfcxx_version_major "${_ncxx4_version}") - string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\2" _netcdfcxx_version_minor "${_ncxx4_version}") - string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\)\.\([0-9]+\)\.\([0-9]+\)" "\\3" _netcdfcxx_version_patch "${_ncxx4_version}") - - message(STATUS "Found netCDFCxx version ${_ncxx4_version}") - set(netCDFCxx_VERSION 
"${_netcdf_version_major}.${_netcdf_version_minor}.${_netcdf_version_patch}${_netcdf_version_note}") - +if (_ncxx4_version) + string(REGEX REPLACE "netCDF-cxx4 \([0-9]+\.[0-9]+\.[0-9]+\)" "\\1" netCDFCxx_VERSION "${_ncxx4_version}") + message(STATUS "Found netCDFCxx version ${netCDFCxx_VERSION}") else () message(WARNING "Couldn't get NetCDF version") endif() From 5556b4e08f409dfede343fdc04d5885361162705 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 16 Apr 2021 11:49:10 +0100 Subject: [PATCH 269/428] CMake: Move bout_add_integrated_test out of main CML file --- CMakeLists.txt | 86 ------------------------------------- cmake/BOUT++functions.cmake | 86 +++++++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 86 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8820c221e3..e6d9504b7d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -666,92 +666,6 @@ target_compile_definitions(bout++ ################################################## # Tests -# Add a new integrated test. By default, the executable is named like -# the first source, stripped of its file extension. -# -# Required arguments: -# -# - TESTNAME: name of the test -# -# - SOURCES: list of source files -# -# Optional arguments: -# -# - USE_RUNTEST: if given, the test uses `./runtest` as the test -# command, otherwise it uses the executable -# -# - USE_DATA_BOUT_INP: if given, copy `data/BOUT.inp` -# -# - EXTRA_FILES: any extra files that are required to run the test -# -# - REQUIRES: list of variables that must be truthy to enable test -# -# - EXECUTABLE_NAME: name of the executable, if different from the -# first source name - -function(bout_add_integrated_test TESTNAME) - set(options USE_RUNTEST USE_DATA_BOUT_INP) - set(oneValueArgs EXECUTABLE_NAME) - set(multiValueArgs SOURCES EXTRA_FILES REQUIRES) - cmake_parse_arguments(BOUT_TEST_OPTIONS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - - foreach (REQUIREMENT IN LISTS BOUT_TEST_OPTIONS_REQUIRES) - if (NOT ${REQUIREMENT}) - message(STATUS "Not building test ${TESTNAME}, requirement not met: ${REQUIREMENT}") - return() - endif() - endforeach() - - add_executable(${TESTNAME} ${BOUT_TEST_OPTIONS_SOURCES}) - target_link_libraries(${TESTNAME} bout++) - target_include_directories(${TESTNAME} PRIVATE $) - - # Set the name of the executable. 
We either take it as an option, - # or use the first source file, stripping the file suffix - if (BOUT_TEST_OPTIONS_EXECUTABLE_NAME) - set_target_properties(${TESTNAME} PROPERTIES OUTPUT_NAME ${BOUT_TEST_OPTIONS_EXECUTABLE_NAME}) - else() - # If more than one source file, just get the first one - list(LENGTH ${BOUT_TEST_OPTIONS_SOURCES} BOUT_SOURCES_LENGTH) - if (BOUT_SOURCES_LENGTH GREATER 0) - list(GET ${BOUT_TEST_OPTIONS_SOURCES} 0 BOUT_TEST_FIRST_SOURCE) - else() - set(BOUT_TEST_FIRST_SOURCE ${BOUT_TEST_OPTIONS_SOURCES}) - endif() - # Strip the directory and file extension from the source file - get_filename_component(BOUT_TEST_EXECUTABLE_NAME ${BOUT_TEST_FIRST_SOURCE} NAME_WE) - set_target_properties(${TESTNAME} PROPERTIES OUTPUT_NAME ${BOUT_TEST_EXECUTABLE_NAME}) - endif() - - # Set the actual test command - if (BOUT_TEST_OPTIONS_USE_RUNTEST) - add_test(NAME ${TESTNAME} COMMAND ./runtest) - set_tests_properties(${TESTNAME} PROPERTIES - ENVIRONMENT PYTHONPATH=${BOUT_PYTHONPATH}:$ENV{PYTHONPATH} - ) - bout_copy_file(runtest) - else() - add_test(NAME ${TESTNAME} COMMAND ${TESTNAME}) - endif() - - # Copy the input file if needed - if (BOUT_TEST_OPTIONS_USE_DATA_BOUT_INP) - bout_copy_file(data/BOUT.inp) - endif() - - # Copy any other needed files - if (BOUT_TEST_OPTIONS_EXTRA_FILES) - foreach (FILE ${BOUT_TEST_OPTIONS_EXTRA_FILES}) - bout_copy_file("${FILE}") - endforeach() - endif() - - set_target_properties(${TESTNAME} PROPERTIES FOLDER tests/integrated) - - # Add the test to the build-check-integrated-tests target - add_dependencies(build-check-integrated-tests ${TESTNAME}) -endfunction() - option(PACKAGE_TESTS "Build the tests" ON) if(PACKAGE_TESTS) enable_testing() diff --git a/cmake/BOUT++functions.cmake b/cmake/BOUT++functions.cmake index 5e7afaac58..d89a285a85 100644 --- a/cmake/BOUT++functions.cmake +++ b/cmake/BOUT++functions.cmake @@ -84,6 +84,92 @@ function(bout_add_example EXAMPLENAME) endfunction() +# Add a new integrated test. By default, the executable is named like +# the first source, stripped of its file extension. +# +# Required arguments: +# +# - TESTNAME: name of the test +# +# - SOURCES: list of source files +# +# Optional arguments: +# +# - USE_RUNTEST: if given, the test uses `./runtest` as the test +# command, otherwise it uses the executable +# +# - USE_DATA_BOUT_INP: if given, copy `data/BOUT.inp` +# +# - EXTRA_FILES: any extra files that are required to run the test +# +# - REQUIRES: list of variables that must be truthy to enable test +# +# - EXECUTABLE_NAME: name of the executable, if different from the +# first source name + +function(bout_add_integrated_test TESTNAME) + set(options USE_RUNTEST USE_DATA_BOUT_INP) + set(oneValueArgs EXECUTABLE_NAME) + set(multiValueArgs SOURCES EXTRA_FILES REQUIRES TESTARGS) + cmake_parse_arguments(BOUT_TEST_OPTIONS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + foreach (REQUIREMENT IN LISTS BOUT_TEST_OPTIONS_REQUIRES) + if (NOT ${REQUIREMENT}) + message(STATUS "Not building test ${TESTNAME}, requirement not met: ${REQUIREMENT}") + return() + endif() + endforeach() + + add_executable(${TESTNAME} ${BOUT_TEST_OPTIONS_SOURCES}) + target_link_libraries(${TESTNAME} bout++) + target_include_directories(${TESTNAME} PRIVATE $) + + # Set the name of the executable. 
We either take it as an option, + # or use the first source file, stripping the file suffix + if (BOUT_TEST_OPTIONS_EXECUTABLE_NAME) + set_target_properties(${TESTNAME} PROPERTIES OUTPUT_NAME ${BOUT_TEST_OPTIONS_EXECUTABLE_NAME}) + else() + # If more than one source file, just get the first one + list(LENGTH ${BOUT_TEST_OPTIONS_SOURCES} BOUT_SOURCES_LENGTH) + if (BOUT_SOURCES_LENGTH GREATER 0) + list(GET ${BOUT_TEST_OPTIONS_SOURCES} 0 BOUT_TEST_FIRST_SOURCE) + else() + set(BOUT_TEST_FIRST_SOURCE ${BOUT_TEST_OPTIONS_SOURCES}) + endif() + # Strip the directory and file extension from the source file + get_filename_component(BOUT_TEST_EXECUTABLE_NAME ${BOUT_TEST_FIRST_SOURCE} NAME_WE) + set_target_properties(${TESTNAME} PROPERTIES OUTPUT_NAME ${BOUT_TEST_EXECUTABLE_NAME}) + endif() + + # Set the actual test command + if (BOUT_TEST_OPTIONS_USE_RUNTEST) + add_test(NAME ${TESTNAME} COMMAND ./runtest ${BOUT_TEST_OPTIONS_TESTARGS}) + set_tests_properties(${TESTNAME} PROPERTIES + ENVIRONMENT PYTHONPATH=${BOUT_PYTHONPATH}:$ENV{PYTHONPATH} + ) + bout_copy_file(runtest) + else() + add_test(NAME ${TESTNAME} COMMAND ${TESTNAME} ${BOUT_TEST_OPTIONS_TESTARGS}) + endif() + + # Copy the input file if needed + if (BOUT_TEST_OPTIONS_USE_DATA_BOUT_INP) + bout_copy_file(data/BOUT.inp) + endif() + + # Copy any other needed files + if (BOUT_TEST_OPTIONS_EXTRA_FILES) + foreach (FILE ${BOUT_TEST_OPTIONS_EXTRA_FILES}) + bout_copy_file("${FILE}") + endforeach() + endif() + + set_target_properties(${TESTNAME} PROPERTIES FOLDER tests/integrated) + + # Add the test to the build-check-integrated-tests target + add_dependencies(build-check-integrated-tests ${TESTNAME}) +endfunction() + # Add an alias for an imported target # Workaround for CMAke < 3.18 # Taken from https://github.com/conan-io/conan/issues/2125#issuecomment-351176653 From 115e442c94e6b960770a9a5ca3ec9e8ff3a3ffa5 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 16 Apr 2021 15:44:13 +0100 Subject: [PATCH 270/428] CMake: Generalise add test function to add MMS tests --- CMakeLists.txt | 10 ++++++++-- cmake/BOUT++functions.cmake | 18 ++++++++++++++---- 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e6d9504b7d..a9d1fca9d9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -674,13 +674,15 @@ if(PACKAGE_TESTS) # Tests need to add themselves as dependencies to these targets add_custom_target(build-check-unit-tests) add_custom_target(build-check-integrated-tests) + add_custom_target(build-check-mms-tests) # Build all the tests add_custom_target(build-check) - add_dependencies(build-check build-check-unit-tests build-check-integrated-tests) + add_dependencies(build-check build-check-unit-tests build-check-integrated-tests build-check-mms-tests) add_subdirectory(tests/unit EXCLUDE_FROM_ALL) add_subdirectory(tests/integrated EXCLUDE_FROM_ALL) + add_subdirectory(tests/MMS EXCLUDE_FROM_ALL) # Targets for running the tests add_custom_target(check-unit-tests @@ -691,9 +693,13 @@ if(PACKAGE_TESTS) COMMAND ctest -R "test-" --output-on-failure) add_dependencies(check-integrated-tests build-check-integrated-tests) + add_custom_target(check-mms-tests + COMMAND ctest -R "MMS-" --output-on-failure) + add_dependencies(check-mms-tests build-check-mms-tests) + # Run all the tests add_custom_target(check) - add_dependencies(check check-unit-tests check-integrated-tests) + add_dependencies(check check-unit-tests check-integrated-tests check-mms-tests) endif() option(BOUT_BUILD_EXAMPLES "Build the examples" OFF) diff --git 
a/cmake/BOUT++functions.cmake b/cmake/BOUT++functions.cmake index d89a285a85..da96c109f0 100644 --- a/cmake/BOUT++functions.cmake +++ b/cmake/BOUT++functions.cmake @@ -84,8 +84,8 @@ function(bout_add_example EXAMPLENAME) endfunction() -# Add a new integrated test. By default, the executable is named like -# the first source, stripped of its file extension. +# Add a new integrated or MMS test. By default, the executable is +# named like the first source, stripped of its file extension. # # Required arguments: # @@ -107,7 +107,7 @@ endfunction() # - EXECUTABLE_NAME: name of the executable, if different from the # first source name -function(bout_add_integrated_test TESTNAME) +function(bout_add_integrated_or_mms_test BUILD_CHECK_TARGET TESTNAME) set(options USE_RUNTEST USE_DATA_BOUT_INP) set(oneValueArgs EXECUTABLE_NAME) set(multiValueArgs SOURCES EXTRA_FILES REQUIRES TESTARGS) @@ -167,7 +167,17 @@ function(bout_add_integrated_test TESTNAME) set_target_properties(${TESTNAME} PROPERTIES FOLDER tests/integrated) # Add the test to the build-check-integrated-tests target - add_dependencies(build-check-integrated-tests ${TESTNAME}) + add_dependencies(${BUILD_CHECK_TARGET} ${TESTNAME}) +endfunction() + +# Add a new integrated test. See `bout_add_integrated_or_mms_test` for arguments +function(bout_add_integrated_test TESTNAME) + bout_add_integrated_or_mms_test(build-check-integrated-tests ${TESTNAME} ${ARGV}) +endfunction() + +# Add a new MMS test. See `bout_add_integrated_or_mms_test` for arguments +function(bout_add_mms_test TESTNAME) + bout_add_integrated_or_mms_test(build-check-mms-tests ${TESTNAME} ${ARGV}) endfunction() # Add an alias for an imported target From bcc886e9a1b3249961824fd7adabfb150ed23803 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 16 Apr 2021 15:45:24 +0100 Subject: [PATCH 271/428] CMake: Add option to disable running expensive tests --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index a9d1fca9d9..8a802382fe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -667,6 +667,7 @@ target_compile_definitions(bout++ # Tests option(PACKAGE_TESTS "Build the tests" ON) +option(BOUT_RUN_ALL_TESTS "Run all of the tests (this can be slow!)" OFF) if(PACKAGE_TESTS) enable_testing() From 25a25c3c0246a440b442c3dec84444a75a6ac509 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 16 Apr 2021 16:33:02 +0100 Subject: [PATCH 272/428] Use global mesh/dump in some MMS tests --- tests/MMS/laplace/laplace.cxx | 3 +++ tests/MMS/spatial/advection/advection.cxx | 3 +++ tests/MMS/spatial/d2dx2/test_d2dx2.cxx | 3 +++ tests/MMS/spatial/d2dz2/test_d2dz2.cxx | 3 +++ 4 files changed, 12 insertions(+) diff --git a/tests/MMS/laplace/laplace.cxx b/tests/MMS/laplace/laplace.cxx index e762637af1..e5b8985f44 100644 --- a/tests/MMS/laplace/laplace.cxx +++ b/tests/MMS/laplace/laplace.cxx @@ -4,6 +4,9 @@ #include #include +using bout::globals::mesh; +using bout::globals::dump; + int main(int argc, char **argv) { int init_err = BoutInitialise(argc, argv); if (init_err < 0) { diff --git a/tests/MMS/spatial/advection/advection.cxx b/tests/MMS/spatial/advection/advection.cxx index 9ef5efa201..d0285ee8dc 100644 --- a/tests/MMS/spatial/advection/advection.cxx +++ b/tests/MMS/spatial/advection/advection.cxx @@ -2,6 +2,9 @@ #include "derivs.hxx" #include "field_factory.hxx" +using bout::globals::mesh; +using bout::globals::dump; + int main(int argc, char** argv) { BoutInitialise(argc, argv); diff --git a/tests/MMS/spatial/d2dx2/test_d2dx2.cxx 
b/tests/MMS/spatial/d2dx2/test_d2dx2.cxx index aeb9984223..90861f29a4 100644 --- a/tests/MMS/spatial/d2dx2/test_d2dx2.cxx +++ b/tests/MMS/spatial/d2dx2/test_d2dx2.cxx @@ -6,6 +6,9 @@ #include #include +using bout::globals::mesh; +using bout::globals::dump; + int main(int argc, char** argv) { BoutInitialise(argc, argv); diff --git a/tests/MMS/spatial/d2dz2/test_d2dz2.cxx b/tests/MMS/spatial/d2dz2/test_d2dz2.cxx index c06cc5831d..13eae36b47 100644 --- a/tests/MMS/spatial/d2dz2/test_d2dz2.cxx +++ b/tests/MMS/spatial/d2dz2/test_d2dz2.cxx @@ -6,6 +6,9 @@ #include #include +using bout::globals::mesh; +using bout::globals::dump; + int main(int argc, char** argv) { BoutInitialise(argc, argv); From 66c29e0e0a65ce3ffca91c3b7a5a13c693253bca Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 16 Apr 2021 16:32:44 +0100 Subject: [PATCH 273/428] CMake: Add most MMS tests --- tests/MMS/CMakeLists.txt | 30 ++++++++++++++++++++++ tests/MMS/advection/runtest | 23 +++++++---------- tests/MMS/diffusion/CMakeLists.txt | 6 +++++ tests/MMS/diffusion2/CMakeLists.txt | 10 ++++++++ tests/MMS/hw/CMakeLists.txt | 6 +++++ tests/MMS/laplace/CMakeLists.txt | 6 +++++ tests/MMS/spatial/advection/CMakeLists.txt | 6 +++++ tests/MMS/spatial/d2dx2/CMakeLists.txt | 6 +++++ tests/MMS/spatial/d2dz2/CMakeLists.txt | 6 +++++ tests/MMS/spatial/diffusion/CMakeLists.txt | 6 +++++ tests/MMS/spatial/fci/CMakeLists.txt | 5 ++++ tests/MMS/time/CMakeLists.txt | 6 +++++ tests/MMS/wave-1d-y/CMakeLists.txt | 5 ++++ tests/MMS/wave-1d/CMakeLists.txt | 5 ++++ 14 files changed, 112 insertions(+), 14 deletions(-) create mode 100644 tests/MMS/CMakeLists.txt create mode 100644 tests/MMS/diffusion/CMakeLists.txt create mode 100644 tests/MMS/diffusion2/CMakeLists.txt create mode 100644 tests/MMS/hw/CMakeLists.txt create mode 100644 tests/MMS/laplace/CMakeLists.txt create mode 100644 tests/MMS/spatial/advection/CMakeLists.txt create mode 100644 tests/MMS/spatial/d2dx2/CMakeLists.txt create mode 100644 tests/MMS/spatial/d2dz2/CMakeLists.txt create mode 100644 tests/MMS/spatial/diffusion/CMakeLists.txt create mode 100644 tests/MMS/spatial/fci/CMakeLists.txt create mode 100644 tests/MMS/time/CMakeLists.txt create mode 100644 tests/MMS/wave-1d-y/CMakeLists.txt create mode 100644 tests/MMS/wave-1d/CMakeLists.txt diff --git a/tests/MMS/CMakeLists.txt b/tests/MMS/CMakeLists.txt new file mode 100644 index 0000000000..18e552253c --- /dev/null +++ b/tests/MMS/CMakeLists.txt @@ -0,0 +1,30 @@ +# add_subdirectory(advection) +add_subdirectory(diffusion) +add_subdirectory(diffusion2) +add_subdirectory(hw) +add_subdirectory(laplace) +add_subdirectory(spatial/advection) +add_subdirectory(spatial/d2dx2) +add_subdirectory(spatial/d2dz2) +add_subdirectory(spatial/diffusion) +add_subdirectory(spatial/fci) +add_subdirectory(time) +# add_subdirectory(time-petsc) +add_subdirectory(wave-1d) +add_subdirectory(wave-1d-y) + +######################################## +# The following require boutcore: + +# add_subdirectory(bracket) +# add_subdirectory(derivatives3) +# add_subdirectory(shiftedmetricinterp) +# add_subdirectory(upwinding3) + +######################################## +# The following are marked as broken + +# add_subdirectory(elm-pb) +# add_subdirectory(fieldalign) +# add_subdirectory(GBS) +# add_subdirectory(tokamak) diff --git a/tests/MMS/advection/runtest b/tests/MMS/advection/runtest index 1b721d9af2..93c346682f 100755 --- a/tests/MMS/advection/runtest +++ b/tests/MMS/advection/runtest @@ -8,7 +8,7 @@ #requires: all_tests #requires: make -from 
boututils.run_wrapper import shell, shell_safe, launch_safe +from boututils.run_wrapper import shell, launch_safe, build_and_log from boutdata.collect import collect from numpy import sqrt, max, abs, mean, array, log, pi @@ -22,12 +22,10 @@ import time if __name__ == "__main__": - print("Making MMS advection test") - shell_safe("make > make.log") + build_and_log("MMS advection") def make_parent(): - print("Making MMS advection test") - shell_safe("make -C .. > make.log") + build_and_log("MMS advection") # List of NX values to use #[16, 32, 64, 128, 256, 512, 1024] @@ -41,10 +39,8 @@ dt0 = 0.15 def run_mms(options,exit=True): success = True - err_2_all = [] - err_inf_all = [] - for opts,exp_order in options: - error_2 = [] # The L2 error (RMS) + for opts, exp_order in options: + error_2 = [] # The L2 error (RMS) error_inf = [] # The maximum error for nx in nxlist: @@ -52,12 +48,12 @@ def run_mms(options,exit=True): dx = 2.*pi / (nx) - args = opts + " mesh:nx="+str(nx+4)+" mesh:dx="+str(dx)+" MZ="+str(nx) #+" solver:timestep="+str(dt0/nx) + args = f"{opts} mesh:nx={nx+4} mesh:dx={dx} MZ={nx}" print(" Running with " + args) # Delete old data - shell("rm data/BOUT.dmp.*.nc") + shell("rm -f data/BOUT.dmp.*.nc") # Command to run cmd = "./advection "+args @@ -67,9 +63,8 @@ def run_mms(options,exit=True): s, out = launch_safe(cmd, nproc=nproc, pipe=True) # Save output to log file - f = open("run.log."+str(nx), "w") - f.write(out) - f.close() + with open(f"run.log.{nx}", "w") as f: + f.write(out) # Collect data E_f = collect("E_f", xguards=False, tind=[1,1], info=False, path="data") diff --git a/tests/MMS/diffusion/CMakeLists.txt b/tests/MMS/diffusion/CMakeLists.txt new file mode 100644 index 0000000000..b91b6a8327 --- /dev/null +++ b/tests/MMS/diffusion/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_mms_test(MMS-diffusion + SOURCES diffusion.cxx + EXECUTABLE_NAME cyto + USE_RUNTEST + USE_DATA_BOUT_INP + ) diff --git a/tests/MMS/diffusion2/CMakeLists.txt b/tests/MMS/diffusion2/CMakeLists.txt new file mode 100644 index 0000000000..f5b255c3c4 --- /dev/null +++ b/tests/MMS/diffusion2/CMakeLists.txt @@ -0,0 +1,10 @@ +bout_add_mms_test(MMS-diffusion2 + SOURCES diffusion.cxx + EXECUTABLE_NAME cyto + USE_RUNTEST + EXTRA_FILES + X/BOUT.inp + Y/BOUT.inp + Z/BOUT.inp + REQUIRES BOUT_RUN_ALL_TESTS + ) diff --git a/tests/MMS/hw/CMakeLists.txt b/tests/MMS/hw/CMakeLists.txt new file mode 100644 index 0000000000..e1a9a6ede2 --- /dev/null +++ b/tests/MMS/hw/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_mms_test(MMS-hw + SOURCES hw.cxx + USE_RUNTEST + USE_DATA_BOUT_INP + REQUIRES BOUT_RUN_ALL_TESTS +) diff --git a/tests/MMS/laplace/CMakeLists.txt b/tests/MMS/laplace/CMakeLists.txt new file mode 100644 index 0000000000..a0d83c977e --- /dev/null +++ b/tests/MMS/laplace/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_mms_test(MMS-laplace + SOURCES laplace.cxx + USE_RUNTEST + USE_DATA_BOUT_INP + REQUIRES BOUT_RUN_ALL_TESTS +) diff --git a/tests/MMS/spatial/advection/CMakeLists.txt b/tests/MMS/spatial/advection/CMakeLists.txt new file mode 100644 index 0000000000..c4a1f8f8d6 --- /dev/null +++ b/tests/MMS/spatial/advection/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_mms_test(MMS-spatial-advection + SOURCES advection.cxx + USE_RUNTEST + USE_DATA_BOUT_INP + REQUIRES BOUT_RUN_ALL_TESTS +) diff --git a/tests/MMS/spatial/d2dx2/CMakeLists.txt b/tests/MMS/spatial/d2dx2/CMakeLists.txt new file mode 100644 index 0000000000..0affa9beee --- /dev/null +++ b/tests/MMS/spatial/d2dx2/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_mms_test(MMS-spatial-d2dx2 + 
SOURCES test_d2dx2.cxx + USE_RUNTEST + USE_DATA_BOUT_INP + REQUIRES BOUT_RUN_ALL_TESTS +) diff --git a/tests/MMS/spatial/d2dz2/CMakeLists.txt b/tests/MMS/spatial/d2dz2/CMakeLists.txt new file mode 100644 index 0000000000..01b61eaa5d --- /dev/null +++ b/tests/MMS/spatial/d2dz2/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_mms_test(MMS-spatial-d2dz2 + SOURCES test_d2dz2.cxx + USE_RUNTEST + USE_DATA_BOUT_INP + REQUIRES BOUT_RUN_ALL_TESTS +) diff --git a/tests/MMS/spatial/diffusion/CMakeLists.txt b/tests/MMS/spatial/diffusion/CMakeLists.txt new file mode 100644 index 0000000000..470628d006 --- /dev/null +++ b/tests/MMS/spatial/diffusion/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_mms_test(MMS-spatial-diffusion + SOURCES diffusion.cxx + USE_RUNTEST + EXTRA_FILES X/BOUT.inp + REQUIRES BOUT_RUN_ALL_TESTS +) diff --git a/tests/MMS/spatial/fci/CMakeLists.txt b/tests/MMS/spatial/fci/CMakeLists.txt new file mode 100644 index 0000000000..430899a9c6 --- /dev/null +++ b/tests/MMS/spatial/fci/CMakeLists.txt @@ -0,0 +1,5 @@ +bout_add_mms_test(MMS-spatial-fci + SOURCES fci_mms.cxx + USE_RUNTEST + USE_DATA_BOUT_INP +) diff --git a/tests/MMS/time/CMakeLists.txt b/tests/MMS/time/CMakeLists.txt new file mode 100644 index 0000000000..9c3480629e --- /dev/null +++ b/tests/MMS/time/CMakeLists.txt @@ -0,0 +1,6 @@ +bout_add_mms_test(MMS-time + SOURCES time.cxx + USE_RUNTEST + USE_DATA_BOUT_INP + REQUIRES BOUT_RUN_ALL_TESTS +) diff --git a/tests/MMS/wave-1d-y/CMakeLists.txt b/tests/MMS/wave-1d-y/CMakeLists.txt new file mode 100644 index 0000000000..cc5cddfff4 --- /dev/null +++ b/tests/MMS/wave-1d-y/CMakeLists.txt @@ -0,0 +1,5 @@ +bout_add_mms_test(MMS-wave-1d-y + SOURCES wave.cxx + USE_RUNTEST + USE_DATA_BOUT_INP +) diff --git a/tests/MMS/wave-1d/CMakeLists.txt b/tests/MMS/wave-1d/CMakeLists.txt new file mode 100644 index 0000000000..a9ae3d748c --- /dev/null +++ b/tests/MMS/wave-1d/CMakeLists.txt @@ -0,0 +1,5 @@ +bout_add_mms_test(MMS-wave-1d + SOURCES wave.cxx + USE_RUNTEST + USE_DATA_BOUT_INP +) From 3bcd88ef44352ae178c61c11a1bd11955e044cf7 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 16 Apr 2021 17:03:23 +0100 Subject: [PATCH 274/428] CMake: Allow tests that rely on executables from other tests --- cmake/BOUT++functions.cmake | 72 ++++++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 28 deletions(-) diff --git a/cmake/BOUT++functions.cmake b/cmake/BOUT++functions.cmake index da96c109f0..941c0e7947 100644 --- a/cmake/BOUT++functions.cmake +++ b/cmake/BOUT++functions.cmake @@ -32,7 +32,7 @@ function(bout_add_model MODEL) target_link_libraries(${MODEL} bout++::bout++) target_include_directories(${MODEL} PRIVATE $) endfunction() - + # Build a BOUT++ example # @@ -85,16 +85,21 @@ endfunction() # Add a new integrated or MMS test. By default, the executable is -# named like the first source, stripped of its file extension. +# named like the first source, stripped of its file extension. 
If no +# sources are given, then you probably at least want to set +# USE_RUNTEST # # Required arguments: # -# - TESTNAME: name of the test +# - BUILD_CHECK_TARGET: the specific build-check target that should +# depend on this test # -# - SOURCES: list of source files +# - TESTNAME: name of the test # # Optional arguments: # +# - SOURCES: list of source files +# # - USE_RUNTEST: if given, the test uses `./runtest` as the test # command, otherwise it uses the executable # @@ -106,11 +111,13 @@ endfunction() # # - EXECUTABLE_NAME: name of the executable, if different from the # first source name - +# +# - EXTRA_DEPENDS: list of other targets that this test depends on +# function(bout_add_integrated_or_mms_test BUILD_CHECK_TARGET TESTNAME) set(options USE_RUNTEST USE_DATA_BOUT_INP) set(oneValueArgs EXECUTABLE_NAME) - set(multiValueArgs SOURCES EXTRA_FILES REQUIRES TESTARGS) + set(multiValueArgs SOURCES EXTRA_FILES REQUIRES TESTARGS EXTRA_DEPENDS) cmake_parse_arguments(BOUT_TEST_OPTIONS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) foreach (REQUIREMENT IN LISTS BOUT_TEST_OPTIONS_REQUIRES) @@ -120,25 +127,39 @@ function(bout_add_integrated_or_mms_test BUILD_CHECK_TARGET TESTNAME) endif() endforeach() - add_executable(${TESTNAME} ${BOUT_TEST_OPTIONS_SOURCES}) - target_link_libraries(${TESTNAME} bout++) - target_include_directories(${TESTNAME} PRIVATE $) - - # Set the name of the executable. We either take it as an option, - # or use the first source file, stripping the file suffix - if (BOUT_TEST_OPTIONS_EXECUTABLE_NAME) - set_target_properties(${TESTNAME} PROPERTIES OUTPUT_NAME ${BOUT_TEST_OPTIONS_EXECUTABLE_NAME}) - else() - # If more than one source file, just get the first one - list(LENGTH ${BOUT_TEST_OPTIONS_SOURCES} BOUT_SOURCES_LENGTH) - if (BOUT_SOURCES_LENGTH GREATER 0) - list(GET ${BOUT_TEST_OPTIONS_SOURCES} 0 BOUT_TEST_FIRST_SOURCE) + if (BOUT_TEST_OPTIONS_SOURCES) + # We've got some sources, so compile them into an executable and + # link against BOUT++ + add_executable(${TESTNAME} ${BOUT_TEST_OPTIONS_SOURCES}) + target_link_libraries(${TESTNAME} bout++) + target_include_directories(${TESTNAME} PRIVATE $) + set_target_properties(${TESTNAME} PROPERTIES FOLDER tests/integrated) + + # Set the name of the executable. 
We either take it as an option, + # or use the first source file, stripping the file suffix + if (BOUT_TEST_OPTIONS_EXECUTABLE_NAME) + set_target_properties(${TESTNAME} PROPERTIES OUTPUT_NAME ${BOUT_TEST_OPTIONS_EXECUTABLE_NAME}) else() - set(BOUT_TEST_FIRST_SOURCE ${BOUT_TEST_OPTIONS_SOURCES}) + # If more than one source file, just get the first one + list(LENGTH ${BOUT_TEST_OPTIONS_SOURCES} BOUT_SOURCES_LENGTH) + if (BOUT_SOURCES_LENGTH GREATER 0) + list(GET ${BOUT_TEST_OPTIONS_SOURCES} 0 BOUT_TEST_FIRST_SOURCE) + else() + set(BOUT_TEST_FIRST_SOURCE ${BOUT_TEST_OPTIONS_SOURCES}) + endif() + # Strip the directory and file extension from the source file + get_filename_component(BOUT_TEST_EXECUTABLE_NAME ${BOUT_TEST_FIRST_SOURCE} NAME_WE) + set_target_properties(${TESTNAME} PROPERTIES OUTPUT_NAME ${BOUT_TEST_EXECUTABLE_NAME}) endif() - # Strip the directory and file extension from the source file - get_filename_component(BOUT_TEST_EXECUTABLE_NAME ${BOUT_TEST_FIRST_SOURCE} NAME_WE) - set_target_properties(${TESTNAME} PROPERTIES OUTPUT_NAME ${BOUT_TEST_EXECUTABLE_NAME}) + + # Add the test to the build-check-integrated-tests target + add_dependencies(${BUILD_CHECK_TARGET} ${TESTNAME}) + else() + add_custom_target(${TESTNAME}) + endif() + + if (BOUT_TEST_OPTIONS_EXTRA_DEPENDS) + add_dependencies(${TESTNAME} ${BOUT_TEST_OPTIONS_EXTRA_DEPENDS}) endif() # Set the actual test command @@ -163,11 +184,6 @@ function(bout_add_integrated_or_mms_test BUILD_CHECK_TARGET TESTNAME) bout_copy_file("${FILE}") endforeach() endif() - - set_target_properties(${TESTNAME} PROPERTIES FOLDER tests/integrated) - - # Add the test to the build-check-integrated-tests target - add_dependencies(${BUILD_CHECK_TARGET} ${TESTNAME}) endfunction() # Add a new integrated test. See `bout_add_integrated_or_mms_test` for arguments From 3cb038a4b97aed1566ba17ac09a71b6c4431c9f4 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 16 Apr 2021 17:32:31 +0100 Subject: [PATCH 275/428] CMake: Add more MMS tests Those tests that rely on executables built "elsewhere" --- tests/MMS/CMakeLists.txt | 4 ++-- tests/MMS/advection/CMakeLists.txt | 12 ++++++++++++ tests/MMS/advection/arakawa/CMakeLists.txt | 7 +++++++ tests/MMS/advection/arakawa/advection | 1 - tests/MMS/advection/central/CMakeLists.txt | 7 +++++++ tests/MMS/advection/central/advection | 1 - tests/MMS/advection/runtest | 2 +- tests/MMS/advection/upwind/CMakeLists.txt | 7 +++++++ tests/MMS/advection/upwind/advection | 1 - tests/MMS/advection/weno3/CMakeLists.txt | 7 +++++++ tests/MMS/advection/weno3/advection | 1 - tests/MMS/time-petsc/CMakeLists.txt | 7 +++++++ 12 files changed, 50 insertions(+), 7 deletions(-) create mode 100644 tests/MMS/advection/CMakeLists.txt create mode 100644 tests/MMS/advection/arakawa/CMakeLists.txt delete mode 120000 tests/MMS/advection/arakawa/advection create mode 100644 tests/MMS/advection/central/CMakeLists.txt delete mode 120000 tests/MMS/advection/central/advection create mode 100644 tests/MMS/advection/upwind/CMakeLists.txt delete mode 120000 tests/MMS/advection/upwind/advection create mode 100644 tests/MMS/advection/weno3/CMakeLists.txt delete mode 120000 tests/MMS/advection/weno3/advection create mode 100644 tests/MMS/time-petsc/CMakeLists.txt diff --git a/tests/MMS/CMakeLists.txt b/tests/MMS/CMakeLists.txt index 18e552253c..9317d1f67a 100644 --- a/tests/MMS/CMakeLists.txt +++ b/tests/MMS/CMakeLists.txt @@ -1,4 +1,4 @@ -# add_subdirectory(advection) +add_subdirectory(advection) add_subdirectory(diffusion) add_subdirectory(diffusion2) 
add_subdirectory(hw) @@ -9,7 +9,7 @@ add_subdirectory(spatial/d2dz2) add_subdirectory(spatial/diffusion) add_subdirectory(spatial/fci) add_subdirectory(time) -# add_subdirectory(time-petsc) +add_subdirectory(time-petsc) add_subdirectory(wave-1d) add_subdirectory(wave-1d-y) diff --git a/tests/MMS/advection/CMakeLists.txt b/tests/MMS/advection/CMakeLists.txt new file mode 100644 index 0000000000..1e08b97cd1 --- /dev/null +++ b/tests/MMS/advection/CMakeLists.txt @@ -0,0 +1,12 @@ +add_executable(MMS-advection advection.cxx) +target_link_libraries(MMS-advection bout++) +set_target_properties(MMS-advection PROPERTIES FOLDER tests/MMS) +set_target_properties(MMS-advection PROPERTIES OUTPUT_NAME advection) +add_dependencies(build-check-mms-tests MMS-advection) +bout_copy_file(runtest) +bout_copy_file(data/BOUT.inp) + +add_subdirectory(arakawa) +add_subdirectory(central) +add_subdirectory(upwind) +add_subdirectory(weno3) diff --git a/tests/MMS/advection/arakawa/CMakeLists.txt b/tests/MMS/advection/arakawa/CMakeLists.txt new file mode 100644 index 0000000000..751daf308a --- /dev/null +++ b/tests/MMS/advection/arakawa/CMakeLists.txt @@ -0,0 +1,7 @@ +bout_add_mms_test(MMS-advection-arakawa + USE_RUNTEST + USE_DATA_BOUT_INP + EXTRA_FILES parent.py + EXTRA_DEPENDS MMS-advection + REQUIRES BOUT_RUN_ALL_TESTS +) diff --git a/tests/MMS/advection/arakawa/advection b/tests/MMS/advection/arakawa/advection deleted file mode 120000 index 8cf5b0a575..0000000000 --- a/tests/MMS/advection/arakawa/advection +++ /dev/null @@ -1 +0,0 @@ -../advection \ No newline at end of file diff --git a/tests/MMS/advection/central/CMakeLists.txt b/tests/MMS/advection/central/CMakeLists.txt new file mode 100644 index 0000000000..0326bdc996 --- /dev/null +++ b/tests/MMS/advection/central/CMakeLists.txt @@ -0,0 +1,7 @@ +bout_add_mms_test(MMS-advection-central + USE_RUNTEST + USE_DATA_BOUT_INP + EXTRA_FILES parent.py + EXTRA_DEPENDS MMS-advection + REQUIRES BOUT_RUN_ALL_TESTS +) diff --git a/tests/MMS/advection/central/advection b/tests/MMS/advection/central/advection deleted file mode 120000 index 8cf5b0a575..0000000000 --- a/tests/MMS/advection/central/advection +++ /dev/null @@ -1 +0,0 @@ -../advection \ No newline at end of file diff --git a/tests/MMS/advection/runtest b/tests/MMS/advection/runtest index 93c346682f..79f065a25b 100755 --- a/tests/MMS/advection/runtest +++ b/tests/MMS/advection/runtest @@ -56,7 +56,7 @@ def run_mms(options,exit=True): shell("rm -f data/BOUT.dmp.*.nc") # Command to run - cmd = "./advection "+args + cmd = "../advection " + args # Launch using MPI start_time_ = time.time() diff --git a/tests/MMS/advection/upwind/CMakeLists.txt b/tests/MMS/advection/upwind/CMakeLists.txt new file mode 100644 index 0000000000..c71489f70e --- /dev/null +++ b/tests/MMS/advection/upwind/CMakeLists.txt @@ -0,0 +1,7 @@ +bout_add_mms_test(MMS-advection-upwind + USE_RUNTEST + USE_DATA_BOUT_INP + EXTRA_FILES parent.py + EXTRA_DEPENDS MMS-advection + REQUIRES BOUT_RUN_ALL_TESTS +) diff --git a/tests/MMS/advection/upwind/advection b/tests/MMS/advection/upwind/advection deleted file mode 120000 index 8cf5b0a575..0000000000 --- a/tests/MMS/advection/upwind/advection +++ /dev/null @@ -1 +0,0 @@ -../advection \ No newline at end of file diff --git a/tests/MMS/advection/weno3/CMakeLists.txt b/tests/MMS/advection/weno3/CMakeLists.txt new file mode 100644 index 0000000000..32c5ebf3b6 --- /dev/null +++ b/tests/MMS/advection/weno3/CMakeLists.txt @@ -0,0 +1,7 @@ +bout_add_mms_test(MMS-advection-weno3 + USE_RUNTEST + USE_DATA_BOUT_INP + 
EXTRA_FILES parent.py + EXTRA_DEPENDS MMS-advection + REQUIRES BOUT_RUN_ALL_TESTS +) diff --git a/tests/MMS/advection/weno3/advection b/tests/MMS/advection/weno3/advection deleted file mode 120000 index 8cf5b0a575..0000000000 --- a/tests/MMS/advection/weno3/advection +++ /dev/null @@ -1 +0,0 @@ -../advection \ No newline at end of file diff --git a/tests/MMS/time-petsc/CMakeLists.txt b/tests/MMS/time-petsc/CMakeLists.txt new file mode 100644 index 0000000000..3cbbe6f717 --- /dev/null +++ b/tests/MMS/time-petsc/CMakeLists.txt @@ -0,0 +1,7 @@ +bout_add_mms_test(MMS-time-petsc + USE_RUNTEST + REQUIRES + BOUT_HAS_PETSC + BOUT_RUN_ALL_TESTS + EXTRA_DEPENDS MMS-time +) From 14648833148fca3bcfcaf6539451a261e33b2bbb Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 26 May 2020 11:41:42 +0100 Subject: [PATCH 276/428] Force output_debug on if CHECK > 3 --- CMakeLists.txt | 11 ++++++----- configure | 1 + configure.ac | 1 + 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8a802382fe..377b8154fb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -371,15 +371,16 @@ target_compile_definitions(bout++ PUBLIC "BOUT_CHECK=${CHECK}") set(BOUT_CHECK_LEVEL ${CHECK}) -option(DEBUG_ENABLED "Enable extra debug output" OFF) -option(ENABLE_OUTPUT_DEBUG "Enable extra debug output" OFF) -if (ENABLE_OUTPUT_DEBUG OR DEBUG_ENABLED) +include(CMakeDependentOption) +cmake_dependent_option(ENABLE_OUTPUT_DEBUG "Enable extra debug output" OFF + "CHECK LESS 3" ON) +message(STATUS "Extra debug output: BOUT_USE_OUTPUT_DEBUG=${ENABLE_OUTPUT_DEBUG}") +set(BOUT_USE_OUTPUT_DEBUG ${ENABLE_OUTPUT_DEBUG}) +if (BOUT_USE_OUTPUT_DEBUG) target_compile_definitions(bout++ PUBLIC "DEBUG_ENABLED" PUBLIC "BOUT_OUTPUT_DEBUG") endif() -message(STATUS "Extra debug output: DEBUG_ENABLED=${DEBUG_ENABLED}") -set(BOUT_USE_OUTPUT_DEBUG ${DEBUG_ENABLED}) option(ENABLE_SIGNAL "SegFault handling" ON) if (ENABLE_SIGNAL) diff --git a/configure b/configure index 363d7cad7b..da1c6680e2 100755 --- a/configure +++ b/configure @@ -6094,6 +6094,7 @@ $as_echo "$as_me: -> Level 1 (Basic checking)" >&6;} 3) : { $as_echo "$as_me:${as_lineno-$LINENO}: -> Level 3 (Full checking + stack tracing)" >&5 $as_echo "$as_me: -> Level 3 (Full checking + stack tracing)" >&6;} + enable_output_debug=yes CXXFLAGS="$CXXFLAGS -DCHECK=3" CHECK_LEVEL="3" ;; #( *) : diff --git a/configure.ac b/configure.ac index bd1db73019..f6793d504a 100644 --- a/configure.ac +++ b/configure.ac @@ -314,6 +314,7 @@ AS_IF([test "x$enable_checks" != "xno" && test "x$enable_checks" != "x0"], [ CXXFLAGS="$CXXFLAGS -DCHECK=1" CHECK_LEVEL="1"], [3], [AC_MSG_NOTICE([ -> Level 3 (Full checking + stack tracing)]) + enable_output_debug=yes CXXFLAGS="$CXXFLAGS -DCHECK=3" CHECK_LEVEL="3"], [AC_MSG_NOTICE([ -> Level 2 (Basic checking + stack tracing)]) From 134c2a69480bfef0fb47bebd35d5e24653f5a13e Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 26 May 2020 15:25:42 +0100 Subject: [PATCH 277/428] Namespace all CMake options Also use cmake_dependent_option for using external fmt/mpark.variant --- CMakeLists.txt | 153 ++++++++++--------- manual/sphinx/user_docs/advanced_install.rst | 2 +- manual/sphinx/user_docs/installing.rst | 34 ++--- manual/sphinx/user_docs/physics_models.rst | 10 +- src/bout++.cxx | 2 +- 5 files changed, 102 insertions(+), 99 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 377b8154fb..1ecdee8a6d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -21,17 +21,19 @@ project(BOUT++ find_program(MPIEXEC_EXECUTABLE NAMES mpiexec 
mpirun) find_package(MPI REQUIRED) +include(CMakeDependentOption) + # Override default option(INSTALL_GTEST "Enable installation of googletest. (Projects embedding googletest may want to turn this OFF.)" OFF) set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) include(BOUT++functions) -option(GIT_SUBMODULE "Check submodules during build" ON) +option(BOUT_USE_GIT_SUBMODULE "Check submodules during build" ON) # Adapted from https://cliutils.gitlab.io/modern-cmake/chapters/projects/submodule.html # Update submodules as needed function(bout_update_submodules) - if(NOT GIT_SUBMODULE) + if(NOT BOUT_USE_GIT_SUBMODULE) return() endif() find_package(Git QUIET) @@ -319,11 +321,8 @@ target_compile_definitions(bout++ target_compile_features(bout++ PUBLIC cxx_std_11) set_target_properties(bout++ PROPERTIES CXX_EXTENSIONS OFF) -if(GIT_SUBMODULE OR (EXISTS externalpackages/mpark.variant/CMakeLists.txt)) - option(BOUT_USE_SYSTEM_MPARK_VARIANT "Use external installation of mpark.variant" OFF) -else() - option(BOUT_USE_SYSTEM_MPARK_VARIANT "Use external installation of mpark.variant" ON) -endif() +cmake_dependent_option(BOUT_USE_SYSTEM_MPARK_VARIANT "Use external installation of mpark.variant" OFF + "BOUT_USE_GIT_SUBMODULE;EXISTS externalpackages/mpark.variant/CMakeLists.txt" ON) if(BOUT_USE_SYSTEM_MPARK_VARIANT) find_package(mpark_variant REQUIRED) @@ -334,13 +333,13 @@ else() if(TARGET mpark_variant) message(STATUS "Using mpark.variant submodule") else() - message(FATAL_ERROR "mpark_variant not found! Have you disabled the git submodules (GIT_SUBMODULE)?") + message(FATAL_ERROR "mpark_variant not found! Have you disabled the git submodules (BOUT_USE_GIT_SUBMODULE)?") endif() endif() target_link_libraries(bout++ PUBLIC mpark_variant) -option(ENABLE_WARNINGS "Enable compiler warnings" ON) -if (ENABLE_WARNINGS) +option(BOUT_ENABLE_WARNINGS "Enable compiler warnings" ON) +if (BOUT_ENABLE_WARNINGS) target_compile_options(bout++ PRIVATE $<$,$,$>: -Wall -Wextra > @@ -371,54 +370,53 @@ target_compile_definitions(bout++ PUBLIC "BOUT_CHECK=${CHECK}") set(BOUT_CHECK_LEVEL ${CHECK}) -include(CMakeDependentOption) -cmake_dependent_option(ENABLE_OUTPUT_DEBUG "Enable extra debug output" OFF +cmake_dependent_option(BOUT_ENABLE_OUTPUT_DEBUG "Enable extra debug output" OFF "CHECK LESS 3" ON) -message(STATUS "Extra debug output: BOUT_USE_OUTPUT_DEBUG=${ENABLE_OUTPUT_DEBUG}") -set(BOUT_USE_OUTPUT_DEBUG ${ENABLE_OUTPUT_DEBUG}) +message(STATUS "Extra debug output: BOUT_USE_OUTPUT_DEBUG=${BOUT_ENABLE_OUTPUT_DEBUG}") +set(BOUT_USE_OUTPUT_DEBUG ${BOUT_ENABLE_OUTPUT_DEBUG}) if (BOUT_USE_OUTPUT_DEBUG) target_compile_definitions(bout++ PUBLIC "DEBUG_ENABLED" PUBLIC "BOUT_OUTPUT_DEBUG") endif() -option(ENABLE_SIGNAL "SegFault handling" ON) -if (ENABLE_SIGNAL) +option(BOUT_ENABLE_SIGNAL "SegFault handling" ON) +message(STATUS "Signal handling: BOUT_USE_SIGNAL=${BOUT_ENABLE_SIGNAL}") +set(BOUT_USE_SIGNAL ${BOUT_ENABLE_SIGNAL}) +if (BOUT_USE_SIGNAL) target_compile_definitions(bout++ PUBLIC "SIGHANDLE" PUBLIC "BOUT_SIGHANDLE") endif() -message(STATUS "Signal handling: SIGHANDLE=${ENABLE_SIGNAL}") -set(BOUT_USE_SIGNAL ${ENABLE_SIGNAL}) -option(ENABLE_COLOR "Output coloring" ON) -if (ENABLE_COLOR) +option(BOUT_ENABLE_COLOR "Output coloring" ON) +message(STATUS "Output coloring: BOUT_USE_COLOR=${BOUT_ENABLE_COLOR}") +set(BOUT_USE_COLOR ${BOUT_ENABLE_COLOR}) +if (BOUT_USE_COLOR) target_compile_definitions(bout++ PUBLIC "LOGCOLOR" PUBLIC "BOUT_LOGCOLOR") endif() -message(STATUS "Output coloring: 
LOGCOLOR=${ENABLE_COLOR}") -set(BOUT_USE_COLOR ${ENABLE_COLOR}) -option(ENABLE_TRACK "Field name tracking" ON) -if (ENABLE_TRACK) +option(BOUT_ENABLE_TRACK "Field name tracking" ON) +message(STATUS "Field name tracking: BOUT_USE_TRACK=${BOUT_ENABLE_TRACK}") +set(BOUT_USE_TRACK ${BOUT_ENABLE_TRACK}) +if (BOUT_USE_TRACK) target_compile_definitions(bout++ PUBLIC "TRACK" PUBLIC "BOUT_TRACK") endif() -message(STATUS "Field name tracking: TRACK=${ENABLE_TRACK}") -set(BOUT_USE_TRACK ${ENABLE_TRACK}) -option(ENABLE_SIGFPE "Signalling floating point exceptions" OFF) -if (ENABLE_SIGFPE) +option(BOUT_ENABLE_SIGFPE "Signalling floating point exceptions" OFF) +message(STATUS "Signalling floating point exceptions: BOUT_USE_SIGFPE=${BOUT_ENABLE_SIGFPE}") +set(BOUT_USE_SIGFPE ${BOUT_ENABLE_SIGFPE}) +if (BOUT_USE_SIGFPE) target_compile_definitions(bout++ PUBLIC "BOUT_FPE") endif() -message(STATUS "Signalling floating point exceptions: BOUT_FPE=${ENABLE_SIGFPE}") -set(BOUT_USE_SIGFPE ${ENABLE_SIGFPE}) -option(ENABLE_BACKTRACE "Enable backtrace" ON) -if (ENABLE_BACKTRACE) +option(BOUT_ENABLE_BACKTRACE "Enable backtrace" ON) +if (BOUT_ENABLE_BACKTRACE) find_program(ADDR2LINE_FOUND addr2line) if (NOT ADDR2LINE_FOUND) message(FATAL_ERROR "addr2line not found") @@ -428,11 +426,13 @@ if (ENABLE_BACKTRACE) PUBLIC "BOUT_BACKTRACE") target_link_libraries(bout++ PUBLIC ${CMAKE_DL_LIBS}) endif() -message(STATUS "Enable backtrace: BACKTRACE=${ENABLE_BACKTRACE}") -set(BOUT_USE_BACKTRACE ${ENABLE_BACKTRACE}) +message(STATUS "Enable backtrace: BOUT_USE_BACKTRACE=${BOUT_ENABLE_BACKTRACE}") +set(BOUT_USE_BACKTRACE ${BOUT_ENABLE_BACKTRACE}) -option(ENABLE_OPENMP "Enable OpenMP support" OFF) -if (ENABLE_OPENMP) +option(BOUT_ENABLE_OPENMP "Enable OpenMP support" OFF) +set(BOUT_OPENMP_SCHEDULE static CACHE STRING "Set OpenMP schedule") +set_property(CACHE BOUT_OPENMP_SCHEDULE PROPERTY STRINGS static dynamic guided auto) +if (BOUT_ENABLE_OPENMP) find_package(OpenMP REQUIRED) target_link_libraries(bout++ PUBLIC OpenMP::OpenMP_CXX) set(possible_openmp_schedules static dynamic guided auto) @@ -445,8 +445,8 @@ if (ENABLE_OPENMP) PUBLIC "BOUT_OPENMP_SCHEDULE=${OPENMP_SCHEDULE}") message(STATUS "OpenMP schedule: ${OPENMP_SCHEDULE}") endif() -message(STATUS "Enable OpenMP: ${ENABLE_OPENMP}") -set(BOUT_USE_OPENMP ${ENABLE_OPENMP}) +message(STATUS "Enable OpenMP: ${BOUT_ENABLE_OPENMP}") +set(BOUT_USE_OPENMP ${BOUT_ENABLE_OPENMP}) include(GetGitRevisionDescription) get_git_head_revision(GIT_REFSPEC GIT_SHA1) @@ -457,15 +457,15 @@ set(BOUT_GIT_REVISION ${GIT_SHA1}) # Optional dependencies -option(USE_PVODE "Enable support for bundled PVODE" ON) -if (USE_PVODE) +option(BOUT_USE_PVODE "Enable support for bundled PVODE" ON) +if (BOUT_USE_PVODE) add_subdirectory(externalpackages/PVODE) target_link_libraries(bout++ PUBLIC pvode pvpre) target_compile_definitions(bout++ PUBLIC "BOUT_HAS_PVODE") endif() -message(STATUS "PVODE support: ${USE_PVODE}") -set(BOUT_HAS_PVODE ${USE_PVODE}) +message(STATUS "PVODE support: ${BOUT_USE_PVODE}") +set(BOUT_HAS_PVODE ${BOUT_USE_PVODE}) option(BOUT_USE_NETCDF "Enable support for NetCDF output" ON) option(BOUT_DOWNLOAD_NETCDF_CXX4 "Download and build netCDF-cxx4" OFF) @@ -490,11 +490,11 @@ if (BOUT_USE_NETCDF) PUBLIC "NCDF4" PUBLIC "BOUT_HAS_NETCDF") endif() -message(STATUS "NetCDF support: ${USE_NETCDF}") -set(BOUT_HAS_NETCDF ${USE_NETCDF}) +message(STATUS "NetCDF support: ${BOUT_USE_NETCDF}") +set(BOUT_HAS_NETCDF ${BOUT_USE_NETCDF}) -option(USE_HDF5 "Enable support for HDF5 output" OFF) -if (USE_HDF5) 
+option(BOUT_USE_HDF5 "Enable support for HDF5 output" OFF) +if (BOUT_USE_HDF5) find_package(HDF5 REQUIRED COMPONENTS CXX) target_compile_definitions(bout++ PUBLIC "HDF5" @@ -502,21 +502,21 @@ if (USE_HDF5) target_link_libraries(bout++ PUBLIC "${HDF5_CXX_LIBRARIES}") target_include_directories(bout++ PUBLIC "${HDF5_CXX_INCLUDE_DIRS}") endif() -message(STATUS "HDF5 support: ${USE_HDF5}") -set(BOUT_HAS_HDF5 ${USE_HDF5}) +message(STATUS "HDF5 support: ${BOUT_USE_HDF5}") +set(BOUT_HAS_HDF5 ${BOUT_USE_HDF5}) -option(USE_FFTW "Enable support for FFTW" ON) -if (USE_FFTW) +option(BOUT_USE_FFTW "Enable support for FFTW" ON) +if (BOUT_USE_FFTW) find_package(FFTW REQUIRED) target_compile_definitions(bout++ PUBLIC "BOUT_HAS_FFTW") target_link_libraries(bout++ PUBLIC FFTW::FFTW) endif() -message(STATUS "FFTW support: ${USE_FFTW}") -set(BOUT_HAS_FFTW ${USE_FFTW}) +message(STATUS "FFTW support: ${BOUT_USE_FFTW}") +set(BOUT_HAS_FFTW ${BOUT_USE_FFTW}) -option(USE_LAPACK "Enable support for LAPACK" ON) -if (USE_LAPACK) +option(BOUT_USE_LAPACK "Enable support for LAPACK" ON) +if (BOUT_USE_LAPACK) if (NOT CMAKE_SYSTEM_NAME STREQUAL "CrayLinuxEnvironment") # Cray wrappers sort this out for us find_package(LAPACK REQUIRED) @@ -526,11 +526,11 @@ if (USE_LAPACK) PUBLIC "LAPACK" PUBLIC "BOUT_HAS_LAPACK") endif() -message(STATUS "LAPACK support: ${USE_LAPACK}") -set(BOUT_HAS_LAPACK ${USE_LAPACK}) +message(STATUS "LAPACK support: ${BOUT_USE_LAPACK}") +set(BOUT_HAS_LAPACK ${BOUT_USE_LAPACK}) -option(USE_PETSC "Enable support for PETSc time solvers and inversions" OFF) -if (USE_PETSC) +option(BOUT_USE_PETSC "Enable support for PETSc time solvers and inversions" OFF) +if (BOUT_USE_PETSC) if (NOT CMAKE_SYSTEM_NAME STREQUAL "CrayLinuxEnvironment") # Cray wrappers sort this out for us find_package(PETSc REQUIRED) @@ -539,21 +539,21 @@ if (USE_PETSC) target_compile_definitions(bout++ PUBLIC "BOUT_HAS_PETSC") endif() -message(STATUS "PETSc support: ${USE_PETSC}") -set(BOUT_HAS_PETSC ${USE_PETSC}) +message(STATUS "PETSc support: ${BOUT_USE_PETSC}") +set(BOUT_HAS_PETSC ${BOUT_USE_PETSC}) -option(USE_SLEPC "Enable support for SLEPc eigen solver" OFF) -if (USE_SLEPC) +option(BOUT_USE_SLEPC "Enable support for SLEPc eigen solver" OFF) +if (BOUT_USE_SLEPC) find_package(SLEPc REQUIRED) target_compile_definitions(bout++ PUBLIC "BOUT_HAS_SLEPC") target_link_libraries(bout++ PUBLIC SLEPc::SLEPc) endif() -message(STATUS "SLEPc support: ${USE_SLEPC}") -set(BOUT_HAS_SLEPC ${USE_SLEPC}) +message(STATUS "SLEPc support: ${BOUT_USE_SLEPC}") +set(BOUT_HAS_SLEPC ${BOUT_USE_SLEPC}) -option(USE_SUNDIALS "Enable support for SUNDIALS time solvers" OFF) -if (USE_SUNDIALS) +option(BOUT_USE_SUNDIALS "Enable support for SUNDIALS time solvers" OFF) +if (BOUT_USE_SUNDIALS) find_package(SUNDIALS REQUIRED) target_compile_definitions(bout++ PUBLIC "BOUT_HAS_CVODE" @@ -564,11 +564,14 @@ if (USE_SUNDIALS) target_link_libraries(bout++ PUBLIC SUNDIALS::ida) target_link_libraries(bout++ PUBLIC SUNDIALS::arkode) endif() -message(STATUS "SUNDIALS support: ${USE_SUNDIALS}") -set(BOUT_HAS_SUNDIALS ${USE_SUNDIALS}) - -option(USE_NLS "Enable Native Language Support" ON) -if (USE_NLS) +message(STATUS "SUNDIALS support: ${BOUT_USE_SUNDIALS}") +set(BOUT_HAS_SUNDIALS ${BOUT_USE_SUNDIALS}) +set(BOUT_HAS_ARKODE ${BOUT_USE_SUNDIALS}) +set(BOUT_HAS_CVODE ${BOUT_USE_SUNDIALS}) +set(BOUT_HAS_IDA ${BOUT_USE_SUNDIALS}) + +option(BOUT_USE_NLS "Enable Native Language Support" ON) +if (BOUT_USE_NLS) find_package(Gettext) if (GETTEXT_FOUND) target_compile_definitions(bout++ @@ 
-582,10 +585,10 @@ if (USE_NLS) endif() endif() endif() -set(BOUT_HAS_GETTEXT ${USE_NLS}) +set(BOUT_HAS_GETTEXT ${BOUT_USE_NLS}) -option(USE_SCOREP "Enable support for Score-P based instrumentation" OFF) -if (USE_SCOREP) +option(BOUT_USE_SCOREP "Enable support for Score-P based instrumentation" OFF) +if (BOUT_USE_SCOREP) target_compile_definitions(bout++ PUBLIC "BOUT_HAS_SCOREP") message(STATUS "Score-P support enabled. Please make sure you are calling CMake like so: @@ -593,7 +596,7 @@ if (USE_SCOREP) SCOREP_WRAPPER=off cmake -DCMAKE_C_COMPILER=scorep-mpicc -DCMAKE_CXX_COMPILER=scorep-mpicxx ") endif() -set(BOUT_HAS_SCOREP ${USE_SCOREP}) +set(BOUT_HAS_SCOREP ${BOUT_USE_SCOREP}) option(BOUT_USE_UUID_SYSTEM_GENERATOR "Enable support for using a system UUID generator" ON) if (BOUT_USE_UUID_SYSTEM_GENERATOR) diff --git a/manual/sphinx/user_docs/advanced_install.rst b/manual/sphinx/user_docs/advanced_install.rst index 2a5e5c2fae..c58570392b 100644 --- a/manual/sphinx/user_docs/advanced_install.rst +++ b/manual/sphinx/user_docs/advanced_install.rst @@ -823,7 +823,7 @@ The minimal required CMake options are as follows: .. code-block:: bash - -DENABLE_BACKTRACE=OFF \ + -DBOUT_ENABLE_BACKTRACE=OFF \ -DCMAKE_CXX_FLAGS="/permissive- /EHsc /bigobj" \ -DBUILD_SHARED_LIBS=OFF diff --git a/manual/sphinx/user_docs/installing.rst b/manual/sphinx/user_docs/installing.rst index a0e29114ca..bbbcb3927c 100644 --- a/manual/sphinx/user_docs/installing.rst +++ b/manual/sphinx/user_docs/installing.rst @@ -300,16 +300,16 @@ You can see what build options are available with:: $ cmake . -B build -LH ... // Enable backtrace - ENABLE_BACKTRACE:BOOL=ON + BOUT_ENABLE_BACKTRACE:BOOL=ON // Output coloring - ENABLE_COLOR:BOOL=ON + BOUT_ENABLE_COLOR:BOOL=ON // Enable OpenMP support - ENABLE_OPENMP:BOOL=OFF + BOUT_ENABLE_OPENMP:BOOL=OFF // Enable support for PETSc time solvers and inversions - USE_PETSC:BOOL=OFF + BOUT_USE_PETSC:BOOL=OFF ... CMake uses the ``-D=`` syntax to control these @@ -329,12 +329,12 @@ A more complicated CMake configuration command might look like:: $ CC=mpicc CXX=mpic++ cmake . -B build \ - -DUSE_PETSC=ON -DPETSC_DIR=/path/to/petsc/ \ - -DUSE_SLEPC=ON -DSLEPC_DIR=/path/to/slepc/ \ - -DUSE_SUNDIALS=ON -DSUNDIALS_ROOT=/path/to/sundials \ - -DUSE_NETCDF=ON -DNetCDF_ROOT=/path/to/netcdf \ - -DENABLE_OPENMP=ON \ - -DENABLE_SIGFPE=OFF \ + -DBOUT_USE_PETSC=ON -DPETSC_DIR=/path/to/petsc/ \ + -DBOUT_USE_SLEPC=ON -DSLEPC_DIR=/path/to/slepc/ \ + -DBOUT_USE_SUNDIALS=ON -DSUNDIALS_ROOT=/path/to/sundials \ + -DBOUT_USE_NETCDF=ON -DNetCDF_ROOT=/path/to/netcdf \ + -DBOUT_ENABLE_OPENMP=ON \ + -DBOUT_ENABLE_SIGFPE=OFF \ -DCMAKE_BUILD_TYPE=Debug \ -DBUILD_SHARED_LIBS=ON -DCMAKE_INSTALL_PREFIX=/path/to/install/BOUT++ @@ -352,13 +352,13 @@ BOUT++ bundles some dependencies, currently `mpark.variant `googletest `_. If you wish to use an existing installation of ``mpark.variant``, you can set ``-DBOUT_USE_SYSTEM_MPARK_VARIANT=ON``, and supply the installation -path using ``mpark_variant_ROOT`` via the command line or -environment variable if it is installed in a non standard -loction. Similarly for ``fmt``, using ``-DBOUT_USE_SYSTEM_FMT=ON`` -and ``fmt_ROOT`` respectively. The recommended way to use -``googletest`` is to compile it at the same time as your project, -therefore there is no option to use an external installation for -that. +path using ``mpark_variant_ROOT`` via the command line or environment +variable if it is installed in a non standard loction. You can also +set ``-DBOUT_USE_GIT_SUBMODULE=OFF``. 
+ +The recommended way to use ``googletest`` is to compile it at the same +time as your project, therefore there is no option to use an external +installation for that. Using CMake with your physics model ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/manual/sphinx/user_docs/physics_models.rst b/manual/sphinx/user_docs/physics_models.rst index 5695a04d81..8beb929bcd 100644 --- a/manual/sphinx/user_docs/physics_models.rst +++ b/manual/sphinx/user_docs/physics_models.rst @@ -1047,11 +1047,11 @@ suppresses the ``output_info`` messages, so that they will not appear in the console or log file. Running with ``-q -q`` suppresses everything except ``output_warn`` and ``output_error``. -To enable the ``output_debug`` messages, first configure BOUT++ with -debug messages enabled by adding ``-DDEBUG_ENABLED`` to ``BOUT_FLAGS`` -in ``make.config`` and then recompiling with ``make clean; -make``. When running BOUT++ add a "-v" flag to see ``output_debug`` -messages. +To enable the ``output_debug`` messages, configure BOUT++ with a +``CHECK`` level ``>= 3``. To enable it at lower check levels, +configure BOUT++ with ``--enable-debug-output`` (for ``./configure``) +or ``-DBOUT_ENABLE_OUTPUT_DEBUG`` (for ``CMake``). When running BOUT++ +add a ``-v -v`` flag to see ``output_debug`` messages. .. _sec-3to4: diff --git a/src/bout++.cxx b/src/bout++.cxx index 3dfad83924..bba2e27496 100644 --- a/src/bout++.cxx +++ b/src/bout++.cxx @@ -504,7 +504,7 @@ void setupOutput(const std::string& data_dir, const std::string& log_file, int v output_progress.enable(verbosity > 2); output_info.enable(verbosity > 3); output_verbose.enable(verbosity > 4); - // Only actually enabled if also compiled with DEBUG + // Only actually enabled if also compiled with BOUT_ENABLE_OUTPUT_DEBUG output_debug.enable(verbosity > 5); // The backward-compatible output object same as output_progress From ae1602eb8ce05dd48deb11914d0940c59b328860 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 28 May 2020 10:28:56 +0100 Subject: [PATCH 278/428] Fix conditional in cmake_dependent_option for submodules Also: - Rename BOUT_USE_GIT_SUBMODULE to BOUT_UPDATE_GIT_SUBMODULE for clarity over purpose - Rearrange messages for clearer feedback over selected option --- CMakeLists.txt | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1ecdee8a6d..314f6733d5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -29,11 +29,11 @@ option(INSTALL_GTEST "Enable installation of googletest. 
(Projects embedding goo set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH}) include(BOUT++functions) -option(BOUT_USE_GIT_SUBMODULE "Check submodules during build" ON) +option(BOUT_UPDATE_GIT_SUBMODULE "Check submodules are up-to-date during build" ON) # Adapted from https://cliutils.gitlab.io/modern-cmake/chapters/projects/submodule.html # Update submodules as needed function(bout_update_submodules) - if(NOT BOUT_USE_GIT_SUBMODULE) + if(NOT BOUT_UPDATE_GIT_SUBMODULE) return() endif() find_package(Git QUIET) @@ -322,18 +322,17 @@ target_compile_features(bout++ PUBLIC cxx_std_11) set_target_properties(bout++ PROPERTIES CXX_EXTENSIONS OFF) cmake_dependent_option(BOUT_USE_SYSTEM_MPARK_VARIANT "Use external installation of mpark.variant" OFF - "BOUT_USE_GIT_SUBMODULE;EXISTS externalpackages/mpark.variant/CMakeLists.txt" ON) + "BOUT_UPDATE_GIT_SUBMODULE OR EXISTS externalpackages/mpark.variant/CMakeLists.txt" ON) if(BOUT_USE_SYSTEM_MPARK_VARIANT) - find_package(mpark_variant REQUIRED) message(STATUS "Using external mpark.variant") + find_package(mpark_variant REQUIRED) else() + message(STATUS "Using mpark.variant submodule") bout_update_submodules() add_subdirectory(externalpackages/mpark.variant) - if(TARGET mpark_variant) - message(STATUS "Using mpark.variant submodule") - else() - message(FATAL_ERROR "mpark_variant not found! Have you disabled the git submodules (BOUT_USE_GIT_SUBMODULE)?") + if(NOT TARGET mpark_variant) + message(FATAL_ERROR "mpark_variant not found! Have you disabled the git submodules (BOUT_UPDATE_GIT_SUBMODULE)?") endif() endif() target_link_libraries(bout++ PUBLIC mpark_variant) From 3c200d9c6df9beb7a3082ecd341b26835f9ed26c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 28 May 2020 10:30:30 +0100 Subject: [PATCH 279/428] Workaround for git submodule update sometimes failing For some reason, `git submodule update --init --recursive` doesn't seem to play well with the global config `submodule.recurse = true` --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 314f6733d5..6868290152 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -39,7 +39,7 @@ function(bout_update_submodules) find_package(Git QUIET) if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git") message(STATUS "Submodule update") - execute_process(COMMAND ${GIT_EXECUTABLE} submodule update --init --recursive + execute_process(COMMAND ${GIT_EXECUTABLE} -c submodule.recurse=false submodule update --init --recursive WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} RESULT_VARIABLE GIT_SUBMOD_RESULT) if(NOT GIT_SUBMOD_RESULT EQUAL "0") From cb01496cdd81940e6909863cfc1a192328054141 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 4 May 2021 18:07:32 +0100 Subject: [PATCH 280/428] CMake: Build documentation --- CMakeLists.txt | 8 ++++++++ cmake/FindSphinx.cmake | 26 ++++++++++++++++++++++++++ manual/CMakeLists.txt | 19 +++++++++++++++++++ 3 files changed, 53 insertions(+) create mode 100644 cmake/FindSphinx.cmake create mode 100644 manual/CMakeLists.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index 6868290152..6a52d5d4ed 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -711,6 +711,14 @@ if(BOUT_BUILD_EXAMPLES) add_subdirectory(examples) endif() +################################################## +# Documentation + +option(BOUT_BUILD_DOCS "Build the documentation" ON) +if (BOUT_BUILD_DOCS) + add_subdirectory(manual) +endif() + ################################################## # Installation diff --git 
a/cmake/FindSphinx.cmake b/cmake/FindSphinx.cmake new file mode 100644 index 0000000000..fd377d0d00 --- /dev/null +++ b/cmake/FindSphinx.cmake @@ -0,0 +1,26 @@ +# FindSphinx +# ---------- +# +# Find the Sphinx documentation generator +# +# This module will define the following variables: +# +# :: +# +# Sphinx_FOUND - true if Sphinx was found +# Sphinx_EXECUTABLE - Path to the ``sphinx-build`` executable + +# Taken from +# https://devblogs.microsoft.com/cppblog/clear-functional-c-documentation-with-sphinx-breathe-doxygen-cmake/ + +#Look for an executable called sphinx-build +find_program(SPHINX_EXECUTABLE + NAMES sphinx-build sphinx-build-3 + DOC "Path to sphinx-build executable") + +include(FindPackageHandleStandardArgs) + +#Handle standard arguments to find_package like REQUIRED and QUIET +find_package_handle_standard_args(Sphinx + "Failed to find sphinx-build executable" + SPHINX_EXECUTABLE) diff --git a/manual/CMakeLists.txt b/manual/CMakeLists.txt new file mode 100644 index 0000000000..f8d2b5e093 --- /dev/null +++ b/manual/CMakeLists.txt @@ -0,0 +1,19 @@ +# BUOT++ Documentation + +find_package(Doxygen) + +find_package(Sphinx REQUIRED) +set(BOUT_SPHINX_SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/sphinx) +set(BOUT_SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR}/docs) + +add_custom_target(sphinx-html + COMMAND ${SPHINX_EXECUTABLE} -b html ${BOUT_SPHINX_SOURCE} ${BOUT_SPHINX_BUILD} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Generating PDF documentation with Sphinx in ${BOUT_SPHINX_BUILD}" +) + +add_custom_target(sphinx-pdf + COMMAND ${SPHINX_EXECUTABLE} -b pdf ${BOUT_SPHINX_SOURCE} ${BOUT_SPHINX_BUILD} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Generating HTML documentation with Sphinx in ${BOUT_SPHINX_BUILD}" +) From 0370c93ccefe8e9bfadb7ff24ebd05986218f08c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 4 May 2021 18:11:43 +0100 Subject: [PATCH 281/428] GHA: Don't build docs on github --- .github/workflows/tests.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 5df1145ea3..b304ec790e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -74,11 +74,11 @@ jobs: - name: "CMake, shared, Ubuntu 20.04" os: ubuntu-20.04 cmake_options: "-DBUILD_SHARED_LIBS=ON - -DENABLE_OPENMP=ON - -DUSE_PETSC=ON - -DUSE_SLEPC=ON - -DUSE_HDF5=ON - -DUSE_SUNDIALS=ON + -DBOUT_ENABLE_OPENMP=ON + -DBOUT_USE_PETSC=ON + -DBOUT_USE_SLEPC=ON + -DBOUT_USE_SUNDIALS=ON + -DBOUT_BUILD_DOCS=OFF -DSUNDIALS_ROOT=/home/runner/local" omp_num_threads: 2 From 7761ddb72959c911ff6e31950f0c7245d698bc11 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 5 May 2021 09:07:16 +0100 Subject: [PATCH 282/428] GHA: Don't build the docs for other Github actions either --- .github/workflows/clang-tidy-review.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/clang-tidy-review.yml b/.github/workflows/clang-tidy-review.yml index bd2c04c877..92617854cf 100644 --- a/.github/workflows/clang-tidy-review.yml +++ b/.github/workflows/clang-tidy-review.yml @@ -42,6 +42,7 @@ jobs: -DBOUT_USE_HDF5=ON \ -DBOUT_USE_SUNDIALS=ON \ -DBOUT_BUILD_EXAMPLES=ON \ + -DBOUT_BUILD_DOCS=OFF \ -DCMAKE_EXPORT_COMPILE_COMMANDS=On - name: Run clang-tidy From b6fa6c27399a852d2902b5cd8f37f4f977c08b87 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 6 May 2021 17:53:12 +0100 Subject: [PATCH 283/428] CMake: Fix typo [skip ci] --- manual/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/manual/CMakeLists.txt b/manual/CMakeLists.txt index f8d2b5e093..246a7bea7e 100644 --- a/manual/CMakeLists.txt +++ b/manual/CMakeLists.txt @@ -1,4 +1,4 @@ -# BUOT++ Documentation +# BOUT++ Documentation find_package(Doxygen) From b0a81f0273aee5c721eabfe6974828c6ab30ddef Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 6 May 2021 17:53:43 +0100 Subject: [PATCH 284/428] CMake: Be explicit that docs aren't built in `all` [skip ci] --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6a52d5d4ed..c19d06d000 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -716,7 +716,7 @@ endif() option(BOUT_BUILD_DOCS "Build the documentation" ON) if (BOUT_BUILD_DOCS) - add_subdirectory(manual) + add_subdirectory(manual EXCLUDE_FROM_ALL) endif() ################################################## From 9f094aba6a18b10dbc1c1d78d4a264c888fdfbe0 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 6 May 2021 17:54:04 +0100 Subject: [PATCH 285/428] CMake: Default to not building the docs [skip ci] --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c19d06d000..22592ceeaf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -714,7 +714,7 @@ endif() ################################################## # Documentation -option(BOUT_BUILD_DOCS "Build the documentation" ON) +option(BOUT_BUILD_DOCS "Build the documentation" OFF) if (BOUT_BUILD_DOCS) add_subdirectory(manual EXCLUDE_FROM_ALL) endif() From b9037a08b583805759a39cde7c64c3afcac782f8 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 17 May 2021 09:38:31 +0100 Subject: [PATCH 286/428] Fix for FindPETSc not being reentrant if pkg-config used --- cmake/FindPETSc.cmake | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/cmake/FindPETSc.cmake b/cmake/FindPETSc.cmake index 7a57c9f8ac..f049f7c2db 100644 --- a/cmake/FindPETSc.cmake +++ b/cmake/FindPETSc.cmake @@ -358,20 +358,24 @@ int main(int argc,char *argv[]) { set (PETSC_INCLUDES ${petsc_includes_needed} CACHE STRING "PETSc include path" FORCE) set (PETSC_LIBRARIES ${PETSC_LIBRARIES_ALL} CACHE STRING "PETSc libraries" FORCE) set (PETSC_COMPILER ${petsc_cc} CACHE FILEPATH "PETSc compiler" FORCE) - # Note that we have forced values for all these choices. If you - # change these, you are telling the system to trust you that they - # work. It is likely that you will end up with a broken build. - mark_as_advanced (PETSC_INCLUDES PETSC_LIBRARIES PETSC_COMPILER PETSC_DEFINITIONS PETSC_MPIEXEC PETSC_EXECUTABLE_RUNS) endif () -if (NOT PETSC_INCLUDES) - include(FindPkgConfig) - pkg_search_module(PkgPETSC PETSc>3.4.0 petsc>3.4.0) - set (PETSC_LIBRARIES ${PkgPETSC_LINK_LIBRARIES} CACHE STRING "PETSc libraries" FORCE) - set (PETSC_INCLUDES ${PkgPETSC_INCLUDE_DIRS} CACHE STRING "PETSc include path" FORCE) - set (PETSC_EXECUTABLE_RUNS "not-needed") +if (NOT PETSC_INCLUDES AND NOT TARGET PETSc::PETSc) + find_package(PkgConfig) + if (PkgConfig_FOUND) + pkg_search_module(PkgPETSC PETSc>3.4.0 petsc>3.4.0) + set (PETSC_LIBRARIES ${PkgPETSC_LINK_LIBRARIES} CACHE STRING "PETSc libraries" FORCE) + set (PETSC_INCLUDES ${PkgPETSC_INCLUDE_DIRS} CACHE STRING "PETSc include path" FORCE) + set (PETSC_EXECUTABLE_RUNS "YES" CACHE BOOL + "Can the system successfully run a PETSc executable? This variable can be manually set to \"YES\" to force CMake to accept a given PETSc configuration, but this will almost always result in a broken build. 
If you change PETSC_DIR, PETSC_ARCH, or PETSC_CURRENT you would have to reset this variable." FORCE) + endif() endif() +# Note that we have forced values for all these choices. If you +# change these, you are telling the system to trust you that they +# work. It is likely that you will end up with a broken build. +mark_as_advanced (PETSC_INCLUDES PETSC_LIBRARIES PETSC_COMPILER PETSC_DEFINITIONS PETSC_MPIEXEC PETSC_EXECUTABLE_RUNS) + include (FindPackageHandleStandardArgs) find_package_handle_standard_args (PETSc REQUIRED_VARS PETSC_INCLUDES PETSC_LIBRARIES PETSC_EXECUTABLE_RUNS From 56074b1f1f036e3ac8ec0308ce24af8d401c3624 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 5 May 2021 14:38:38 +0100 Subject: [PATCH 287/428] CMake: Add coverage/sanitizers --- CMakeLists.txt | 4 ++ cmake/Sanitizers.cmake | 109 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+) create mode 100644 cmake/Sanitizers.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 22592ceeaf..6494c1cf1b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -321,6 +321,10 @@ target_compile_definitions(bout++ target_compile_features(bout++ PUBLIC cxx_std_11) set_target_properties(bout++ PROPERTIES CXX_EXTENSIONS OFF) +# Various sanitizers, including coverage and address sanitizer +include(cmake/Sanitizers.cmake) +enable_sanitizers(bout++) + cmake_dependent_option(BOUT_USE_SYSTEM_MPARK_VARIANT "Use external installation of mpark.variant" OFF "BOUT_UPDATE_GIT_SUBMODULE OR EXISTS externalpackages/mpark.variant/CMakeLists.txt" ON) diff --git a/cmake/Sanitizers.cmake b/cmake/Sanitizers.cmake new file mode 100644 index 0000000000..ee960a5621 --- /dev/null +++ b/cmake/Sanitizers.cmake @@ -0,0 +1,109 @@ +# Adapted from +# https://github.com/lefticus/cpp_starter_project/blob/master/cmake/Sanitizers.cmake +# Public domain + +function(enable_sanitizers target_name) + + if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES ".*Clang") + option(ENABLE_COVERAGE "Enable coverage reporting for gcc/clang" FALSE) + message(STATUS "Enable coverage: ${ENABLE_COVERAGE}") + + if(ENABLE_COVERAGE) + target_compile_options(${target_name} PUBLIC --coverage -O0 -g) + target_link_libraries(${target_name} PUBLIC --coverage) + + find_program(fastcov_FOUND fastcov) + message(STATUS "Looking for fastcov: ${fastcov_FOUND}") + find_program(genhtml_FOUND genhtml) + message(STATUS "Looking for genhtml: ${fastcov_FOUND}") + + if (fastcov_FOUND AND genhtml_FOUND) + set(COVERAGE_NAME coverage CACHE STRING "Name of coverage output file") + set(COVERAGE_FILE "${COVERAGE_NAME}.info") + set(COVERAGE_MSG "Open file://${PROJECT_SOURCE_DIR}/${COVERAGE_NAME}/index.html in your browser to view coverage HTML output") + + add_custom_target(code-coverage-capture + COMMAND + fastcov --include "${CMAKE_CURRENT_SOURCE_DIR}/src" "${CMAKE_CURRENT_SOURCE_DIR}/include" + --exclude "${CMAKE_CURRENT_SOURCE_DIR}/externalpackages" + --lcov --process-gcno + --output "${COVERAGE_FILE}" + COMMAND + genhtml --output-directory "${COVERAGE_NAME}" --demangle-cpp --legend --show-details "${COVERAGE_FILE}" + COMMAND + "${CMAKE_COMMAND}" -E echo ${COVERAGE_MSG} + WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" + COMMENT "Capturing coverage information" + BYPRODUCTS + "${COVERAGE_FILE}" + "${COVERAGE_NAME}/index.html" + ) + + add_custom_target(code-coverage-clean + COMMAND + fastcov --zerocounters + COMMENT "Cleaning coverage information" + ) + else() + message(STATUS "Coverage enabled, but coverage-capture not available. 
Please install fastcov and lcov") + endif() + + endif() + + set(SANITIZERS "") + + option(ENABLE_SANITIZER_ADDRESS "Enable address sanitizer" FALSE) + if(ENABLE_SANITIZER_ADDRESS) + list(APPEND SANITIZERS "address") + endif() + + option(ENABLE_SANITIZER_LEAK "Enable leak sanitizer" FALSE) + if(ENABLE_SANITIZER_LEAK) + list(APPEND SANITIZERS "leak") + endif() + + option(ENABLE_SANITIZER_UNDEFINED_BEHAVIOR "Enable undefined behavior sanitizer" FALSE) + if(ENABLE_SANITIZER_UNDEFINED_BEHAVIOR) + list(APPEND SANITIZERS "undefined") + endif() + + option(ENABLE_SANITIZER_THREAD "Enable thread sanitizer" FALSE) + if(ENABLE_SANITIZER_THREAD) + if("address" IN_LIST SANITIZERS OR "leak" IN_LIST SANITIZERS) + message(WARNING "Thread sanitizer does not work with Address and Leak sanitizer enabled") + else() + list(APPEND SANITIZERS "thread") + endif() + endif() + + option(ENABLE_SANITIZER_MEMORY "Enable memory sanitizer" FALSE) + if(ENABLE_SANITIZER_MEMORY AND CMAKE_CXX_COMPILER_ID MATCHES ".*Clang") + if("address" IN_LIST SANITIZERS + OR "thread" IN_LIST SANITIZERS + OR "leak" IN_LIST SANITIZERS) + message(WARNING "Memory sanitizer does not work with Address, Thread and Leak sanitizer enabled") + else() + list(APPEND SANITIZERS "memory") + endif() + endif() + + list( + JOIN + SANITIZERS + "," + LIST_OF_SANITIZERS) + + endif() + + if(LIST_OF_SANITIZERS) + if(NOT + "${LIST_OF_SANITIZERS}" + STREQUAL + "") + message(STATUS "Sanitizers enabled: ${LIST_OF_SANITIZERS}") + target_compile_options(${target_name} PUBLIC -fsanitize=${LIST_OF_SANITIZERS} -fno-omit-frame-pointer) + target_link_options(${target_name} PUBLIC -fsanitize=${LIST_OF_SANITIZERS}) + endif() + endif() + +endfunction() From 9f59d7a8d662f0a8e3dd1ea84f3296f454730cd2 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 18 May 2021 09:21:49 +0100 Subject: [PATCH 288/428] CMake: Add CONFLICTS to bout_add_integrated_or_mms_test Disables a test if it conflicts with some variable --- cmake/BOUT++functions.cmake | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/cmake/BOUT++functions.cmake b/cmake/BOUT++functions.cmake index 941c0e7947..f46888aff6 100644 --- a/cmake/BOUT++functions.cmake +++ b/cmake/BOUT++functions.cmake @@ -108,6 +108,10 @@ endfunction() # - EXTRA_FILES: any extra files that are required to run the test # # - REQUIRES: list of variables that must be truthy to enable test +# (note: use `CONFLICTS` to negate the variable, rather than `NOT +# VARIABLE`) +# +# - CONFLICTS: list of variables that must be falsey to enable test # # - EXECUTABLE_NAME: name of the executable, if different from the # first source name @@ -117,7 +121,7 @@ endfunction() function(bout_add_integrated_or_mms_test BUILD_CHECK_TARGET TESTNAME) set(options USE_RUNTEST USE_DATA_BOUT_INP) set(oneValueArgs EXECUTABLE_NAME) - set(multiValueArgs SOURCES EXTRA_FILES REQUIRES TESTARGS EXTRA_DEPENDS) + set(multiValueArgs SOURCES EXTRA_FILES REQUIRES CONFLICTS TESTARGS EXTRA_DEPENDS) cmake_parse_arguments(BOUT_TEST_OPTIONS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) foreach (REQUIREMENT IN LISTS BOUT_TEST_OPTIONS_REQUIRES) @@ -127,6 +131,13 @@ function(bout_add_integrated_or_mms_test BUILD_CHECK_TARGET TESTNAME) endif() endforeach() + foreach (CONFLICT IN LISTS BOUT_TEST_OPTIONS_CONFLICTS) + if (${CONFLICT}) + message(STATUS "Not building test ${TESTNAME}, conflicts with: ${CONFLICT}") + return() + endif() + endforeach() + if (BOUT_TEST_OPTIONS_SOURCES) # We've got some sources, so compile them into an executable and # link 
against BOUT++ From 9894428b3b96f6dea26846224dc44abb4bd45c49 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 19 May 2021 11:45:36 +0100 Subject: [PATCH 289/428] CMake: Install missing cmake modules --- CMakeLists.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6494c1cf1b..c548d4a787 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -756,14 +756,19 @@ install( FILES "${CMAKE_CURRENT_BINARY_DIR}/bout++Config.cmake" "${CMAKE_CURRENT_BINARY_DIR}/bout++ConfigVersion.cmake" + "${CMAKE_CURRENT_SOURCE_DIR}/cmake/BOUT++functions.cmake" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/CorrectWindowsPaths.cmake" + "${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindClangFormat.cmake" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindFFTW.cmake" - "${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindNetCDF.cmake" + "${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindnetCDF.cmake" + "${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindnetCDFCxx.cmake" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindPackageMultipass.cmake" + "${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindLibuuid.cmake" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindPETSc.cmake" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindScoreP.cmake" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindSLEPc.cmake" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindSUNDIALS.cmake" + "${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindSphinx.cmake" "${CMAKE_CURRENT_SOURCE_DIR}/cmake/ResolveCompilerPaths.cmake" DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/bout++" ) From 2a0e41a7eaeda8429d06059eb019f4552e19dc3d Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 19 May 2021 11:52:59 +0100 Subject: [PATCH 290/428] CMake: Provide bout-config --- CMakeLists.txt | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index c548d4a787..dfc8fbb621 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -312,6 +312,14 @@ target_include_directories(bout++ PUBLIC $ $ ) +set_target_properties(bout++ PROPERTIES + LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/lib" + ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/lib") + +# Set some variables for the bout-config script +set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} -L${CMAKE_BINARY_DIR}/lib -lbout++") +set(BOUT_INCLUDE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/include") +set(CONFIG_CFLAGS "${CONFIG_CFLAGS} -I\${BOUT_INCLUDE_PATH} -I${CMAKE_CURRENT_BINARY_DIR}/include") target_compile_definitions(bout++ PUBLIC "BOUT_VERSION_STRING=\"${BOUT_FULL_VERSION}\"" @@ -331,6 +339,7 @@ cmake_dependent_option(BOUT_USE_SYSTEM_MPARK_VARIANT "Use external installation if(BOUT_USE_SYSTEM_MPARK_VARIANT) message(STATUS "Using external mpark.variant") find_package(mpark_variant REQUIRED) + get_target_property(MPARK_VARIANT_INCLUDE_PATH mpark_variant INTERFACE_INCLUDE_DIRECTORIES) else() message(STATUS "Using mpark.variant submodule") bout_update_submodules() @@ -338,6 +347,8 @@ else() if(NOT TARGET mpark_variant) message(FATAL_ERROR "mpark_variant not found! 
Have you disabled the git submodules (BOUT_UPDATE_GIT_SUBMODULE)?") endif() + set(MPARK_VARIANT_INCLUDE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/externalpackages/mpark.variant/include") + set(CONFIG_CFLAGS "${CONFIG_CFLAGS} -I\${MPARK_VARIANT_INCLUDE_PATH}") endif() target_link_libraries(bout++ PUBLIC mpark_variant) @@ -428,6 +439,7 @@ if (BOUT_ENABLE_BACKTRACE) PUBLIC "BACKTRACE" PUBLIC "BOUT_BACKTRACE") target_link_libraries(bout++ PUBLIC ${CMAKE_DL_LIBS}) + set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} -l${CMAKE_DL_LIBS}") endif() message(STATUS "Enable backtrace: BOUT_USE_BACKTRACE=${BOUT_ENABLE_BACKTRACE}") set(BOUT_USE_BACKTRACE ${BOUT_ENABLE_BACKTRACE}) @@ -466,6 +478,12 @@ if (BOUT_USE_PVODE) target_link_libraries(bout++ PUBLIC pvode pvpre) target_compile_definitions(bout++ PUBLIC "BOUT_HAS_PVODE") + # Build the libraries in /lib: this makes updating the + # path for bout-config much easier + set_target_properties(pvode pvpre PROPERTIES + LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/lib" + ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/lib") + set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} -lpvode -lpvpre") endif() message(STATUS "PVODE support: ${BOUT_USE_PVODE}") set(BOUT_HAS_PVODE ${BOUT_USE_PVODE}) @@ -487,6 +505,7 @@ if (BOUT_USE_NETCDF) FetchContent_MakeAvailable(netcdf-cxx4) else() find_package(netCDFCxx REQUIRED) + set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} ${netCDF_CXX_LIBRARY} ${netCDF_LIBRARIES}") endif() target_link_libraries(bout++ PUBLIC netCDF::netcdf-cxx4) target_compile_definitions(bout++ @@ -514,6 +533,7 @@ if (BOUT_USE_FFTW) target_compile_definitions(bout++ PUBLIC "BOUT_HAS_FFTW") target_link_libraries(bout++ PUBLIC FFTW::FFTW) + set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} ${FFTW_LIBRARIES}") endif() message(STATUS "FFTW support: ${BOUT_USE_FFTW}") set(BOUT_HAS_FFTW ${BOUT_USE_FFTW}) @@ -524,6 +544,8 @@ if (BOUT_USE_LAPACK) # Cray wrappers sort this out for us find_package(LAPACK REQUIRED) target_link_libraries(bout++ PUBLIC "${LAPACK_LIBRARIES}") + string(JOIN " " CONFIG_LAPACK_LIBRARIES ${LAPACK_LIBRARIES}) + set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} ${CONFIG_LAPACK_LIBRARIES}") endif() target_compile_definitions(bout++ PUBLIC "LAPACK" @@ -538,6 +560,8 @@ if (BOUT_USE_PETSC) # Cray wrappers sort this out for us find_package(PETSc REQUIRED) target_link_libraries(bout++ PUBLIC PETSc::PETSc) + string(JOIN " " CONFIG_PETSC_LIBRARIES ${PETSC_LIBRARIES}) + set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} ${CONFIG_PETSC_LIBRARIES}") endif() target_compile_definitions(bout++ PUBLIC "BOUT_HAS_PETSC") @@ -551,6 +575,8 @@ if (BOUT_USE_SLEPC) target_compile_definitions(bout++ PUBLIC "BOUT_HAS_SLEPC") target_link_libraries(bout++ PUBLIC SLEPc::SLEPc) + string(JOIN " " CONFIG_SLEPC_LIBRARIES ${SLEPC_LIBRARIES}) + set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} ${CONFIG_SLEPC_LIBRARIES}") endif() message(STATUS "SLEPc support: ${BOUT_USE_SLEPC}") set(BOUT_HAS_SLEPC ${BOUT_USE_SLEPC}) @@ -566,6 +592,7 @@ if (BOUT_USE_SUNDIALS) target_link_libraries(bout++ PUBLIC SUNDIALS::cvode) target_link_libraries(bout++ PUBLIC SUNDIALS::ida) target_link_libraries(bout++ PUBLIC SUNDIALS::arkode) + set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} ${SUNDIALS_cvode_LIBRARY} ${SUNDIALS_ida_LIBRARY} ${SUNDIALS_arkode_LIBRARY} ${SUNDIALS_nvecparallel_LIBRARY}") endif() message(STATUS "SUNDIALS support: ${BOUT_USE_SUNDIALS}") set(BOUT_HAS_SUNDIALS ${BOUT_USE_SUNDIALS}) @@ -607,6 +634,7 @@ if (BOUT_USE_UUID_SYSTEM_GENERATOR) if (Libuuid_FOUND) target_link_libraries(bout++ PUBLIC Libuuid::libuuid) + set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} 
${Libuuid_LIBRARIES}") else() message(STATUS "libuuid not found, using fallback UUID generator") set(BOUT_USE_UUID_SYSTEM_GENERATOR FALSE) @@ -723,6 +751,44 @@ if (BOUT_BUILD_DOCS) add_subdirectory(manual EXCLUDE_FROM_ALL) endif() +################################################## +# Generate the bout-config script + +# Set some variables to match autotools so we can use the same input file +set(CXX "${MPI_CXX_COMPILER}") +set(PYTHONCONFIGPATH "${BOUT_PYTHONPATH}") +set(BOUT_HAS_LEGACY_NETCDF OFF) +set(BOUT_HAS_PNETCDF OFF) + +# For shared libraries we only need to know how to link against BOUT++, +# while for static builds we need the dependencies too +if (BUILD_SHARED_LIBS) + # Include rpath linker flag so user doesn't need to set LD_LIBRARY_PATH + set(CONFIG_LDFLAGS "${CMAKE_SHARED_LIBRARY_RUNTIME_CXX_FLAG}${CMAKE_BINARY_DIR} -L${CMAKE_BINARY_DIR} -lbout++") +else() + set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS}") +endif() + +# This version of the file allows the build directory to be used directly +configure_file(bin/bout-config.in bin/bout-config @ONLY) + +# We need to generate a separate version for installation, with the +# correct install paths. So first we need to replace the build +# directory library path with the installation path +string(REPLACE + "${CMAKE_BINARY_DIR}/lib" "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}" + CONFIG_LDFLAGS "${CONFIG_LDFLAGS}") +# Update mpark.variant include paths if we're building it +if (NOT BOUT_USE_SYSTEM_MPARK_VARIANT) + set(MPARK_VARIANT_INCLUDE_PATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}") +endif() +set(BOUT_INCLUDE_PATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}") +# We don't need the build include path any more +string(REPLACE "-I${CMAKE_CURRENT_BINARY_DIR}/include" "" CONFIG_CFLAGS "${CONFIG_CFLAGS}") + +# This version now has the correct paths to use the final installation +configure_file(bin/bout-config.in bin/bout-config-install @ONLY) + ################################################## # Installation @@ -736,6 +802,12 @@ install(TARGETS bout++ ) install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) +# The installed version of bout-config needs renaming when we install it +install(PROGRAMS "${CMAKE_CURRENT_BINARY_DIR}/bin/bout-config-install" + DESTINATION "${CMAKE_INSTALL_BINDIR}" + RENAME "bout-config" + ) + include(CMakePackageConfigHelpers) write_basic_package_version_file( bout++ConfigVersion.cmake From 8f1154651d7dbad652da7fcce6287eb9742af51a Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 19 May 2021 11:58:59 +0100 Subject: [PATCH 291/428] CMake: Install the various helper scripts --- CMakeLists.txt | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index dfc8fbb621..a9f0b779e9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -802,7 +802,15 @@ install(TARGETS bout++ ) install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) -# The installed version of bout-config needs renaming when we install it +# The various helper scripts +install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/bin/" + USE_SOURCE_PERMISSIONS + DESTINATION "${CMAKE_INSTALL_BINDIR}") + +# The installed version of bout-config needs renaming when we install +# it. 
Note this MUST be done after the installation of bin/, to make +# sure we clobber any versions of bout-config hanging around from an +# autotools build install(PROGRAMS "${CMAKE_CURRENT_BINARY_DIR}/bin/bout-config-install" DESTINATION "${CMAKE_INSTALL_BINDIR}" RENAME "bout-config" @@ -824,6 +832,8 @@ install(EXPORT bout++Targets configure_package_config_file(bout++Config.cmake.in bout++Config.cmake INSTALL_DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/bout++Config.cmake" ) + +# CMake configuration files install( FILES "${CMAKE_CURRENT_BINARY_DIR}/bout++Config.cmake" From 2fd841a75bfb86d91a7419eb134c1b3e565a7c0e Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 19 May 2021 13:19:29 +0100 Subject: [PATCH 292/428] CMake: Fix some issues with FindSLEPc module - wrong syntax for `mark_as_advanced` - cache package required variables - explicitly set version variable - don't rerun the tests if we've already found slepc --- cmake/FindSLEPc.cmake | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/cmake/FindSLEPc.cmake b/cmake/FindSLEPc.cmake index 7a809efba9..2db1a25a1f 100644 --- a/cmake/FindSLEPc.cmake +++ b/cmake/FindSLEPc.cmake @@ -2,8 +2,8 @@ # Once done this will define # # SLEPC_FOUND - system has SLEPc -# SLEPC_INCLUDE_DIR - include directories for SLEPc -# SLEPC_LIBARIES - libraries for SLEPc +# SLEPC_INCLUDE_DIRS - include directories for SLEPc +# SLEPC_LIBRARIES - libraries for SLEPc # SLEPC_DIR - directory where SLEPc is built # SLEPC_VERSION - version of SLEPc # SLEPC_VERSION_MAJOR - First number in SLEPC_VERSION @@ -140,7 +140,7 @@ if (SLEPC_SKIP_BUILD_TESTS) set(SLEPC_TEST_RUNS TRUE) set(SLEPC_VERSION "UNKNOWN") set(SLEPC_VERSION_OK TRUE) -elseif (SLEPC_LIBRARIES AND SLEPC_INCLUDE_DIRS) +elseif (SLEPC_LIBRARIES AND SLEPC_INCLUDE_DIRS AND NOT SLEPC_TEST_RUNS) # Set flags for building test program set(CMAKE_REQUIRED_INCLUDES ${SLEPC_INCLUDE_DIRS}) @@ -179,17 +179,17 @@ int main() { list(GET SLEPC_VERSION_LIST 1 SLEPC_VERSION_MINOR) list(GET SLEPC_VERSION_LIST 2 SLEPC_VERSION_SUBMINOR) mark_as_advanced(SLEPC_VERSION) - mark_as_advanced(SLEPC_VERSION_MAJOR, SLEPC_VERSION_MINOR, SLEPC_VERSION_SUBMINOR) + mark_as_advanced(SLEPC_VERSION_MAJOR SLEPC_VERSION_MINOR SLEPC_VERSION_SUBMINOR) endif() if (SLEPc_FIND_VERSION) # Check if version found is >= required version if (NOT "${SLEPC_VERSION}" VERSION_LESS "${SLEPc_FIND_VERSION}") - set(SLEPC_VERSION_OK TRUE) + set(SLEPC_VERSION_OK TRUE CACHE BOOL "") endif() else() # No specific version requested - set(SLEPC_VERSION_OK TRUE) + set(SLEPC_VERSION_OK TRUE CACHE BOOL "") endif() mark_as_advanced(SLEPC_VERSION_OK) @@ -231,7 +231,7 @@ int main() if (SLEPC_TEST_LIB_COMPILED AND SLEPC_TEST_LIB_EXITCODE EQUAL 0) message(STATUS "Performing test SLEPC_TEST_RUNS - Success") - set(SLEPC_TEST_RUNS TRUE) + set(SLEPC_TEST_RUNS TRUE CACHE BOOL "SLEPc test program can run") else() message(STATUS "Performing test SLEPC_TEST_RUNS - Failed") @@ -252,8 +252,8 @@ int main() if (SLEPC_TEST_3RD_PARTY_LIBS_COMPILED AND SLEPC_TEST_3RD_PARTY_LIBS_EXITCODE EQUAL 0) message(STATUS "Performing test SLEPC_TEST_3RD_PARTY_LIBS_RUNS - Success") set(SLEPC_LIBRARIES ${SLEPC_LIBRARIES} ${SLEPC_EXTERNAL_LIBRARIES} - CACHE STRING "SLEPc libraries." FORCE) - set(SLEPC_TEST_RUNS TRUE) + CACHE STRING "SLEPc libraries." 
FORCE) + set(SLEPC_TEST_RUNS TRUE CACHE BOOL "SLEPc test program can run") else() message(STATUS "Performing test SLEPC_TEST_3RD_PARTY_LIBS_RUNS - Failed") endif() @@ -263,9 +263,11 @@ endif() # Standard package handling include(FindPackageHandleStandardArgs) find_package_handle_standard_args(SLEPc - "SLEPc could not be found. Be sure to set SLEPC_DIR, PETSC_DIR, and PETSC_ARCH." - SLEPC_LIBRARIES SLEPC_DIR SLEPC_INCLUDE_DIRS SLEPC_TEST_RUNS - SLEPC_VERSION SLEPC_VERSION_OK) + FOUND_VAR SLEPC_FOUND + FAIL_MESSAGE "SLEPc could not be found. Be sure to set SLEPC_DIR, PETSC_DIR, and PETSC_ARCH." + VERSION_VAR SLEPC_VERSION + REQUIRED_VARS SLEPC_LIBRARIES SLEPC_DIR SLEPC_INCLUDE_DIRS SLEPC_TEST_RUNS + SLEPC_VERSION_OK) if (SLEPC_FOUND) if (NOT TARGET SLEPc::SLEPc) From ae35bd45dc811070ff4a1fdc6235fd151e151e78 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 19 May 2021 13:23:30 +0100 Subject: [PATCH 293/428] CMake: Cache SUNDIALS variables --- cmake/FindSUNDIALS.cmake | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cmake/FindSUNDIALS.cmake b/cmake/FindSUNDIALS.cmake index 2896e9b0be..ceaeb3ebce 100644 --- a/cmake/FindSUNDIALS.cmake +++ b/cmake/FindSUNDIALS.cmake @@ -47,7 +47,8 @@ endif() set(SUNDIALS_INCLUDE_DIRS "${SUNDIALS_INCLUDE_DIR}" - "${SUNDIALS_INCLUDE_DIR}/..") + "${SUNDIALS_INCLUDE_DIR}/.." + CACHE STRING "SUNDIALS include directories") find_library(SUNDIALS_nvecparallel_LIBRARY NAMES sundials_nvecparallel @@ -102,10 +103,10 @@ if (SUNDIALS_INCLUDE_DIR) ".*#define SUNDIALS_PACKAGE_VERSION \"([0-9]+)\\.([0-9]+)\\.([0-9]+)\".*") endif() string(REGEX MATCH ${SUNDIALS_VERSION_REGEX_PATTERN} _ "${SUNDIALS_CONFIG_FILE}") - set(SUNDIALS_VERSION_MAJOR ${CMAKE_MATCH_1}) - set(SUNDIALS_VERSION_MINOR ${CMAKE_MATCH_2}) - set(SUNDIALS_VERSION_PATCH ${CMAKE_MATCH_3}) - set(SUNDIALS_VERSION "${SUNDIALS_VERSION_MAJOR}.${SUNDIALS_VERSION_MINOR}.${SUNDIALS_VERSION_PATCH}") + set(SUNDIALS_VERSION_MAJOR ${CMAKE_MATCH_1} CACHE STRING "") + set(SUNDIALS_VERSION_MINOR ${CMAKE_MATCH_2} CACHE STRING "") + set(SUNDIALS_VERSION_PATCH ${CMAKE_MATCH_3} CACHE STRING "") + set(SUNDIALS_VERSION "${SUNDIALS_VERSION_MAJOR}.${SUNDIALS_VERSION_MINOR}.${SUNDIALS_VERSION_PATCH}" CACHE STRING "SUNDIALS version") endif() if (SUNDIALS_DEBUG) @@ -118,6 +119,7 @@ find_package_handle_standard_args(SUNDIALS VERSION_VAR SUNDIALS_VERSION ) +set(SUNDIALS_LIBRARIES "${SUNDIALS_LIBRARIES}" CACHE STRING "SUNDIALS libraries") mark_as_advanced(SUNDIALS_LIBRARIES SUNDIALS_INCLUDE_DIR SUNDIALS_INCLUDE_DIRS) if (SUNDIALS_FOUND AND NOT TARGET SUNDIALS::SUNDIALS) From a0f88fcf1a03ecfe8098e5057aa7b647c7aa80ca Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 25 May 2021 16:50:57 +0100 Subject: [PATCH 294/428] CMake: Fix correct library location for shared lib in bout-config --- CMakeLists.txt | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a9f0b779e9..07d9167663 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -312,12 +312,13 @@ target_include_directories(bout++ PUBLIC $ $ ) +set(BOUT_LIB_PATH "${CMAKE_CURRENT_BINARY_DIR}/lib") set_target_properties(bout++ PROPERTIES - LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/lib" - ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/lib") + LIBRARY_OUTPUT_DIRECTORY "${BOUT_LIB_PATH}" + ARCHIVE_OUTPUT_DIRECTORY "${BOUT_LIB_PATH}") # Set some variables for the bout-config script -set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} -L${CMAKE_BINARY_DIR}/lib -lbout++") +set(CONFIG_LDFLAGS 
"${CONFIG_LDFLAGS} -L\$BOUT_LIB_PATH -lbout++") set(BOUT_INCLUDE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/include") set(CONFIG_CFLAGS "${CONFIG_CFLAGS} -I\${BOUT_INCLUDE_PATH} -I${CMAKE_CURRENT_BINARY_DIR}/include") @@ -764,7 +765,7 @@ set(BOUT_HAS_PNETCDF OFF) # while for static builds we need the dependencies too if (BUILD_SHARED_LIBS) # Include rpath linker flag so user doesn't need to set LD_LIBRARY_PATH - set(CONFIG_LDFLAGS "${CMAKE_SHARED_LIBRARY_RUNTIME_CXX_FLAG}${CMAKE_BINARY_DIR} -L${CMAKE_BINARY_DIR} -lbout++") + set(CONFIG_LDFLAGS "${CMAKE_SHARED_LIBRARY_RUNTIME_CXX_FLAG}\$BOUT_LIB_PATH -L\$BOUT_LIB_PATH -lbout++") else() set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS}") endif() From d4fd557673252aad0d5e4bb6cf2f144e81241013 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 25 May 2021 16:55:12 +0100 Subject: [PATCH 295/428] CMake: Use less jargon in function docstring --- cmake/BOUT++functions.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/BOUT++functions.cmake b/cmake/BOUT++functions.cmake index f46888aff6..1ba4eb1424 100644 --- a/cmake/BOUT++functions.cmake +++ b/cmake/BOUT++functions.cmake @@ -107,11 +107,11 @@ endfunction() # # - EXTRA_FILES: any extra files that are required to run the test # -# - REQUIRES: list of variables that must be truthy to enable test +# - REQUIRES: list of variables that must be true to enable test # (note: use `CONFLICTS` to negate the variable, rather than `NOT # VARIABLE`) # -# - CONFLICTS: list of variables that must be falsey to enable test +# - CONFLICTS: list of variables that must be false to enable test # # - EXECUTABLE_NAME: name of the executable, if different from the # first source name From 5411adcc3ff418549d0880a4d2023747fb4f7a5f Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 25 May 2021 16:58:40 +0100 Subject: [PATCH 296/428] CMake: Fix issue on Fedora with PETSc linking order with bout-config --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 07d9167663..7e22fd3c37 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -562,7 +562,7 @@ if (BOUT_USE_PETSC) find_package(PETSc REQUIRED) target_link_libraries(bout++ PUBLIC PETSc::PETSc) string(JOIN " " CONFIG_PETSC_LIBRARIES ${PETSC_LIBRARIES}) - set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} ${CONFIG_PETSC_LIBRARIES}") + set(CONFIG_LDFLAGS "${CONFIG_PETSC_LIBRARIES} ${CONFIG_LDFLAGS}") endif() target_compile_definitions(bout++ PUBLIC "BOUT_HAS_PETSC") From 3f73cde042d4994023cb48df3e3166234d61954c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Tue, 25 May 2021 13:51:16 +0200 Subject: [PATCH 297/428] Add simple test for bout-config --- .ci_with_cmake.sh | 17 +++++++++++++++++ .github/workflows/tests.yml | 7 +------ 2 files changed, 18 insertions(+), 6 deletions(-) create mode 100644 .ci_with_cmake.sh diff --git a/.ci_with_cmake.sh b/.ci_with_cmake.sh new file mode 100644 index 0000000000..a3a4d100b2 --- /dev/null +++ b/.ci_with_cmake.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +cmake --version +cmake . 
-B build $@ -DCMAKE_INSTALL_PREFIX=$(pwd)/installed +cmake --build build --target build-check -j 2 +cd build +ctest --output-on-failure --timeout 300 + +# Test bout-config basic functionallity +cd ../examples/make-script +PATH=../../build/bin:$PATH make +./test --help +cd - +make install -j 2 +cd - +rm test +PATH=../../installed/bin:$PATH make +./test --help diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index b304ec790e..0aa0dadfe3 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -157,12 +157,7 @@ jobs: - name: Build (CMake) if: ${{ contains(matrix.config.name, 'CMake') }} - run: | - cmake --version - cmake . -B build ${{ matrix.config.cmake_options }} - cmake --build build -j 2 - cd build - ctest --output-on-failure --timeout 300 + run: ./.ci_with_cmake.sh ${{ matrix.config.cmake_options }} - name: Capture coverage if: ${{ matrix.config.name == 'Coverage' }} From 912d5632605eaf3bbf874866cf530ae060f60694 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Tue, 25 May 2021 13:56:04 +0200 Subject: [PATCH 298/428] Fix permissions --- .ci_with_cmake.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 .ci_with_cmake.sh diff --git a/.ci_with_cmake.sh b/.ci_with_cmake.sh old mode 100644 new mode 100755 From ff353f133ef7119d9177757628bc56c6fb68b2e9 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 27 May 2021 09:30:44 +0100 Subject: [PATCH 299/428] CMake: Add REQUIRES, CONFLICTS arguments to bout_add_model/example --- cmake/BOUT++functions.cmake | 61 ++++++++++++++++----- examples/invertable_operator/CMakeLists.txt | 8 +-- 2 files changed, 49 insertions(+), 20 deletions(-) diff --git a/cmake/BOUT++functions.cmake b/cmake/BOUT++functions.cmake index 1ba4eb1424..ccf85f0b91 100644 --- a/cmake/BOUT++functions.cmake +++ b/cmake/BOUT++functions.cmake @@ -8,6 +8,27 @@ macro(bout_copy_file FILENAME) COPYONLY) endmacro() +# Handle the REQUIRES and CONFLICTS arguments for models, examples, +# and tests. 
Returns from those functions if REQUIRES are not met, or +# if CONFLICTS are true +macro(bout_handle_requires_conflicts TYPENAME TYPEVAR) + set(multiValueArgs REQUIRES CONFLICTS) + cmake_parse_arguments(BOUT_HANDLE_OPTIONS "" "" "${multiValueArgs}" ${ARGN}) + + foreach (REQUIREMENT IN LISTS BOUT_HANDLE_OPTIONS_REQUIRES) + if (NOT ${REQUIREMENT}) + message(STATUS "Not building ${TYPENAME} ${TYPEVAR}, requirement not met: ${REQUIREMENT}") + return() + endif() + endforeach() + + foreach (CONFLICT IN LISTS BOUT_HANDLE_OPTIONS_CONFLICTS) + if (${CONFLICT}) + message(STATUS "Not building ${TYPENAME} ${TYPEVAR}, conflicts with: ${CONFLICT}") + return() + endif() + endforeach() +endmacro() # Build a BOUT++ physics model # @@ -17,8 +38,18 @@ endmacro() # Arguments: # - MODEL: Name of the executable # - SOURCES: List of source files to compile +# - REQUIRES: list of variables that must be true to build model +# (note: use `CONFLICTS` to negate the variable, rather than `NOT +# VARIABLE`) +# - CONFLICTS: list of variables that must be false to enable test function(bout_add_model MODEL) - cmake_parse_arguments(BOUT_MODEL_OPTIONS "" "" "SOURCES" ${ARGN}) + set(multiValueArgs SOURCES REQUIRES CONFLICTS) + cmake_parse_arguments(BOUT_MODEL_OPTIONS "" "" "${multiValueArgs}" ${ARGN}) + + bout_handle_requires_conflicts("model" MODEL + REQUIRES ${BOUT_MODEL_OPTIONS_REQUIRES} + CONFLICTS ${BOUT_MODEL_OPTIONS_CONFLICTS} + ) if (NOT BOUT_MODEL_OPTIONS_SOURCES) message(FATAL_ERROR "Required argument SOURCES missing from 'bout_add_model'") @@ -46,10 +77,19 @@ endfunction() # - SOURCES: List of source files to compile # - DATA_DIRS: List of data directories to copy (default: 'data') # - EXTRA_FILES: List of other files to copy +# - REQUIRES: list of variables that must be true to build example +# (note: use `CONFLICTS` to negate the variable, rather than `NOT +# VARIABLE`) +# - CONFLICTS: list of variables that must be false to enable test function(bout_add_example EXAMPLENAME) - set(multiValueArgs SOURCES DATA_DIRS EXTRA_FILES) + set(multiValueArgs SOURCES REQUIRES CONFLICTS DATA_DIRS EXTRA_FILES) cmake_parse_arguments(BOUT_EXAMPLE_OPTIONS "" "" "${multiValueArgs}" ${ARGN}) + bout_handle_requires_conflicts("example" ${EXAMPLENAME} + REQUIRES ${BOUT_EXAMPLE_OPTIONS_REQUIRES} + CONFLICTS ${BOUT_EXAMPLE_OPTIONS_CONFLICTS} + ) + bout_add_model(${EXAMPLENAME} SOURCES ${BOUT_EXAMPLE_OPTIONS_SOURCES}) # If this is a standalone project, we can stop here. 
Otherwise, we @@ -124,19 +164,10 @@ function(bout_add_integrated_or_mms_test BUILD_CHECK_TARGET TESTNAME) set(multiValueArgs SOURCES EXTRA_FILES REQUIRES CONFLICTS TESTARGS EXTRA_DEPENDS) cmake_parse_arguments(BOUT_TEST_OPTIONS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - foreach (REQUIREMENT IN LISTS BOUT_TEST_OPTIONS_REQUIRES) - if (NOT ${REQUIREMENT}) - message(STATUS "Not building test ${TESTNAME}, requirement not met: ${REQUIREMENT}") - return() - endif() - endforeach() - - foreach (CONFLICT IN LISTS BOUT_TEST_OPTIONS_CONFLICTS) - if (${CONFLICT}) - message(STATUS "Not building test ${TESTNAME}, conflicts with: ${CONFLICT}") - return() - endif() - endforeach() + bout_handle_requires_conflicts("test" ${TESTNAME} + REQUIRES ${BOUT_TEST_OPTIONS_REQUIRES} + CONFLICTS ${BOUT_TEST_OPTIONS_CONFLICTS} + ) if (BOUT_TEST_OPTIONS_SOURCES) # We've got some sources, so compile them into an executable and diff --git a/examples/invertable_operator/CMakeLists.txt b/examples/invertable_operator/CMakeLists.txt index 83f7c74e7a..f054466f23 100644 --- a/examples/invertable_operator/CMakeLists.txt +++ b/examples/invertable_operator/CMakeLists.txt @@ -6,8 +6,6 @@ if (NOT TARGET bout++::bout++) find_package(bout++ REQUIRED) endif() -if(NOT BOUT_HAS_PETSC) - message(FATAL_ERROR "This example requires PETSc. Please compile BOUT++ with PETSc") -endif() - -bout_add_example(invertable_operator SOURCES invertable_operator.cxx) +bout_add_example(invertable_operator + SOURCES invertable_operator.cxx + REQUIRES BOUT_HAS_PETSC) From 114b81be450c3e87428fbfa01ed2e5cee1f68b36 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 27 May 2021 09:32:18 +0100 Subject: [PATCH 300/428] CMake: Fail fast in CI script --- .ci_with_cmake.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.ci_with_cmake.sh b/.ci_with_cmake.sh index a3a4d100b2..3479510105 100755 --- a/.ci_with_cmake.sh +++ b/.ci_with_cmake.sh @@ -1,4 +1,7 @@ #!/usr/bin/env bash + +set -ex + cmake --version cmake . -B build $@ -DCMAKE_INSTALL_PREFIX=$(pwd)/installed cmake --build build --target build-check -j 2 From cbf78734a59c5bdbd6167cc7da1442089e1d947f Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 21 May 2021 16:52:57 +0100 Subject: [PATCH 301/428] CMake: Add option to download SUNDIALS at configure time Currently needs my (Peter's) fork because the latest release is missing an `export()` call that allows it to work with `FetchContent`. Also a small change to `FindSundials` to make the download/non-download options work in the same way --- CMakeLists.txt | 33 +++++++++++++++++++++++++++------ cmake/FindSUNDIALS.cmake | 11 ++++++++--- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7e22fd3c37..9f7c9c64e2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -582,18 +582,39 @@ endif() message(STATUS "SLEPc support: ${BOUT_USE_SLEPC}") set(BOUT_HAS_SLEPC ${BOUT_USE_SLEPC}) -option(BOUT_USE_SUNDIALS "Enable support for SUNDIALS time solvers" OFF) +option(BOUT_DOWNLOAD_SUNDIALS "Download and build SUNDIALS" OFF) +# Force BOUT_USE_SUNDIALS if we're downloading it! 
+cmake_dependent_option(BOUT_USE_SUNDIALS "Enable support for SUNDIALS time solvers" OFF + "NOT BOUT_DOWNLOAD_SUNDIALS" ON) if (BOUT_USE_SUNDIALS) - find_package(SUNDIALS REQUIRED) + if (BOUT_DOWNLOAD_SUNDIALS) + message(STATUS "Downloading and configuring SUNDIALS") + include(FetchContent) + FetchContent_Declare( + sundials + GIT_REPOSITORY https://github.com/ZedThree/sundials + GIT_TAG "4f3bb8281c7b27343bcb95386ebbb665fb6196a5" + ) + set(EXAMPLES_ENABLE_C OFF CACHE BOOL "" FORCE) + set(EXAMPLES_INSTALL OFF CACHE BOOL "" FORCE) + set(ENABLE_MPI ON CACHE BOOL "" FORCE) + set(ENABLE_OPENMP OFF CACHE BOOL "" FORCE) + set(BUILD_STATIC_LIBS OFF CACHE BOOL "" FORCE) + FetchContent_MakeAvailable(sundials) + message(STATUS "SUNDIALS done configuring") + else() + find_package(SUNDIALS REQUIRED) + endif() + target_link_libraries(bout++ PUBLIC SUNDIALS::nvecparallel) + target_link_libraries(bout++ PUBLIC SUNDIALS::cvode) + target_link_libraries(bout++ PUBLIC SUNDIALS::ida) + target_link_libraries(bout++ PUBLIC SUNDIALS::arkode) + set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} ${SUNDIALS_cvode_LIBRARY} ${SUNDIALS_ida_LIBRARY} ${SUNDIALS_arkode_LIBRARY} ${SUNDIALS_nvecparallel_LIBRARY}") target_compile_definitions(bout++ PUBLIC "BOUT_HAS_CVODE" PUBLIC "BOUT_HAS_IDA" PUBLIC "BOUT_HAS_ARKODE" PUBLIC "BOUT_HAS_SUNDIALS") - target_link_libraries(bout++ PUBLIC SUNDIALS::cvode) - target_link_libraries(bout++ PUBLIC SUNDIALS::ida) - target_link_libraries(bout++ PUBLIC SUNDIALS::arkode) - set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} ${SUNDIALS_cvode_LIBRARY} ${SUNDIALS_ida_LIBRARY} ${SUNDIALS_arkode_LIBRARY} ${SUNDIALS_nvecparallel_LIBRARY}") endif() message(STATUS "SUNDIALS support: ${BOUT_USE_SUNDIALS}") set(BOUT_HAS_SUNDIALS ${BOUT_USE_SUNDIALS}) diff --git a/cmake/FindSUNDIALS.cmake b/cmake/FindSUNDIALS.cmake index ceaeb3ebce..8f9e1a6310 100644 --- a/cmake/FindSUNDIALS.cmake +++ b/cmake/FindSUNDIALS.cmake @@ -31,6 +31,11 @@ include(FindPackageHandleStandardArgs) +find_package(SUNDIALS CONFIG QUIET) +if (SUNDIALS_FOUND) + return() +endif() + find_path(SUNDIALS_INCLUDE_DIR sundials_config.h HINTS @@ -123,8 +128,8 @@ set(SUNDIALS_LIBRARIES "${SUNDIALS_LIBRARIES}" CACHE STRING "SUNDIALS libraries" mark_as_advanced(SUNDIALS_LIBRARIES SUNDIALS_INCLUDE_DIR SUNDIALS_INCLUDE_DIRS) if (SUNDIALS_FOUND AND NOT TARGET SUNDIALS::SUNDIALS) - add_library(SUNDIALS::NVecParallel UNKNOWN IMPORTED) - set_target_properties(SUNDIALS::NVecParallel PROPERTIES + add_library(SUNDIALS::nvecparallel UNKNOWN IMPORTED) + set_target_properties(SUNDIALS::nvecparallel PROPERTIES IMPORTED_LOCATION "${SUNDIALS_nvecparallel_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${SUNDIALS_INCLUDE_DIRS}") @@ -133,6 +138,6 @@ if (SUNDIALS_FOUND AND NOT TARGET SUNDIALS::SUNDIALS) set_target_properties(SUNDIALS::${LIB} PROPERTIES IMPORTED_LOCATION "${SUNDIALS_${LIB}_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${SUNDIALS_INCLUDE_DIRS}" - INTERFACE_LINK_LIBRARIES SUNDIALS::NVecParallel) + INTERFACE_LINK_LIBRARIES SUNDIALS::nvecparallel) endforeach() endif() From cdc704d18563d02c349fe94c8757bdb53e4a22c8 Mon Sep 17 00:00:00 2001 From: Ben Dudson Date: Mon, 24 May 2021 10:06:08 +0100 Subject: [PATCH 302/428] Add downloading dependencies to CMake install doc Mention that CMake can be used to download NetCDF and SUNDIALS dependencies. 
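A minimal, standalone sketch of the option-forcing behaviour mentioned above, assuming only CMake >= 3.14 and its bundled CMakeDependentOption module; the project name and status messages are illustrative and not part of the BOUT++ build itself:

cmake_minimum_required(VERSION 3.14)
project(download_option_demo NONE)
include(CMakeDependentOption)

# User-facing switch: download and build the dependency ourselves
option(BOUT_DOWNLOAD_SUNDIALS "Download and build SUNDIALS" OFF)

# Defaults to OFF, but is forced ON whenever the download option is enabled,
# so passing -DBOUT_DOWNLOAD_SUNDIALS=ON alone is enough to enable the solver
cmake_dependent_option(BOUT_USE_SUNDIALS "Enable support for SUNDIALS time solvers" OFF
  "NOT BOUT_DOWNLOAD_SUNDIALS" ON)

message(STATUS "BOUT_DOWNLOAD_SUNDIALS = ${BOUT_DOWNLOAD_SUNDIALS}")
message(STATUS "BOUT_USE_SUNDIALS      = ${BOUT_USE_SUNDIALS}")

Configuring this sketch with "cmake -B build -DBOUT_DOWNLOAD_SUNDIALS=ON" should report both options as ON.
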
--- manual/sphinx/user_docs/installing.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/manual/sphinx/user_docs/installing.rst b/manual/sphinx/user_docs/installing.rst index bbbcb3927c..816bd9d7ea 100644 --- a/manual/sphinx/user_docs/installing.rst +++ b/manual/sphinx/user_docs/installing.rst @@ -344,6 +344,18 @@ it's wise to delete the ``CMakeCache.txt`` file in the build directory. The equivalent of ``make distclean`` with CMake is to just delete the entire build directory and reconfigure. +Downloading Dependencies +~~~~~~~~~~~~~~~~~~~~~~~~ + +If you don't have some dependencies installed, CMake can be used to download, +configure and compile them alongside BOUT++. + +For NetCDF, use ``-DBOUT_DOWNLOAD_NETCDF_CXX4=ON`` + +For SUNDIALS, use ``-DBOUT_DOWNLOAD_SUNDIALS=ON``. If using ``ccmake`` this option +may not appear initially. This automatically sets ``BOUT_USE_SUNDIALS=ON``, and +configures SUNDIALS to use MPI. + Bundled Dependencies ^^^^^^^^^^^^^^^^^^^^ From 720bb4add96d7e62e30204ede57df3b966697742 Mon Sep 17 00:00:00 2001 From: Ben Dudson Date: Wed, 9 Jun 2021 21:49:42 +0100 Subject: [PATCH 303/428] CMake rename PACKAGE_TESTS to BOUT_TESTS If including BOUT++ as a dependency, this namespacing makes it easier to select which tests should be run: In some cases (e.g. CI, Github actions) only the tests for the physics model should be run, and not the BOUT++ tests. --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9f7c9c64e2..4f68053f0f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -723,9 +723,9 @@ target_compile_definitions(bout++ ################################################## # Tests -option(PACKAGE_TESTS "Build the tests" ON) +option(BOUT_TESTS "Build the tests" ON) option(BOUT_RUN_ALL_TESTS "Run all of the tests (this can be slow!)" OFF) -if(PACKAGE_TESTS) +if(BOUT_TESTS) enable_testing() # Targets for just building the tests From 1c51db689e4d6845dae527617ac0c5971ea12d44 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Jun 2021 10:35:45 +0100 Subject: [PATCH 304/428] Disable tests by default if building as part of another project --- CMakeLists.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 4f68053f0f..d2550a04db 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -723,7 +723,12 @@ target_compile_definitions(bout++ ################################################## # Tests -option(BOUT_TESTS "Build the tests" ON) +# Are we building BOUT++ directly, or as part of another project +string(COMPARE EQUAL + "${PROJECT_NAME}" "${CMAKE_PROJECT_NAME}" + PROJECT_IS_TOP_LEVEL +) +option(BOUT_TESTS "Build the tests" ${PROJECT_IS_TOP_LEVEL}) option(BOUT_RUN_ALL_TESTS "Run all of the tests (this can be slow!)" OFF) if(BOUT_TESTS) enable_testing() From 23373644ab0e183ea0ec6c17c07ebac7e7a08718 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Tue, 29 Jun 2021 15:19:02 +0200 Subject: [PATCH 305/428] make sundials detection errors fatal Previously a failure to find SUNDIALS_nvecparallel_LIBRARY resulted in a compile time error. This makes this a configure error. 
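A small sketch of the pattern this patch applies, using made-up DEMO_* names rather than the real FindSUNDIALS.cmake variables: if find_library fails, stop with message(FATAL_ERROR) while configuring, instead of letting a -NOTFOUND value leak into the link flags and only fail later at compile or link time:

cmake_minimum_required(VERSION 3.14)
project(find_demo NONE)

# Hypothetical library lookup; the cache variable is set to
# "DEMO_nvecparallel_LIBRARY-NOTFOUND" when the search fails
find_library(DEMO_nvecparallel_LIBRARY NAMES sundials_nvecparallel)

if (DEMO_nvecparallel_LIBRARY STREQUAL "DEMO_nvecparallel_LIBRARY-NOTFOUND")
  # Abort configuration immediately with a clear message
  message(FATAL_ERROR "SUNDIALS requested but nvecparallel library not found.")
endif()

list(APPEND DEMO_LIBRARIES "${DEMO_nvecparallel_LIBRARY}")
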
--- cmake/FindSUNDIALS.cmake | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cmake/FindSUNDIALS.cmake b/cmake/FindSUNDIALS.cmake index 8f9e1a6310..5789d12a7b 100644 --- a/cmake/FindSUNDIALS.cmake +++ b/cmake/FindSUNDIALS.cmake @@ -68,9 +68,10 @@ if (SUNDIALS_DEBUG) " SUNDIALS_nvecparallel_LIBRARY = ${SUNDIALS_nvecparallel_LIBRARY}") endif() -if (SUNDIALS_nvecparallel_LIBRARY) - list(APPEND SUNDIALS_LIBRARIES "${SUNDIALS_nvecparallel_LIBRARY}") +if (${SUNDIALS_nvecparallel_LIBRARY} STREQUAL "SUNDIALS_nvecparallel_LIBRARY-NOTFOUND") + message(FATAL_ERROR "Sundials requested but SUNDIALS nvecparallel not found.") endif() +list(APPEND SUNDIALS_LIBRARIES "${SUNDIALS_nvecparallel_LIBRARY}") mark_as_advanced(SUNDIALS_nvecparallel_LIBRARY) set(SUNDIALS_COMPONENTS arkode cvode ida) @@ -89,9 +90,10 @@ foreach (LIB ${SUNDIALS_COMPONENTS}) " SUNDIALS_${LIB}_LIBRARY = ${SUNDIALS_${LIB}_LIBRARY}") endif() - if (SUNDIALS_${LIB}_LIBRARY) - list(APPEND SUNDIALS_LIBRARIES "${SUNDIALS_${LIB}_LIBRARY}") + if (${SUNDIALS_${LIB}_LIBRARY} STREQUAL "SUNDIALS_${LIB}_LIBRARY-NOTFOUND") + message(FATAL_ERROR "Sundials requested but SUNDIALS ${LIB} not found.") endif() + list(APPEND SUNDIALS_LIBRARIES "${SUNDIALS_${LIB}_LIBRARY}") mark_as_advanced(SUNDIALS_${LIB}_LIBRARY) endforeach() From 8e3c2cb3085fc110502bc09fc20a399167c139fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Tue, 29 Jun 2021 15:34:23 +0200 Subject: [PATCH 306/428] Add soversion This ensures we do resolve to a well specified version. If there is a globally installed BOUT++ with .so, the local version is used, as long as the SOVERSION is different. --- CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d2550a04db..31ec4c3e36 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -315,7 +315,8 @@ target_include_directories(bout++ PUBLIC set(BOUT_LIB_PATH "${CMAKE_CURRENT_BINARY_DIR}/lib") set_target_properties(bout++ PROPERTIES LIBRARY_OUTPUT_DIRECTORY "${BOUT_LIB_PATH}" - ARCHIVE_OUTPUT_DIRECTORY "${BOUT_LIB_PATH}") + ARCHIVE_OUTPUT_DIRECTORY "${BOUT_LIB_PATH}" + SOVERSION 4.4.0) # Set some variables for the bout-config script set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS} -L\$BOUT_LIB_PATH -lbout++") From 0cf2a3779843885ad60048476ed39abc4f3b6a9a Mon Sep 17 00:00:00 2001 From: dschwoerer Date: Sat, 3 Jul 2021 17:47:44 +0200 Subject: [PATCH 307/428] Simplify sundials detection strings ending in -NOTFOUND are False Co-authored-by: Peter Hill --- cmake/FindSUNDIALS.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/FindSUNDIALS.cmake b/cmake/FindSUNDIALS.cmake index 5789d12a7b..c67e2be100 100644 --- a/cmake/FindSUNDIALS.cmake +++ b/cmake/FindSUNDIALS.cmake @@ -68,7 +68,7 @@ if (SUNDIALS_DEBUG) " SUNDIALS_nvecparallel_LIBRARY = ${SUNDIALS_nvecparallel_LIBRARY}") endif() -if (${SUNDIALS_nvecparallel_LIBRARY} STREQUAL "SUNDIALS_nvecparallel_LIBRARY-NOTFOUND") +if (NOT SUNDIALS_nvecparallel_LIBRARY) message(FATAL_ERROR "Sundials requested but SUNDIALS nvecparallel not found.") endif() list(APPEND SUNDIALS_LIBRARIES "${SUNDIALS_nvecparallel_LIBRARY}") @@ -90,7 +90,7 @@ foreach (LIB ${SUNDIALS_COMPONENTS}) " SUNDIALS_${LIB}_LIBRARY = ${SUNDIALS_${LIB}_LIBRARY}") endif() - if (${SUNDIALS_${LIB}_LIBRARY} STREQUAL "SUNDIALS_${LIB}_LIBRARY-NOTFOUND") + if (NOT SUNDIALS_${LIB}_LIBRARY) message(FATAL_ERROR "Sundials requested but SUNDIALS ${LIB} not found.") endif() list(APPEND SUNDIALS_LIBRARIES "${SUNDIALS_${LIB}_LIBRARY}") 
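The simplification above relies on a CMake evaluation rule: inside if(), a value ending in "-NOTFOUND" (which is what find_library leaves behind on failure) counts as false. A tiny script-mode demonstration, runnable as "cmake -P notfound_demo.cmake"; the file name and the DEMO_LIBRARY variable are made up for illustration:

# A failed find_library() result: evaluates as false
set(DEMO_LIBRARY "DEMO_LIBRARY-NOTFOUND")
if (NOT DEMO_LIBRARY)
  message(STATUS "-NOTFOUND values are false, so 'if (NOT ...)' is sufficient")
endif()

# A successful lookup (any other non-empty, non-false string): evaluates as true
set(DEMO_LIBRARY "/usr/lib/libsundials_demo.so")
if (DEMO_LIBRARY)
  message(STATUS "found-style values are true")
endif()
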
From d1c3fb925dede8ce584255de7f2ba0ea00657aad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Sat, 3 Jul 2021 17:51:23 +0200 Subject: [PATCH 308/428] Add SO version for bundled pvode --- externalpackages/PVODE/CMakeLists.txt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/externalpackages/PVODE/CMakeLists.txt b/externalpackages/PVODE/CMakeLists.txt index 9c4778d142..c5bcfbd061 100644 --- a/externalpackages/PVODE/CMakeLists.txt +++ b/externalpackages/PVODE/CMakeLists.txt @@ -49,6 +49,10 @@ add_library(pvpre precon/band.h ) + +set_target_properties(pvode PROPERTIES + SOVERSION 1.0.0) + target_include_directories(pvpre PUBLIC $ $ @@ -56,6 +60,10 @@ target_include_directories(pvpre PUBLIC ) target_link_libraries(pvpre PUBLIC MPI::MPI_CXX) + +set_target_properties(pvpre PROPERTIES + SOVERSION 1.0.0) + include(GNUInstallDirs) install(TARGETS pvode pvpre EXPORT PVODETargets From 0b83240a0e61e7ec024cd4033055d11b765eb699 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 6 Nov 2019 09:21:31 +0000 Subject: [PATCH 309/428] Move input file for staggered version of test Previously changed test in fab73286 to not copy+sed input file, forgot that the input file in data/ wasn't in repo (again) --- .../{BOUT_stag.inp => data/BOUT.inp} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/integrated/test-drift-instability-staggered/{BOUT_stag.inp => data/BOUT.inp} (100%) diff --git a/tests/integrated/test-drift-instability-staggered/BOUT_stag.inp b/tests/integrated/test-drift-instability-staggered/data/BOUT.inp similarity index 100% rename from tests/integrated/test-drift-instability-staggered/BOUT_stag.inp rename to tests/integrated/test-drift-instability-staggered/data/BOUT.inp From 46fcf569f8ab60b3e54cad02405e45efc5aee5e6 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 12 Aug 2020 19:22:57 +0100 Subject: [PATCH 310/428] Delete removed test_io.grd.nc file from test-invpar's CMakeLists.txt --- tests/integrated/test-invpar/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integrated/test-invpar/CMakeLists.txt b/tests/integrated/test-invpar/CMakeLists.txt index 418709e7df..b1ab558af4 100644 --- a/tests/integrated/test-invpar/CMakeLists.txt +++ b/tests/integrated/test-invpar/CMakeLists.txt @@ -2,5 +2,4 @@ bout_add_integrated_test(test-invpar SOURCES test_invpar.cxx USE_RUNTEST USE_DATA_BOUT_INP - EXTRA_FILES test_io.grd.nc ) From 592aad1a7d15a1e5bcbe582f13d012e804373705 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 9 Jun 2020 11:16:30 +0100 Subject: [PATCH 311/428] Upgrade split-file version of advdiff to PhysicsModel --- examples/advdiff2/CMakeLists.txt | 6 ++---- examples/advdiff2/globals.cxx | 3 --- examples/advdiff2/globals.hxx | 9 --------- examples/advdiff2/header.hxx | 12 ++++++++++-- examples/advdiff2/init.cxx | 24 +++++++++--------------- examples/advdiff2/makefile | 2 +- examples/advdiff2/{run.cxx => rhs.cxx} | 5 ++--- 7 files changed, 24 insertions(+), 37 deletions(-) delete mode 100644 examples/advdiff2/globals.cxx delete mode 100644 examples/advdiff2/globals.hxx rename examples/advdiff2/{run.cxx => rhs.cxx} (54%) diff --git a/examples/advdiff2/CMakeLists.txt b/examples/advdiff2/CMakeLists.txt index 92fcbe543b..e90a00d72f 100644 --- a/examples/advdiff2/CMakeLists.txt +++ b/examples/advdiff2/CMakeLists.txt @@ -7,9 +7,7 @@ if (NOT TARGET bout++::bout++) endif() bout_add_example(advdiff2 - SOURCES globals.cxx - globals.hxx - header.hxx + SOURCES header.hxx init.cxx - run.cxx + rhs.cxx EXTRA_FILES slab.grd.nc) diff --git 
a/examples/advdiff2/globals.cxx b/examples/advdiff2/globals.cxx deleted file mode 100644 index 5f80f88048..0000000000 --- a/examples/advdiff2/globals.cxx +++ /dev/null @@ -1,3 +0,0 @@ -#include "header.hxx" -#define GLOBALORIGIN -#include "globals.hxx" diff --git a/examples/advdiff2/globals.hxx b/examples/advdiff2/globals.hxx deleted file mode 100644 index 6388fbe36f..0000000000 --- a/examples/advdiff2/globals.hxx +++ /dev/null @@ -1,9 +0,0 @@ -#include "header.hxx" - -#ifdef GLOBALORIGIN -#define GLOBAL -#else -#define GLOBAL extern -#endif - -GLOBAL Field3D V; diff --git a/examples/advdiff2/header.hxx b/examples/advdiff2/header.hxx index 7d7ff44477..9e6f92431f 100644 --- a/examples/advdiff2/header.hxx +++ b/examples/advdiff2/header.hxx @@ -1,7 +1,15 @@ #ifndef INCLUDE_GUARD_H #define INCLUDE_GUARD_H -#include -#include +#include + +class AdvDiff : public PhysicsModel { + // Evolving variables + Field3D V; + +protected: + int init(bool restarting) override; + int rhs(BoutReal) override; +}; #endif diff --git a/examples/advdiff2/init.cxx b/examples/advdiff2/init.cxx index efa2cd5c83..614f949544 100644 --- a/examples/advdiff2/init.cxx +++ b/examples/advdiff2/init.cxx @@ -1,35 +1,29 @@ +#include #include -#include -#include -#include "globals.hxx" +#include "header.hxx" -int physics_init(bool restarting) -{ +int AdvDiff::init(bool restarting) { // 2D initial profiles Field2D V0; - // Read initial conditions - mesh->get(V0, "V0"); - mesh->get(mesh->getCoordinates()->dx, "dx"); - mesh->get(mesh->getCoordinates()->dy, "dy"); - + mesh->get(mesh->getCoordinates()->dx, "dx"); + mesh->get(mesh->getCoordinates()->dy, "dy"); // read options - // Set evolving variables - bout_solve(V, "V"); + SOLVE_FOR(V); - - if(!restarting) { + if (!restarting) { // Set variables to these values (+ the initial perturbation) // NOTE: This must be after the calls to bout_solve V += V0; } - + return 0; } +BOUTMAIN(AdvDiff) diff --git a/examples/advdiff2/makefile b/examples/advdiff2/makefile index 5572e1e3e1..e17c2c3c7a 100644 --- a/examples/advdiff2/makefile +++ b/examples/advdiff2/makefile @@ -1,6 +1,6 @@ BOUT_TOP ?= ../.. -SOURCEC = globals.cxx init.cxx run.cxx +SOURCEC = init.cxx rhs.cxx TARGET = advdiff diff --git a/examples/advdiff2/run.cxx b/examples/advdiff2/rhs.cxx similarity index 54% rename from examples/advdiff2/run.cxx rename to examples/advdiff2/rhs.cxx index 44d40bb9e7..d2a3619d82 100644 --- a/examples/advdiff2/run.cxx +++ b/examples/advdiff2/rhs.cxx @@ -1,13 +1,12 @@ #include #include -#include "globals.hxx" +#include "header.hxx" -int physics_run(BoutReal UNUSED(t)) { +int AdvDiff::rhs(BoutReal UNUSED(t)) { // Run communications mesh->communicate(V); - //ddt(V) = D2DX2(V) + 0.5*DDX(V) + D2DY2(V); ddt(V) = DDX(V); return 0; From bb2ac60758e74d5fe6f076e6297f2f587e1b515d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Thu, 27 Feb 2020 00:39:28 +0000 Subject: [PATCH 312/428] Add bout_config to expose configure status to python Some tests need to know wether the metric field is 2d or 3d. Exposing the whole state might be usefull. 
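A hedged sketch of the intended use from a Python test script, assuming tools/pylib is on PYTHONPATH so the generated module can be imported. The boutconfig module and the isMetric3D() helper shown here follow the interface introduced a few patches later in this series (and the metric helpers are removed again by a later patch), so treat the names as illustrative rather than the final API:

# Illustrative only: relies on the configure-time generated boutconfig module
import boutconfig

if boutconfig.isMetric3D():
    print("BOUT++ was configured with 3D metrics; running the 3D variant")
else:
    print("BOUT++ was configured with 2D metrics")
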
--- configure | 6 ++++++ configure.ac | 2 ++ 2 files changed, 8 insertions(+) diff --git a/configure b/configure index da1c6680e2..8a2e81d2d6 100755 --- a/configure +++ b/configure @@ -17023,6 +17023,10 @@ BOUT_INCLUDE_PATH=$PWD/include ac_config_files="$ac_config_files bin/bout-config" +ac_config_files="$ac_config_files src/makefile" + +ac_config_files="$ac_config_files tools/pylib/boututils/bout_config.py" + cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure @@ -17752,6 +17756,8 @@ do "po-directories") CONFIG_COMMANDS="$CONFIG_COMMANDS po-directories" ;; "make.config") CONFIG_FILES="$CONFIG_FILES make.config" ;; "bin/bout-config") CONFIG_FILES="$CONFIG_FILES bin/bout-config" ;; + "src/makefile") CONFIG_FILES="$CONFIG_FILES src/makefile" ;; + "tools/pylib/boututils/bout_config.py") CONFIG_FILES="$CONFIG_FILES tools/pylib/boututils/bout_config.py" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac diff --git a/configure.ac b/configure.ac index f6793d504a..f31b147ddf 100644 --- a/configure.ac +++ b/configure.ac @@ -1408,6 +1408,8 @@ AC_SUBST(HAS_NLS) AC_SUBST(HAS_FFTW) AC_CONFIG_FILES([bin/bout-config]) +AC_CONFIG_FILES([src/makefile]) +AC_CONFIG_FILES([tools/pylib/boututils/bout_config.py]) AC_OUTPUT chmod a+x bin/bout-config From ca0c5b57570c3f12a82cc5c30e05c260bd62b992 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Sun, 1 Mar 2020 18:53:43 +0000 Subject: [PATCH 313/428] Add bout_config to cmake * Add metric3D to cmake * cmake with -DENABLE_METRIC_3D=ON to get 3D metrics * Due to the out-of-source build we need to python dirs, one for noarch files, and one for arched ones --- CMakeLists.txt | 15 ++++++++++++++- bout++Config.cmake.in | 1 + 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 31ec4c3e36..2b72c91d2b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -779,6 +779,16 @@ if (BOUT_BUILD_DOCS) add_subdirectory(manual EXCLUDE_FROM_ALL) endif() +option(ENABLE_METRIC_3D "Enable 3D metric support" OFF) +if(ENABLE_METRIC_3D) + set(BOUT_METRIC_TYPE "3D") + target_compile_definitions(bout++ + PUBLIC "COORDINATES_USE_3D") + target_link_libraries(bout++ PUBLIC ${CMAKE_DL_LIBS}) +else() + set(BOUT_METRIC_TYPE "2D") +endif() +set(BOUT_USE_METRIC_3D ${ENABLE_METRIC_3D}) ################################################## # Generate the bout-config script @@ -861,7 +871,9 @@ configure_package_config_file(bout++Config.cmake.in bout++Config.cmake INSTALL_DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/bout++Config.cmake" ) -# CMake configuration files +configure_package_config_file(tools/pylib/boututils/bout_config.py.cin tools/pylib/boututils/bout_config.py + INSTALL_DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/tools/pylib/boututils/bout_config.py" + ) install( FILES "${CMAKE_CURRENT_BINARY_DIR}/bout++Config.cmake" @@ -917,6 +929,7 @@ message(" Field name tracking : ${BOUT_USE_TRACK} Floating point exceptions: ${BOUT_USE_SIGFPE} Backtrace enabled : ${BOUT_USE_BACKTRACE} + 3D Metric enabled : ${BOUT_USE_METRIC_3D} === Python === diff --git a/bout++Config.cmake.in b/bout++Config.cmake.in index e9579161f8..106c2815f3 100644 --- a/bout++Config.cmake.in +++ b/bout++Config.cmake.in @@ -10,6 +10,7 @@ set(BOUT_USE_BACKTRACE @BOUT_USE_BACKTRACE@) set(BOUT_USE_OPENMP @BOUT_USE_OPENMP@) set(BOUT_HAS_OUTPUT_DEBUG @BOUT_HAS_OUTPUT_DEBUG@) set(BOUT_CHECK_LEVEL @BOUT_CHECK_LEVEL@) +set(BOUT_USE_METRIC_3D 
@BOUT_USE_METRIC_3D@) set(BOUT_HAS_PVODE @BOUT_HAS_PVODE@) set(BOUT_HAS_NETCDF @BOUT_HAS_NETCDF@) From bad4ba9793a0c880017269bf84ecfb1965402240 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Sun, 1 Mar 2020 20:02:02 +0000 Subject: [PATCH 314/428] Move boutconfig to own folder. otherwise python finds boututils in one pythonpath, and does not look in other locations for the other file. --- CMakeLists.txt | 4 +- configure.ac | 2 +- tools/pylib/boutconfig/__init__.py.cin | 294 +++++++++++++++++++++++++ tools/pylib/boutconfig/__init__.py.in | 293 ++++++++++++++++++++++++ 4 files changed, 590 insertions(+), 3 deletions(-) create mode 100644 tools/pylib/boutconfig/__init__.py.cin create mode 100644 tools/pylib/boutconfig/__init__.py.in diff --git a/CMakeLists.txt b/CMakeLists.txt index 2b72c91d2b..7744824ea1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -871,8 +871,8 @@ configure_package_config_file(bout++Config.cmake.in bout++Config.cmake INSTALL_DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/bout++Config.cmake" ) -configure_package_config_file(tools/pylib/boututils/bout_config.py.cin tools/pylib/boututils/bout_config.py - INSTALL_DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/tools/pylib/boututils/bout_config.py" +configure_package_config_file(tools/pylib/boutconfig/__init__.py.cin tools/pylib/boutconfig/__init__.py + INSTALL_DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/tools/pylib/boutconfig/__init__.py" ) install( FILES diff --git a/configure.ac b/configure.ac index f31b147ddf..25632421be 100644 --- a/configure.ac +++ b/configure.ac @@ -1409,7 +1409,7 @@ AC_SUBST(HAS_FFTW) AC_CONFIG_FILES([bin/bout-config]) AC_CONFIG_FILES([src/makefile]) -AC_CONFIG_FILES([tools/pylib/boututils/bout_config.py]) +AC_CONFIG_FILES([tools/pylib/boutconfig/__init__.py]) AC_OUTPUT chmod a+x bin/bout-config diff --git a/tools/pylib/boutconfig/__init__.py.cin b/tools/pylib/boutconfig/__init__.py.cin new file mode 100644 index 0000000000..141203897c --- /dev/null +++ b/tools/pylib/boutconfig/__init__.py.cin @@ -0,0 +1,294 @@ +"""Functions for getting the config used for compiling BOUT++ + +""" +# Created by cmake +_yesno = {"TRUE": True, "ON": True, "FALSE": False, "OFF": False} +_iyesno = {True:"yes", False: "no"} + +_cc='@MPICXX@' +_cxx='@MPICXX@' +_ld='@MPICXX@' +_checks="@BOUT_CHECK_LEVEL@" +_cflags='@CONFIG_CFLAGS@' +_libs='@CONFIG_LDFLAGS@' + +_version="@BOUT_VERSION@" +_git="@GIT_REVISION@" +_idlpath="@IDLCONFIGPATH@" +_pythonpath="@PYTHONCONFIGPATH@" + +_has_netcdf="@BOUT_HAS_NETCDF@" +_has_pnetcdf="OFF" +_has_hdf5="@BOUT_HAS_HDF5@" +_has_pvode="@BOUT_HAS_PVODE@" +_has_cvode="OFF" +_has_ida="OFF" +_has_lapack="@BOUT_HAS_LAPACK@" +_has_petsc="@BOUT_HAS_PETSC@" +_has_slepc="@BOUT_HAS_SLEPC@" +_has_mumps="OFF" +_has_arkode="OFF" +_has_openmp="@BOUT_USE_OPENMP@" +_has_nls="@BOUT_HAS_GETTEXT@" +_has_fftw="@BOUT_HAS_FFTW@" + +_petsc_has_sundials="@PETSC_HAS_SUNDIALS@" + +_metric_type="@BOUT_METRIC_TYPE@" +def has_netcdf(): + """Return 'yes' / 'no' whether has_netcdf is available + + """ + return _iyesno[_yesno[_has_netcdf]] + +def hasNetcdf(): + """Return True / False whether has_netcdf is available + + """ + return _yesno[_has_netcdf] + +def has_pnetcdf(): + """Return 'yes' / 'no' whether has_pnetcdf is available + + """ + return _iyesno[_yesno[_has_pnetcdf]] + +def hasPnetcdf(): + """Return True / False whether has_pnetcdf is available + + """ + return _yesno[_has_pnetcdf] + +def has_hdf5(): + """Return 'yes' / 'no' whether has_hdf5 is available + + """ + return _iyesno[_yesno[_has_hdf5]] + +def 
hasHdf5(): + """Return True / False whether has_hdf5 is available + + """ + return _yesno[_has_hdf5] + +def has_pvode(): + """Return 'yes' / 'no' whether has_pvode is available + + """ + return _iyesno[_yesno[_has_pvode]] + +def hasPvode(): + """Return True / False whether has_pvode is available + + """ + return _yesno[_has_pvode] + +def has_cvode(): + """Return 'yes' / 'no' whether has_cvode is available + + """ + return _iyesno[_yesno[_has_cvode]] + +def hasCvode(): + """Return True / False whether has_cvode is available + + """ + return _yesno[_has_cvode] + +def has_ida(): + """Return 'yes' / 'no' whether has_ida is available + + """ + return _iyesno[_yesno[_has_ida]] + +def hasIda(): + """Return True / False whether has_ida is available + + """ + return _yesno[_has_ida] + +def has_lapack(): + """Return 'yes' / 'no' whether has_lapack is available + + """ + return _iyesno[_yesno[_has_lapack]] + +def hasLapack(): + """Return True / False whether has_lapack is available + + """ + return _yesno[_has_lapack] + +def has_petsc(): + """Return 'yes' / 'no' whether has_petsc is available + + """ + return _iyesno[_yesno[_has_petsc]] + +def hasPetsc(): + """Return True / False whether has_petsc is available + + """ + return _yesno[_has_petsc] + +def has_slepc(): + """Return 'yes' / 'no' whether has_slepc is available + + """ + return _iyesno[_yesno[_has_slepc]] + +def hasSlepc(): + """Return True / False whether has_slepc is available + + """ + return _yesno[_has_slepc] + +def has_mumps(): + """Return 'yes' / 'no' whether has_mumps is available + + """ + return _iyesno[_yesno[_has_mumps]] + +def hasMumps(): + """Return True / False whether has_mumps is available + + """ + return _yesno[_has_mumps] + +def has_arkode(): + """Return 'yes' / 'no' whether has_arkode is available + + """ + return _iyesno[_yesno[_has_arkode]] + +def hasArkode(): + """Return True / False whether has_arkode is available + + """ + return _yesno[_has_arkode] + +def has_openmp(): + """Return 'yes' / 'no' whether has_openmp is available + + """ + return _iyesno[_yesno[_has_openmp]] + +def hasOpenmp(): + """Return True / False whether has_openmp is available + + """ + return _yesno[_has_openmp] + +def has_nls(): + """Return 'yes' / 'no' whether has_nls is available + + """ + return _iyesno[_yesno[_has_nls]] + +def hasNls(): + """Return True / False whether has_nls is available + + """ + return _yesno[_has_nls] + +def has_fftw(): + """Return 'yes' / 'no' whether has_fftw is available + + """ + return _iyesno[_yesno[_has_fftw]] + +def hasFftw(): + """Return True / False whether has_fftw is available + + """ + return _yesno[_has_fftw] + +def petsc_has_sundials(): + """Return 'yes' / 'no' whether petsc_has_sundials is available + + """ + return _iyesno[_yesno[_petsc_has_sundials]] + +def petscHasSundials(): + """Return True / False whether petsc_has_sundials is available + + """ + return _yesno[_petsc_has_sundials] + +def cc(): + """Option for cc + + """ + return _cc + +def cxx(): + """Option for cxx + + """ + return _cxx + +def ld(): + """Option for ld + + """ + return _ld + +def checks(): + """Option for checks + + """ + return _checks + +def cflags(): + """Option for cflags + + """ + return _cflags + +def libs(): + """Option for libs + + """ + return _libs + +def metric_type(): + """Option for metric_type + + """ + return _metric_type + +def version(): + """Option for version + + """ + return _version + +def git(): + """Option for git + + """ + return _git + +def idlpath(): + """Option for idlpath + + """ + return _idlpath + 
+def pythonpath(): + """Option for pythonpath + + """ + return _pythonpath + +def isMetric2D(): + """Is the metric 2D? + + """ + return _metric_type == "2D" + +def isMetric3D(): + """Is the metric 3D? + + """ + return _metric_type == "3D" diff --git a/tools/pylib/boutconfig/__init__.py.in b/tools/pylib/boutconfig/__init__.py.in new file mode 100644 index 0000000000..c53d4cce2c --- /dev/null +++ b/tools/pylib/boutconfig/__init__.py.in @@ -0,0 +1,293 @@ +"""Functions for getting the config used for compiling BOUT++ + +""" + +_yesno = {'"yes"': True, '"no"': False} + +_cc='@MPICXX@' +_cxx='@MPICXX@' +_ld='@MPICXX@' +_checks="@CHECK_LEVEL@" +_cflags='@CONFIG_CFLAGS@' +_libs='@CONFIG_LDFLAGS@' + +_version="@BOUT_VERSION@" +_git="@GIT_REVISION@" +_idlpath="@IDLCONFIGPATH@" +_pythonpath="@PYTHONCONFIGPATH@" + +_has_netcdf="@HAS_NETCDF@" +_has_pnetcdf="@HAS_PNETCDF@" +_has_hdf5="@HAS_HDF5@" +_has_pvode="@HAS_PVODE@" +_has_cvode="@HAS_CVODE@" +_has_ida="@HAS_IDA@" +_has_lapack="@HAS_LAPACK@" +_has_petsc="@HAS_PETSC@" +_has_slepc="@HAS_SLEPC@" +_has_mumps="@HAS_MUMPS@" +_has_arkode="@HAS_ARKODE@" +_has_openmp="@HAS_OPENMP@" +_has_nls="@HAS_NLS@" +_has_fftw="@HAS_FFTW@" + +_petsc_has_sundials="@PETSC_HAS_SUNDIALS@" + +_metric_type="@METRIC_TYPE@" +def has_netcdf(): + """Return 'yes' / 'no' whether has_netcdf is available + + """ + return _has_netcdf + +def hasNetcdf(): + """Return True / False whether has_netcdf is available + + """ + return _yesno[_has_netcdf] + +def has_pnetcdf(): + """Return 'yes' / 'no' whether has_pnetcdf is available + + """ + return _has_pnetcdf + +def hasPnetcdf(): + """Return True / False whether has_pnetcdf is available + + """ + return _yesno[_has_pnetcdf] + +def has_hdf5(): + """Return 'yes' / 'no' whether has_hdf5 is available + + """ + return _has_hdf5 + +def hasHdf5(): + """Return True / False whether has_hdf5 is available + + """ + return _yesno[_has_hdf5] + +def has_pvode(): + """Return 'yes' / 'no' whether has_pvode is available + + """ + return _has_pvode + +def hasPvode(): + """Return True / False whether has_pvode is available + + """ + return _yesno[_has_pvode] + +def has_cvode(): + """Return 'yes' / 'no' whether has_cvode is available + + """ + return _has_cvode + +def hasCvode(): + """Return True / False whether has_cvode is available + + """ + return _yesno[_has_cvode] + +def has_ida(): + """Return 'yes' / 'no' whether has_ida is available + + """ + return _has_ida + +def hasIda(): + """Return True / False whether has_ida is available + + """ + return _yesno[_has_ida] + +def has_lapack(): + """Return 'yes' / 'no' whether has_lapack is available + + """ + return _has_lapack + +def hasLapack(): + """Return True / False whether has_lapack is available + + """ + return _yesno[_has_lapack] + +def has_petsc(): + """Return 'yes' / 'no' whether has_petsc is available + + """ + return _has_petsc + +def hasPetsc(): + """Return True / False whether has_petsc is available + + """ + return _yesno[_has_petsc] + +def has_slepc(): + """Return 'yes' / 'no' whether has_slepc is available + + """ + return _has_slepc + +def hasSlepc(): + """Return True / False whether has_slepc is available + + """ + return _yesno[_has_slepc] + +def has_mumps(): + """Return 'yes' / 'no' whether has_mumps is available + + """ + return _has_mumps + +def hasMumps(): + """Return True / False whether has_mumps is available + + """ + return _yesno[_has_mumps] + +def has_arkode(): + """Return 'yes' / 'no' whether has_arkode is available + + """ + return _has_arkode + +def hasArkode(): + """Return True 
/ False whether has_arkode is available + + """ + return _yesno[_has_arkode] + +def has_openmp(): + """Return 'yes' / 'no' whether has_openmp is available + + """ + return _has_openmp + +def hasOpenmp(): + """Return True / False whether has_openmp is available + + """ + return _yesno[_has_openmp] + +def has_nls(): + """Return 'yes' / 'no' whether has_nls is available + + """ + return _has_nls + +def hasNls(): + """Return True / False whether has_nls is available + + """ + return _yesno[_has_nls] + +def has_fftw(): + """Return 'yes' / 'no' whether has_fftw is available + + """ + return _has_fftw + +def hasFftw(): + """Return True / False whether has_fftw is available + + """ + return _yesno[_has_fftw] + +def petsc_has_sundials(): + """Return 'yes' / 'no' whether petsc_has_sundials is available + + """ + return _petsc_has_sundials + +def petscHasSundials(): + """Return True / False whether petsc_has_sundials is available + + """ + return _yesno[_petsc_has_sundials] + +def cc(): + """Option for cc + + """ + return _cc + +def cxx(): + """Option for cxx + + """ + return _cxx + +def ld(): + """Option for ld + + """ + return _ld + +def checks(): + """Option for checks + + """ + return _checks + +def cflags(): + """Option for cflags + + """ + return _cflags + +def libs(): + """Option for libs + + """ + return _libs + +def metric_type(): + """Option for metric_type + + """ + return _metric_type + +def version(): + """Option for version + + """ + return _version + +def git(): + """Option for git + + """ + return _git + +def idlpath(): + """Option for idlpath + + """ + return _idlpath + +def pythonpath(): + """Option for pythonpath + + """ + return _pythonpath + +def isMetric2D(): + """Is the metric 2D? + + """ + return _metric_type == "2D" + +def isMetric3D(): + """Is the metric 3D? + + """ + return _metric_type == "3D" From 75b035b9aee9af0ca42a687eee1f35acd9a48272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Sat, 4 Apr 2020 13:50:45 +0100 Subject: [PATCH 315/428] update configure --- configure | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configure b/configure index 8a2e81d2d6..8821c01aa9 100755 --- a/configure +++ b/configure @@ -17025,7 +17025,7 @@ ac_config_files="$ac_config_files bin/bout-config" ac_config_files="$ac_config_files src/makefile" -ac_config_files="$ac_config_files tools/pylib/boututils/bout_config.py" +ac_config_files="$ac_config_files tools/pylib/boutconfig/__init__.py" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure @@ -17757,7 +17757,7 @@ do "make.config") CONFIG_FILES="$CONFIG_FILES make.config" ;; "bin/bout-config") CONFIG_FILES="$CONFIG_FILES bin/bout-config" ;; "src/makefile") CONFIG_FILES="$CONFIG_FILES src/makefile" ;; - "tools/pylib/boututils/bout_config.py") CONFIG_FILES="$CONFIG_FILES tools/pylib/boututils/bout_config.py" ;; + "tools/pylib/boutconfig/__init__.py") CONFIG_FILES="$CONFIG_FILES tools/pylib/boutconfig/__init__.py" ;; *) as_fn_error $? 
"invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac From d04e80ad10d0e8a7b00111f9946cf48f8d915f55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Sat, 11 Apr 2020 23:09:57 +0100 Subject: [PATCH 316/428] Fix bad cherry-pick --- CMakeLists.txt | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7744824ea1..545dd4d9ff 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -779,16 +779,6 @@ if (BOUT_BUILD_DOCS) add_subdirectory(manual EXCLUDE_FROM_ALL) endif() -option(ENABLE_METRIC_3D "Enable 3D metric support" OFF) -if(ENABLE_METRIC_3D) - set(BOUT_METRIC_TYPE "3D") - target_compile_definitions(bout++ - PUBLIC "COORDINATES_USE_3D") - target_link_libraries(bout++ PUBLIC ${CMAKE_DL_LIBS}) -else() - set(BOUT_METRIC_TYPE "2D") -endif() -set(BOUT_USE_METRIC_3D ${ENABLE_METRIC_3D}) ################################################## # Generate the bout-config script From a2e608ffbd0b69dfcf364db4a36e25e7b7d8bffb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Sun, 12 Apr 2020 00:00:36 +0100 Subject: [PATCH 317/428] Use dict for boutconfig --- tools/pylib/boutconfig/__init__.py.cin | 317 ++++--------------------- tools/pylib/boutconfig/__init__.py.in | 317 ++++--------------------- 2 files changed, 84 insertions(+), 550 deletions(-) diff --git a/tools/pylib/boutconfig/__init__.py.cin b/tools/pylib/boutconfig/__init__.py.cin index 141203897c..1ba12f0d98 100644 --- a/tools/pylib/boutconfig/__init__.py.cin +++ b/tools/pylib/boutconfig/__init__.py.cin @@ -5,281 +5,48 @@ _yesno = {"TRUE": True, "ON": True, "FALSE": False, "OFF": False} _iyesno = {True:"yes", False: "no"} -_cc='@MPICXX@' -_cxx='@MPICXX@' -_ld='@MPICXX@' -_checks="@BOUT_CHECK_LEVEL@" -_cflags='@CONFIG_CFLAGS@' -_libs='@CONFIG_LDFLAGS@' - -_version="@BOUT_VERSION@" -_git="@GIT_REVISION@" -_idlpath="@IDLCONFIGPATH@" -_pythonpath="@PYTHONCONFIGPATH@" - -_has_netcdf="@BOUT_HAS_NETCDF@" -_has_pnetcdf="OFF" -_has_hdf5="@BOUT_HAS_HDF5@" -_has_pvode="@BOUT_HAS_PVODE@" -_has_cvode="OFF" -_has_ida="OFF" -_has_lapack="@BOUT_HAS_LAPACK@" -_has_petsc="@BOUT_HAS_PETSC@" -_has_slepc="@BOUT_HAS_SLEPC@" -_has_mumps="OFF" -_has_arkode="OFF" -_has_openmp="@BOUT_USE_OPENMP@" -_has_nls="@BOUT_HAS_GETTEXT@" -_has_fftw="@BOUT_HAS_FFTW@" - -_petsc_has_sundials="@PETSC_HAS_SUNDIALS@" - -_metric_type="@BOUT_METRIC_TYPE@" -def has_netcdf(): - """Return 'yes' / 'no' whether has_netcdf is available - - """ - return _iyesno[_yesno[_has_netcdf]] - -def hasNetcdf(): - """Return True / False whether has_netcdf is available - - """ - return _yesno[_has_netcdf] - -def has_pnetcdf(): - """Return 'yes' / 'no' whether has_pnetcdf is available - - """ - return _iyesno[_yesno[_has_pnetcdf]] - -def hasPnetcdf(): - """Return True / False whether has_pnetcdf is available - - """ - return _yesno[_has_pnetcdf] - -def has_hdf5(): - """Return 'yes' / 'no' whether has_hdf5 is available - - """ - return _iyesno[_yesno[_has_hdf5]] - -def hasHdf5(): - """Return True / False whether has_hdf5 is available - - """ - return _yesno[_has_hdf5] - -def has_pvode(): - """Return 'yes' / 'no' whether has_pvode is available - - """ - return _iyesno[_yesno[_has_pvode]] - -def hasPvode(): - """Return True / False whether has_pvode is available - - """ - return _yesno[_has_pvode] - -def has_cvode(): - """Return 'yes' / 'no' whether has_cvode is available - - """ - return _iyesno[_yesno[_has_cvode]] - -def hasCvode(): - """Return True / False whether has_cvode is available - - """ - return _yesno[_has_cvode] - 
-def has_ida(): - """Return 'yes' / 'no' whether has_ida is available - - """ - return _iyesno[_yesno[_has_ida]] - -def hasIda(): - """Return True / False whether has_ida is available - - """ - return _yesno[_has_ida] - -def has_lapack(): - """Return 'yes' / 'no' whether has_lapack is available - - """ - return _iyesno[_yesno[_has_lapack]] - -def hasLapack(): - """Return True / False whether has_lapack is available - - """ - return _yesno[_has_lapack] - -def has_petsc(): - """Return 'yes' / 'no' whether has_petsc is available - - """ - return _iyesno[_yesno[_has_petsc]] - -def hasPetsc(): - """Return True / False whether has_petsc is available - - """ - return _yesno[_has_petsc] - -def has_slepc(): - """Return 'yes' / 'no' whether has_slepc is available - - """ - return _iyesno[_yesno[_has_slepc]] - -def hasSlepc(): - """Return True / False whether has_slepc is available - - """ - return _yesno[_has_slepc] - -def has_mumps(): - """Return 'yes' / 'no' whether has_mumps is available - - """ - return _iyesno[_yesno[_has_mumps]] - -def hasMumps(): - """Return True / False whether has_mumps is available - - """ - return _yesno[_has_mumps] - -def has_arkode(): - """Return 'yes' / 'no' whether has_arkode is available - - """ - return _iyesno[_yesno[_has_arkode]] - -def hasArkode(): - """Return True / False whether has_arkode is available - - """ - return _yesno[_has_arkode] - -def has_openmp(): - """Return 'yes' / 'no' whether has_openmp is available - - """ - return _iyesno[_yesno[_has_openmp]] - -def hasOpenmp(): - """Return True / False whether has_openmp is available - - """ - return _yesno[_has_openmp] - -def has_nls(): - """Return 'yes' / 'no' whether has_nls is available - - """ - return _iyesno[_yesno[_has_nls]] - -def hasNls(): - """Return True / False whether has_nls is available - - """ - return _yesno[_has_nls] - -def has_fftw(): - """Return 'yes' / 'no' whether has_fftw is available - - """ - return _iyesno[_yesno[_has_fftw]] - -def hasFftw(): - """Return True / False whether has_fftw is available - - """ - return _yesno[_has_fftw] - -def petsc_has_sundials(): - """Return 'yes' / 'no' whether petsc_has_sundials is available - - """ - return _iyesno[_yesno[_petsc_has_sundials]] - -def petscHasSundials(): - """Return True / False whether petsc_has_sundials is available - - """ - return _yesno[_petsc_has_sundials] - -def cc(): - """Option for cc - - """ - return _cc - -def cxx(): - """Option for cxx - - """ - return _cxx - -def ld(): - """Option for ld - - """ - return _ld - -def checks(): - """Option for checks - - """ - return _checks - -def cflags(): - """Option for cflags - - """ - return _cflags - -def libs(): - """Option for libs - - """ - return _libs - -def metric_type(): - """Option for metric_type - - """ - return _metric_type - -def version(): - """Option for version - - """ - return _version - -def git(): - """Option for git - - """ - return _git - -def idlpath(): - """Option for idlpath - - """ - return _idlpath - -def pythonpath(): - """Option for pythonpath - - """ - return _pythonpath +config = { + "cc":'@MPICXX@', + "cxx":'@MPICXX@', + "ld":'@MPICXX@', + "checks":"@BOUT_CHECK_LEVEL@", + "cflags":'@CONFIG_CFLAGS@', + "libs":'@CONFIG_LDFLAGS@', + + "version":"@BOUT_VERSION@", + "git":"@GIT_REVISION@", + "idlpath":"@IDLCONFIGPATH@", + "pythonpath":"@PYTHONCONFIGPATH@", + + "has_netcdf":"@BOUT_HAS_NETCDF@", + "has_pnetcdf":"OFF", + "has_hdf5":"@BOUT_HAS_HDF5@", + "has_pvode":"@BOUT_HAS_PVODE@", + "has_cvode":"OFF", + "has_ida":"OFF", + "has_lapack":"@BOUT_HAS_LAPACK@", + 
"has_petsc":"@BOUT_HAS_PETSC@", + "has_slepc":"@BOUT_HAS_SLEPC@", + "has_mumps":"OFF", + "has_arkode":"OFF", + "has_openmp":"@BOUT_USE_OPENMP@", + "has_nls":"@BOUT_HAS_GETTEXT@", + "has_fftw":"@BOUT_HAS_FFTW@", + + "petsc_has_sundials":"@PETSC_HAS_SUNDIALS@", + + "metric_type":"@BOUT_METRIC_TYPE@", + } + +@property +def has(): + """Get a dict of the enabled features + """ + _has={} + for k in config: + if k.startswith("has_"): + _has[k[4:]]=_yesno[config[k]] + return _has def isMetric2D(): """Is the metric 2D? diff --git a/tools/pylib/boutconfig/__init__.py.in b/tools/pylib/boutconfig/__init__.py.in index c53d4cce2c..916d5eabb6 100644 --- a/tools/pylib/boutconfig/__init__.py.in +++ b/tools/pylib/boutconfig/__init__.py.in @@ -4,281 +4,48 @@ _yesno = {'"yes"': True, '"no"': False} -_cc='@MPICXX@' -_cxx='@MPICXX@' -_ld='@MPICXX@' -_checks="@CHECK_LEVEL@" -_cflags='@CONFIG_CFLAGS@' -_libs='@CONFIG_LDFLAGS@' - -_version="@BOUT_VERSION@" -_git="@GIT_REVISION@" -_idlpath="@IDLCONFIGPATH@" -_pythonpath="@PYTHONCONFIGPATH@" - -_has_netcdf="@HAS_NETCDF@" -_has_pnetcdf="@HAS_PNETCDF@" -_has_hdf5="@HAS_HDF5@" -_has_pvode="@HAS_PVODE@" -_has_cvode="@HAS_CVODE@" -_has_ida="@HAS_IDA@" -_has_lapack="@HAS_LAPACK@" -_has_petsc="@HAS_PETSC@" -_has_slepc="@HAS_SLEPC@" -_has_mumps="@HAS_MUMPS@" -_has_arkode="@HAS_ARKODE@" -_has_openmp="@HAS_OPENMP@" -_has_nls="@HAS_NLS@" -_has_fftw="@HAS_FFTW@" - -_petsc_has_sundials="@PETSC_HAS_SUNDIALS@" - -_metric_type="@METRIC_TYPE@" -def has_netcdf(): - """Return 'yes' / 'no' whether has_netcdf is available - - """ - return _has_netcdf - -def hasNetcdf(): - """Return True / False whether has_netcdf is available - - """ - return _yesno[_has_netcdf] - -def has_pnetcdf(): - """Return 'yes' / 'no' whether has_pnetcdf is available - - """ - return _has_pnetcdf - -def hasPnetcdf(): - """Return True / False whether has_pnetcdf is available - - """ - return _yesno[_has_pnetcdf] - -def has_hdf5(): - """Return 'yes' / 'no' whether has_hdf5 is available - - """ - return _has_hdf5 - -def hasHdf5(): - """Return True / False whether has_hdf5 is available - - """ - return _yesno[_has_hdf5] - -def has_pvode(): - """Return 'yes' / 'no' whether has_pvode is available - - """ - return _has_pvode - -def hasPvode(): - """Return True / False whether has_pvode is available - - """ - return _yesno[_has_pvode] - -def has_cvode(): - """Return 'yes' / 'no' whether has_cvode is available - - """ - return _has_cvode - -def hasCvode(): - """Return True / False whether has_cvode is available - - """ - return _yesno[_has_cvode] - -def has_ida(): - """Return 'yes' / 'no' whether has_ida is available - - """ - return _has_ida - -def hasIda(): - """Return True / False whether has_ida is available - - """ - return _yesno[_has_ida] - -def has_lapack(): - """Return 'yes' / 'no' whether has_lapack is available - - """ - return _has_lapack - -def hasLapack(): - """Return True / False whether has_lapack is available - - """ - return _yesno[_has_lapack] - -def has_petsc(): - """Return 'yes' / 'no' whether has_petsc is available - - """ - return _has_petsc - -def hasPetsc(): - """Return True / False whether has_petsc is available - - """ - return _yesno[_has_petsc] - -def has_slepc(): - """Return 'yes' / 'no' whether has_slepc is available - - """ - return _has_slepc - -def hasSlepc(): - """Return True / False whether has_slepc is available - - """ - return _yesno[_has_slepc] - -def has_mumps(): - """Return 'yes' / 'no' whether has_mumps is available - - """ - return _has_mumps - -def hasMumps(): - """Return True 
/ False whether has_mumps is available - - """ - return _yesno[_has_mumps] - -def has_arkode(): - """Return 'yes' / 'no' whether has_arkode is available - - """ - return _has_arkode - -def hasArkode(): - """Return True / False whether has_arkode is available - - """ - return _yesno[_has_arkode] - -def has_openmp(): - """Return 'yes' / 'no' whether has_openmp is available - - """ - return _has_openmp - -def hasOpenmp(): - """Return True / False whether has_openmp is available - - """ - return _yesno[_has_openmp] - -def has_nls(): - """Return 'yes' / 'no' whether has_nls is available - - """ - return _has_nls - -def hasNls(): - """Return True / False whether has_nls is available - - """ - return _yesno[_has_nls] - -def has_fftw(): - """Return 'yes' / 'no' whether has_fftw is available - - """ - return _has_fftw - -def hasFftw(): - """Return True / False whether has_fftw is available - - """ - return _yesno[_has_fftw] - -def petsc_has_sundials(): - """Return 'yes' / 'no' whether petsc_has_sundials is available - - """ - return _petsc_has_sundials - -def petscHasSundials(): - """Return True / False whether petsc_has_sundials is available - - """ - return _yesno[_petsc_has_sundials] - -def cc(): - """Option for cc - - """ - return _cc - -def cxx(): - """Option for cxx - - """ - return _cxx - -def ld(): - """Option for ld - - """ - return _ld - -def checks(): - """Option for checks - - """ - return _checks - -def cflags(): - """Option for cflags - - """ - return _cflags - -def libs(): - """Option for libs - - """ - return _libs - -def metric_type(): - """Option for metric_type - - """ - return _metric_type - -def version(): - """Option for version - - """ - return _version - -def git(): - """Option for git - - """ - return _git - -def idlpath(): - """Option for idlpath - - """ - return _idlpath - -def pythonpath(): - """Option for pythonpath - - """ - return _pythonpath +config = { + "cc":'@MPICXX@', + "cxx":'@MPICXX@', + "ld":'@MPICXX@', + "checks":"@CHECK_LEVEL@", + "cflags":'@CONFIG_CFLAGS@', + "libs":'@CONFIG_LDFLAGS@', + + "version":"@BOUT_VERSION@", + "git":"@GIT_REVISION@", + "idlpath":"@IDLCONFIGPATH@", + "pythonpath":"@PYTHONCONFIGPATH@", + + "has_netcdf":"@HAS_NETCDF@", + "has_pnetcdf":"@HAS_PNETCDF@", + "has_hdf5":"@HAS_HDF5@", + "has_pvode":"@HAS_PVODE@", + "has_cvode":"@HAS_CVODE@", + "has_ida":"@HAS_IDA@", + "has_lapack":"@HAS_LAPACK@", + "has_petsc":"@HAS_PETSC@", + "has_slepc":"@HAS_SLEPC@", + "has_mumps":"@HAS_MUMPS@", + "has_arkode":"@HAS_ARKODE@", + "has_openmp":"@HAS_OPENMP@", + "has_nls":"@HAS_NLS@", + "has_fftw":"@HAS_FFTW@", + + "petsc_has_sundials":"@PETSC_HAS_SUNDIALS@", + + "metric_type":"@METRIC_TYPE@", + } + +@property +def has(): + """Get a dict of the enabled features + """ + _has={} + for k in config: + if k.startswith("has_"): + _has[k[4:]]=_yesno[config[k]] + return _has def isMetric2D(): """Is the metric 2D? From 2dc58a3cb0cee8f8304a91c3bd0a13fa64b3a892 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Wed, 3 Jun 2020 16:32:32 +0100 Subject: [PATCH 318/428] Update boutconfig to new interface --- tools/pylib/boutconfig/__init__.py.cin | 4 +-- tools/pylib/boutconfig/__init__.py.in | 36 +++++++++++++------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/tools/pylib/boutconfig/__init__.py.cin b/tools/pylib/boutconfig/__init__.py.cin index 1ba12f0d98..cb4c53a969 100644 --- a/tools/pylib/boutconfig/__init__.py.cin +++ b/tools/pylib/boutconfig/__init__.py.cin @@ -52,10 +52,10 @@ def isMetric2D(): """Is the metric 2D? 
""" - return _metric_type == "2D" + return config["metric_type"] == "2D" def isMetric3D(): """Is the metric 3D? """ - return _metric_type == "3D" + return config["metric_type"] == "3D" diff --git a/tools/pylib/boutconfig/__init__.py.in b/tools/pylib/boutconfig/__init__.py.in index 916d5eabb6..432f13b99f 100644 --- a/tools/pylib/boutconfig/__init__.py.in +++ b/tools/pylib/boutconfig/__init__.py.in @@ -8,7 +8,7 @@ config = { "cc":'@MPICXX@', "cxx":'@MPICXX@', "ld":'@MPICXX@', - "checks":"@CHECK_LEVEL@", + "checks":"@BOUT_CHECK_LEVEL@", "cflags":'@CONFIG_CFLAGS@', "libs":'@CONFIG_LDFLAGS@', @@ -17,24 +17,24 @@ config = { "idlpath":"@IDLCONFIGPATH@", "pythonpath":"@PYTHONCONFIGPATH@", - "has_netcdf":"@HAS_NETCDF@", - "has_pnetcdf":"@HAS_PNETCDF@", - "has_hdf5":"@HAS_HDF5@", - "has_pvode":"@HAS_PVODE@", - "has_cvode":"@HAS_CVODE@", - "has_ida":"@HAS_IDA@", - "has_lapack":"@HAS_LAPACK@", - "has_petsc":"@HAS_PETSC@", - "has_slepc":"@HAS_SLEPC@", - "has_mumps":"@HAS_MUMPS@", - "has_arkode":"@HAS_ARKODE@", - "has_openmp":"@HAS_OPENMP@", - "has_nls":"@HAS_NLS@", - "has_fftw":"@HAS_FFTW@", + "has_netcdf":"@BOUT_HAS_NETCDF@", + "has_pnetcdf":"@BOUT_HAS_PNETCDF@", + "has_hdf5":"@BOUT_HAS_HDF5@", + "has_pvode":"@BOUT_HAS_PVODE@", + "has_cvode":"@BOUT_HAS_CVODE@", + "has_ida":"@BOUT_HAS_IDA@", + "has_lapack":"@BOUT_HAS_LAPACK@", + "has_petsc":"@BOUT_HAS_PETSC@", + "has_slepc":"@BOUT_HAS_SLEPC@", + "has_mumps":"no", + "has_arkode":"@BOUT_HAS_ARKODE@", + "has_openmp":"@BOUT_USE_OPENMP@", + "has_nls":"@BOUT_HAS_GETTEXT@", + "has_fftw":"@BOUT_HAS_FFTW@", "petsc_has_sundials":"@PETSC_HAS_SUNDIALS@", - "metric_type":"@METRIC_TYPE@", + "metric_type":"@BOUT_METRIC_TYPE@", } @property @@ -51,10 +51,10 @@ def isMetric2D(): """Is the metric 2D? """ - return _metric_type == "2D" + return config["metric_type"] == "2D" def isMetric3D(): """Is the metric 3D? 
""" - return _metric_type == "3D" + return config["metric_type"] == "3D" From b33abef30baea69274c4bf4c3e1c8ecb7687c180 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Dec 2020 09:51:21 +0000 Subject: [PATCH 319/428] Apply black formatting to boutconfig input files --- tools/pylib/boutconfig/__init__.py.cin | 78 ++++++++++++-------------- tools/pylib/boutconfig/__init__.py.in | 76 ++++++++++++------------- 2 files changed, 71 insertions(+), 83 deletions(-) diff --git a/tools/pylib/boutconfig/__init__.py.cin b/tools/pylib/boutconfig/__init__.py.cin index cb4c53a969..7f126d161a 100644 --- a/tools/pylib/boutconfig/__init__.py.cin +++ b/tools/pylib/boutconfig/__init__.py.cin @@ -3,59 +3,53 @@ """ # Created by cmake _yesno = {"TRUE": True, "ON": True, "FALSE": False, "OFF": False} -_iyesno = {True:"yes", False: "no"} +_iyesno = {True: "yes", False: "no"} config = { - "cc":'@MPICXX@', - "cxx":'@MPICXX@', - "ld":'@MPICXX@', - "checks":"@BOUT_CHECK_LEVEL@", - "cflags":'@CONFIG_CFLAGS@', - "libs":'@CONFIG_LDFLAGS@', - - "version":"@BOUT_VERSION@", - "git":"@GIT_REVISION@", - "idlpath":"@IDLCONFIGPATH@", - "pythonpath":"@PYTHONCONFIGPATH@", - - "has_netcdf":"@BOUT_HAS_NETCDF@", - "has_pnetcdf":"OFF", - "has_hdf5":"@BOUT_HAS_HDF5@", - "has_pvode":"@BOUT_HAS_PVODE@", - "has_cvode":"OFF", - "has_ida":"OFF", - "has_lapack":"@BOUT_HAS_LAPACK@", - "has_petsc":"@BOUT_HAS_PETSC@", - "has_slepc":"@BOUT_HAS_SLEPC@", - "has_mumps":"OFF", - "has_arkode":"OFF", - "has_openmp":"@BOUT_USE_OPENMP@", - "has_nls":"@BOUT_HAS_GETTEXT@", - "has_fftw":"@BOUT_HAS_FFTW@", - - "petsc_has_sundials":"@PETSC_HAS_SUNDIALS@", - - "metric_type":"@BOUT_METRIC_TYPE@", - } + "cc": "@MPICXX@", + "cxx": "@MPICXX@", + "ld": "@MPICXX@", + "checks": "@BOUT_CHECK_LEVEL@", + "cflags": "@CONFIG_CFLAGS@", + "libs": "@CONFIG_LDFLAGS@", + "version": "@BOUT_VERSION@", + "git": "@GIT_REVISION@", + "idlpath": "@IDLCONFIGPATH@", + "pythonpath": "@PYTHONCONFIGPATH@", + "has_netcdf": "@BOUT_HAS_NETCDF@", + "has_pnetcdf": "OFF", + "has_hdf5": "@BOUT_HAS_HDF5@", + "has_pvode": "@BOUT_HAS_PVODE@", + "has_cvode": "OFF", + "has_ida": "OFF", + "has_lapack": "@BOUT_HAS_LAPACK@", + "has_petsc": "@BOUT_HAS_PETSC@", + "has_slepc": "@BOUT_HAS_SLEPC@", + "has_mumps": "OFF", + "has_arkode": "OFF", + "has_openmp": "@BOUT_USE_OPENMP@", + "has_nls": "@BOUT_HAS_GETTEXT@", + "has_fftw": "@BOUT_HAS_FFTW@", + "petsc_has_sundials": "@PETSC_HAS_SUNDIALS@", + "metric_type": "@BOUT_METRIC_TYPE@", +} + @property def has(): - """Get a dict of the enabled features - """ - _has={} + """Get a dict of the enabled features""" + _has = {} for k in config: if k.startswith("has_"): - _has[k[4:]]=_yesno[config[k]] + _has[k[4:]] = _yesno[config[k]] return _has -def isMetric2D(): - """Is the metric 2D? - """ +def isMetric2D(): + """Is the metric 2D?""" return config["metric_type"] == "2D" -def isMetric3D(): - """Is the metric 3D? 
- """ +def isMetric3D(): + """Is the metric 3D?""" return config["metric_type"] == "3D" diff --git a/tools/pylib/boutconfig/__init__.py.in b/tools/pylib/boutconfig/__init__.py.in index 432f13b99f..60ded0de93 100644 --- a/tools/pylib/boutconfig/__init__.py.in +++ b/tools/pylib/boutconfig/__init__.py.in @@ -5,56 +5,50 @@ _yesno = {'"yes"': True, '"no"': False} config = { - "cc":'@MPICXX@', - "cxx":'@MPICXX@', - "ld":'@MPICXX@', - "checks":"@BOUT_CHECK_LEVEL@", - "cflags":'@CONFIG_CFLAGS@', - "libs":'@CONFIG_LDFLAGS@', - - "version":"@BOUT_VERSION@", - "git":"@GIT_REVISION@", - "idlpath":"@IDLCONFIGPATH@", - "pythonpath":"@PYTHONCONFIGPATH@", - - "has_netcdf":"@BOUT_HAS_NETCDF@", - "has_pnetcdf":"@BOUT_HAS_PNETCDF@", - "has_hdf5":"@BOUT_HAS_HDF5@", - "has_pvode":"@BOUT_HAS_PVODE@", - "has_cvode":"@BOUT_HAS_CVODE@", - "has_ida":"@BOUT_HAS_IDA@", - "has_lapack":"@BOUT_HAS_LAPACK@", - "has_petsc":"@BOUT_HAS_PETSC@", - "has_slepc":"@BOUT_HAS_SLEPC@", - "has_mumps":"no", - "has_arkode":"@BOUT_HAS_ARKODE@", - "has_openmp":"@BOUT_USE_OPENMP@", - "has_nls":"@BOUT_HAS_GETTEXT@", - "has_fftw":"@BOUT_HAS_FFTW@", - - "petsc_has_sundials":"@PETSC_HAS_SUNDIALS@", - - "metric_type":"@BOUT_METRIC_TYPE@", - } + "cc": "@MPICXX@", + "cxx": "@MPICXX@", + "ld": "@MPICXX@", + "checks": "@BOUT_CHECK_LEVEL@", + "cflags": "@CONFIG_CFLAGS@", + "libs": "@CONFIG_LDFLAGS@", + "version": "@BOUT_VERSION@", + "git": "@GIT_REVISION@", + "idlpath": "@IDLCONFIGPATH@", + "pythonpath": "@PYTHONCONFIGPATH@", + "has_netcdf": "@BOUT_HAS_NETCDF@", + "has_pnetcdf": "@BOUT_HAS_PNETCDF@", + "has_hdf5": "@BOUT_HAS_HDF5@", + "has_pvode": "@BOUT_HAS_PVODE@", + "has_cvode": "@BOUT_HAS_CVODE@", + "has_ida": "@BOUT_HAS_IDA@", + "has_lapack": "@BOUT_HAS_LAPACK@", + "has_petsc": "@BOUT_HAS_PETSC@", + "has_slepc": "@BOUT_HAS_SLEPC@", + "has_mumps": "no", + "has_arkode": "@BOUT_HAS_ARKODE@", + "has_openmp": "@BOUT_USE_OPENMP@", + "has_nls": "@BOUT_HAS_GETTEXT@", + "has_fftw": "@BOUT_HAS_FFTW@", + "petsc_has_sundials": "@PETSC_HAS_SUNDIALS@", + "metric_type": "@BOUT_METRIC_TYPE@", +} + @property def has(): - """Get a dict of the enabled features - """ - _has={} + """Get a dict of the enabled features""" + _has = {} for k in config: if k.startswith("has_"): - _has[k[4:]]=_yesno[config[k]] + _has[k[4:]] = _yesno[config[k]] return _has -def isMetric2D(): - """Is the metric 2D? - """ +def isMetric2D(): + """Is the metric 2D?""" return config["metric_type"] == "2D" -def isMetric3D(): - """Is the metric 3D? 
- """ +def isMetric3D(): + """Is the metric 3D?""" return config["metric_type"] == "3D" From 7c181f2318e2852211062cd8775f2514d6be0c7e Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Dec 2020 09:53:54 +0000 Subject: [PATCH 320/428] Add CMake variables for SUNDIALS components to boutconfig input --- tools/pylib/boutconfig/__init__.py.cin | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/pylib/boutconfig/__init__.py.cin b/tools/pylib/boutconfig/__init__.py.cin index 7f126d161a..48e3f737d0 100644 --- a/tools/pylib/boutconfig/__init__.py.cin +++ b/tools/pylib/boutconfig/__init__.py.cin @@ -20,13 +20,13 @@ config = { "has_pnetcdf": "OFF", "has_hdf5": "@BOUT_HAS_HDF5@", "has_pvode": "@BOUT_HAS_PVODE@", - "has_cvode": "OFF", - "has_ida": "OFF", + "has_cvode": "@BOUT_HAS_CVODE@", + "has_ida": "@BOUT_HAS_IDA@", "has_lapack": "@BOUT_HAS_LAPACK@", "has_petsc": "@BOUT_HAS_PETSC@", "has_slepc": "@BOUT_HAS_SLEPC@", "has_mumps": "OFF", - "has_arkode": "OFF", + "has_arkode": "@BOUT_HAS_ARKODE@", "has_openmp": "@BOUT_USE_OPENMP@", "has_nls": "@BOUT_HAS_GETTEXT@", "has_fftw": "@BOUT_HAS_FFTW@", From feb992adc1ae76a1f54deaaaf9f8d695300b6908 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Dec 2020 09:56:57 +0000 Subject: [PATCH 321/428] Remove BOUT_METRIC_TYPE from boutconfig --- tools/pylib/boutconfig/__init__.py.cin | 11 ----------- tools/pylib/boutconfig/__init__.py.in | 11 ----------- 2 files changed, 22 deletions(-) diff --git a/tools/pylib/boutconfig/__init__.py.cin b/tools/pylib/boutconfig/__init__.py.cin index 48e3f737d0..b0d4bc158d 100644 --- a/tools/pylib/boutconfig/__init__.py.cin +++ b/tools/pylib/boutconfig/__init__.py.cin @@ -31,7 +31,6 @@ config = { "has_nls": "@BOUT_HAS_GETTEXT@", "has_fftw": "@BOUT_HAS_FFTW@", "petsc_has_sundials": "@PETSC_HAS_SUNDIALS@", - "metric_type": "@BOUT_METRIC_TYPE@", } @@ -43,13 +42,3 @@ def has(): if k.startswith("has_"): _has[k[4:]] = _yesno[config[k]] return _has - - -def isMetric2D(): - """Is the metric 2D?""" - return config["metric_type"] == "2D" - - -def isMetric3D(): - """Is the metric 3D?""" - return config["metric_type"] == "3D" diff --git a/tools/pylib/boutconfig/__init__.py.in b/tools/pylib/boutconfig/__init__.py.in index 60ded0de93..5aaf7bb0f0 100644 --- a/tools/pylib/boutconfig/__init__.py.in +++ b/tools/pylib/boutconfig/__init__.py.in @@ -30,7 +30,6 @@ config = { "has_nls": "@BOUT_HAS_GETTEXT@", "has_fftw": "@BOUT_HAS_FFTW@", "petsc_has_sundials": "@PETSC_HAS_SUNDIALS@", - "metric_type": "@BOUT_METRIC_TYPE@", } @@ -42,13 +41,3 @@ def has(): if k.startswith("has_"): _has[k[4:]] = _yesno[config[k]] return _has - - -def isMetric2D(): - """Is the metric 2D?""" - return config["metric_type"] == "2D" - - -def isMetric3D(): - """Is the metric 3D?""" - return config["metric_type"] == "3D" From 7b4170162632c8d5e5158b3aaa1dbb5e1ae03fa5 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Dec 2020 10:11:22 +0000 Subject: [PATCH 322/428] Fix names of some boutconfig variables --- tools/pylib/boutconfig/__init__.py.cin | 12 ++++++------ tools/pylib/boutconfig/__init__.py.in | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/pylib/boutconfig/__init__.py.cin b/tools/pylib/boutconfig/__init__.py.cin index b0d4bc158d..06093d1f2f 100644 --- a/tools/pylib/boutconfig/__init__.py.cin +++ b/tools/pylib/boutconfig/__init__.py.cin @@ -6,14 +6,14 @@ _yesno = {"TRUE": True, "ON": True, "FALSE": False, "OFF": False} _iyesno = {True: "yes", False: "no"} config = { - "cc": "@MPICXX@", - "cxx": "@MPICXX@", - 
"ld": "@MPICXX@", + "cc": "@CMAKE_C_COMPILER@", + "cxx": "@CMAKE_CXX_COMPILER@", + "ld": "@CMAKE_CXX_COMPILER@", "checks": "@BOUT_CHECK_LEVEL@", - "cflags": "@CONFIG_CFLAGS@", - "libs": "@CONFIG_LDFLAGS@", + "cflags": "", + "libs": "", "version": "@BOUT_VERSION@", - "git": "@GIT_REVISION@", + "git": "@BOUT_REVISION@", "idlpath": "@IDLCONFIGPATH@", "pythonpath": "@PYTHONCONFIGPATH@", "has_netcdf": "@BOUT_HAS_NETCDF@", diff --git a/tools/pylib/boutconfig/__init__.py.in b/tools/pylib/boutconfig/__init__.py.in index 5aaf7bb0f0..8d615f0389 100644 --- a/tools/pylib/boutconfig/__init__.py.in +++ b/tools/pylib/boutconfig/__init__.py.in @@ -12,7 +12,7 @@ config = { "cflags": "@CONFIG_CFLAGS@", "libs": "@CONFIG_LDFLAGS@", "version": "@BOUT_VERSION@", - "git": "@GIT_REVISION@", + "git": "@BOUT_REVISION@", "idlpath": "@IDLCONFIGPATH@", "pythonpath": "@PYTHONCONFIGPATH@", "has_netcdf": "@BOUT_HAS_NETCDF@", From 01384cc9e095b5903ca95540c46b0a498c01445a Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Dec 2020 10:13:50 +0000 Subject: [PATCH 323/428] CMake: Set BOUT_PYTHONPATH variable with location of Python modules Includes the generated boutconfig module --- CMakeLists.txt | 5 ++++- tools/pylib/boutconfig/__init__.py.cin | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 545dd4d9ff..cedd51b981 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -676,6 +676,9 @@ if (HAS_PRETTY_FUNCTION) PUBLIC "BOUT_HAS_PRETTY_FUNCTION") endif() +# Locations of the various Python modules, including the generated boutconfig module +set(BOUT_PYTHONPATH "${CMAKE_CURRENT_BINARY_DIR}/tools/pylib:${CMAKE_CURRENT_SOURCE_DIR}/tools/pylib") + # We want to compile the actual flags used into the library so we can # see them at runtime. This needs a few steps: @@ -926,7 +929,7 @@ message(" Make sure that the tools/pylib directory is in your PYTHONPATH e.g. 
by adding to your ~/.bashrc file - export PYTHONPATH=$PWD/tools/pylib/:\$PYTHONPATH + export PYTHONPATH=${BOUT_PYTHONPATH}:\$PYTHONPATH *** Now run `cmake --build .` to compile BOUT++ *** ") diff --git a/tools/pylib/boutconfig/__init__.py.cin b/tools/pylib/boutconfig/__init__.py.cin index 06093d1f2f..99a29824fc 100644 --- a/tools/pylib/boutconfig/__init__.py.cin +++ b/tools/pylib/boutconfig/__init__.py.cin @@ -15,7 +15,7 @@ config = { "version": "@BOUT_VERSION@", "git": "@BOUT_REVISION@", "idlpath": "@IDLCONFIGPATH@", - "pythonpath": "@PYTHONCONFIGPATH@", + "pythonpath": "@BOUT_PYTHONPATH@", "has_netcdf": "@BOUT_HAS_NETCDF@", "has_pnetcdf": "OFF", "has_hdf5": "@BOUT_HAS_HDF5@", From 120dfc20c95b81b995e5463269ede4e23d713f69 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Dec 2020 10:28:53 +0000 Subject: [PATCH 324/428] Add back CMake versions of boutconfig cflags/lib variables Note that these will contain generator expressions and CMake targets, and therefore won't generally be very useful --- CMakeLists.txt | 5 +++++ tools/pylib/boutconfig/__init__.py.cin | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index cedd51b981..f9c0de357d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -678,6 +678,11 @@ endif() # Locations of the various Python modules, including the generated boutconfig module set(BOUT_PYTHONPATH "${CMAKE_CURRENT_BINARY_DIR}/tools/pylib:${CMAKE_CURRENT_SOURCE_DIR}/tools/pylib") +# Variables for boutconfig module -- note that these will contain +# generator expressions and CMake targets, and not generally be very +# useful +get_target_property(BOUT_LIBS bout++ INTERFACE_LINK_LIBRARIES) +get_target_property(BOUT_CFLAGS bout++ INTERFACE_INCLUDE_DIRECTORIES) # We want to compile the actual flags used into the library so we can # see them at runtime. 
This needs a few steps: diff --git a/tools/pylib/boutconfig/__init__.py.cin b/tools/pylib/boutconfig/__init__.py.cin index 99a29824fc..138e1c6004 100644 --- a/tools/pylib/boutconfig/__init__.py.cin +++ b/tools/pylib/boutconfig/__init__.py.cin @@ -10,8 +10,8 @@ config = { "cxx": "@CMAKE_CXX_COMPILER@", "ld": "@CMAKE_CXX_COMPILER@", "checks": "@BOUT_CHECK_LEVEL@", - "cflags": "", - "libs": "", + "cflags": "@BOUT_CFLAGS@", + "libs": "@BOUT_LIBS@", "version": "@BOUT_VERSION@", "git": "@BOUT_REVISION@", "idlpath": "@IDLCONFIGPATH@", From 8b04deee671832546f7657c1575337223758fb8a Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Dec 2020 10:31:24 +0000 Subject: [PATCH 325/428] CMake: Install boutconfig python module --- CMakeLists.txt | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f9c0de357d..ff24f2a83a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -869,9 +869,6 @@ configure_package_config_file(bout++Config.cmake.in bout++Config.cmake INSTALL_DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/bout++Config.cmake" ) -configure_package_config_file(tools/pylib/boutconfig/__init__.py.cin tools/pylib/boutconfig/__init__.py - INSTALL_DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/tools/pylib/boutconfig/__init__.py" - ) install( FILES "${CMAKE_CURRENT_BINARY_DIR}/bout++Config.cmake" @@ -893,6 +890,15 @@ install( DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/bout++" ) +configure_package_config_file(tools/pylib/boutconfig/__init__.py.cin tools/pylib/boutconfig/__init__.py + INSTALL_DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/tools/pylib/boutconfig/__init__.py" + ) + +install( + FILES "${CMAKE_CURRENT_BINARY_DIR}/tools/pylib/boutconfig/__init__.py" + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/boutconfig" + ) + export(EXPORT bout++Targets FILE "${CMAKE_CURRENT_BINARY_DIR}/bout++Targets.cmake" NAMESPACE bout++:: From 58ade8f640fe5400b60a5eb2f4114dee55ae6f1c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Dec 2020 10:32:27 +0000 Subject: [PATCH 326/428] Autotools: Remove generated boutconfig module with distclean --- make.config.in | 2 ++ 1 file changed, 2 insertions(+) diff --git a/make.config.in b/make.config.in index 78e6d495ae..69bcca940f 100644 --- a/make.config.in +++ b/make.config.in @@ -359,6 +359,8 @@ distclean:: clean clean-tests @echo externalpackages cleaned @touch $(BOUT_TOP)/configure @echo autom4te.cache cleaned + -@$(RM) $(BOUT_TOP)/tools/pylib/boutconfig/__init__.py + @echo generated Python boutconfig module cleaned clean-tests: clean-unit-tests clean-integrated-tests clean-mms-tests From 4a83be92145af396343e8976cd78b32bb67d7e4a Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 10 Dec 2020 14:11:17 +0000 Subject: [PATCH 327/428] Remove some other references to 3D metrics --- CMakeLists.txt | 1 - bout++Config.cmake.in | 1 - 2 files changed, 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ff24f2a83a..914d76a06a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -933,7 +933,6 @@ message(" Field name tracking : ${BOUT_USE_TRACK} Floating point exceptions: ${BOUT_USE_SIGFPE} Backtrace enabled : ${BOUT_USE_BACKTRACE} - 3D Metric enabled : ${BOUT_USE_METRIC_3D} === Python === diff --git a/bout++Config.cmake.in b/bout++Config.cmake.in index 106c2815f3..e9579161f8 100644 --- a/bout++Config.cmake.in +++ b/bout++Config.cmake.in @@ -10,7 +10,6 @@ set(BOUT_USE_BACKTRACE @BOUT_USE_BACKTRACE@) set(BOUT_USE_OPENMP @BOUT_USE_OPENMP@) set(BOUT_HAS_OUTPUT_DEBUG @BOUT_HAS_OUTPUT_DEBUG@) set(BOUT_CHECK_LEVEL @BOUT_CHECK_LEVEL@) 
-set(BOUT_USE_METRIC_3D @BOUT_USE_METRIC_3D@) set(BOUT_HAS_PVODE @BOUT_HAS_PVODE@) set(BOUT_HAS_NETCDF @BOUT_HAS_NETCDF@) From d6402ffb7f5d3882a4953494d498251ea9b42f63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Fri, 20 Mar 2020 13:19:26 +0000 Subject: [PATCH 328/428] build shared lib if --enable-shared Automatically set the target to the shared object, if we are building the shared object. This removes the need to call `make shared` after configuring with `--enable-shared`. --- .gitignore | 1 + configure | 61 +++++++++++++++++++++++++++++++++++ configure.ac | 36 +++++++++++++++++++++ make.config.in | 32 ++++++++---------- makefile | 40 +++++++---------------- src/{makefile => makefile.in} | 19 ++++++++++- 6 files changed, 141 insertions(+), 48 deletions(-) rename src/{makefile => makefile.in} (72%) diff --git a/.gitignore b/.gitignore index e6a6914573..0b2632b9f0 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ autom4te.cache/ aclocal.m4 /bin/bout-config +/src/makefile /include/pvode /lib/ /manual/*.pdf diff --git a/configure b/configure index 8821c01aa9..fa97379d7d 100755 --- a/configure +++ b/configure @@ -647,6 +647,10 @@ HAS_PVODE CHECK_LEVEL GIT_REVISION BOUT_VERSION +SHARED_EXTRA +STATIC_EXTRA +DEFLIB2 +DEFLIB1 PYTHONCONFIGPATH IDLCONFIGPATH PREFIX @@ -823,6 +827,7 @@ enable_optimize enable_sigfpe enable_backtrace enable_shared +enable_static enable_openmp with_openmp_schedule enable_pvode_openmp @@ -1482,6 +1487,7 @@ Optional Features: --enable-sigfpe Enable FloatingPointExceptions --disable-backtrace Disable function backtrace --enable-shared Enable building bout++ into an shared object + --enable-static Enable building bout++ into an static library --enable-openmp Enable building with OpenMP support --enable-pvode-openmp Enable building PVODE with OpenMP support --disable-openmp do not use OpenMP @@ -2728,6 +2734,13 @@ else enable_shared=no fi +# Check whether --enable-static was given. +if test "${enable_static+set}" = set; then : + enableval=$enable_static; +else + enable_static=auto +fi + # Check whether --enable-openmp was given. if test "${enable_openmp+set}" = set; then : enableval=$enable_openmp; @@ -6343,6 +6356,7 @@ fi # Build into shared object (pic) ############################################################# +DEFLIB2='' if test "x$enable_shared" = "xyes"; then : # compile as position independent code. @@ -6354,6 +6368,50 @@ if test "x$enable_shared" = "xyes"; then : # PowerPC and SPARC. # Therfore use -fPIC for now CXXFLAGS="$CXXFLAGS -fPIC" + # for make.config + DEFLIB1='$(LIB_SO)' + # for src/makefile + DEFLIB2+=' ../lib/libbout++.so' + if test "x$enable_static" = "xauto"; then : + + enable_static=no + +fi + SHARED_EXTRA=':' + if test "x$enable_static" = "xno"; then : + + SHARED_EXTRA='$(RM) -f ../lib/*.a' + +fi + + +else + + if test "x$enable_static" = "xauto"; then : + + enable_static=yes + +fi + +fi + +if test "x$enable_static" = "xyes"; then : + + DEFLIB1='$(LIB_A)' + DEFLIB2+=' ../lib/libbout++.a' + STATIC_EXTRA=':' + # In case we only build static, make sure shared libs are removed + if ! test "x$enable_shared" = "xyes"; then : + + STATIC_EXTRA='$(RM) -f ../lib/*.so*' + +fi + +fi + +if test "x$DEFLIB2" = x ; then : + + as_fn_error $? "Need to enable at least on of static or shared!" 
"$LINENO" 5 fi @@ -17017,6 +17075,9 @@ BOUT_INCLUDE_PATH=$PWD/include + + + diff --git a/configure.ac b/configure.ac index 25632421be..612f0d16e1 100644 --- a/configure.ac +++ b/configure.ac @@ -89,6 +89,8 @@ AC_ARG_ENABLE(backtrace, [AS_HELP_STRING([--disable-backtrace], [Disable function backtrace])],,[enable_backtrace=maybe]) AC_ARG_ENABLE(shared, [AS_HELP_STRING([--enable-shared], [Enable building bout++ into an shared object])],,[enable_shared=no]) +AC_ARG_ENABLE(static, [AS_HELP_STRING([--enable-static], + [Enable building bout++ into an static library])],,[enable_static=auto]) AC_ARG_ENABLE(openmp, [AS_HELP_STRING([--enable-openmp], [Enable building with OpenMP support])],,[enable_openmp=no]) AC_ARG_WITH(openmp_schedule,[AS_HELP_STRING([--with-openmp-schedule=static/dynamic/guided/auto], @@ -393,6 +395,7 @@ AS_IF([test "x$enable_backtrace" = "xyes" || test "x$enable_backtrace" = "xmaybe # Build into shared object (pic) ############################################################# +DEFLIB2='' AS_IF([test "x$enable_shared" = "xyes"], [ # compile as position independent code. # -fpic is apparently faster then -fPIC, but -fPIC works always. @@ -403,6 +406,35 @@ AS_IF([test "x$enable_shared" = "xyes"], [ # PowerPC and SPARC. # Therfore use -fPIC for now CXXFLAGS="$CXXFLAGS -fPIC" + # for make.config + DEFLIB1='$(LIB_SO)' + # for src/makefile + DEFLIB2+=' ../lib/libbout++.so' + AS_IF([test "x$enable_static" = "xauto"], [ + enable_static=no + ]) + SHARED_EXTRA=':' + AS_IF([test "x$enable_static" = "xno"], [ + SHARED_EXTRA='$(RM) -f ../lib/*.a' + ]) +], [ + AS_IF([test "x$enable_static" = "xauto"], [ + enable_static=yes + ]) +]) + +AS_IF([test "x$enable_static" = "xyes"], [ + DEFLIB1='$(LIB_A)' + DEFLIB2+=' ../lib/libbout++.a' + STATIC_EXTRA=':' + # In case we only build static, make sure shared libs are removed + AS_IF([! test "x$enable_shared" = "xyes"], [ + STATIC_EXTRA='$(RM) -f ../lib/*.so*' + ]) +]) + +AS_IF([test "x$DEFLIB2" = x ], [ + AC_MSG_ERROR([Need to enable at least on of static or shared!]) ]) ############################################################# @@ -1379,6 +1411,10 @@ AC_SUBST(OWN_MPARK) AC_SUBST(PREFIX) AC_SUBST(IDLCONFIGPATH) AC_SUBST(PYTHONCONFIGPATH) +AC_SUBST(DEFLIB1) +AC_SUBST(DEFLIB2) +AC_SUBST(STATIC_EXTRA) +AC_SUBST(SHARED_EXTRA) AC_SUBST(BOUT_VERSION) AC_SUBST(GIT_REVISION) diff --git a/make.config.in b/make.config.in index 69bcca940f..1e2454d725 100644 --- a/make.config.in +++ b/make.config.in @@ -83,12 +83,10 @@ BOUT_VERSION = @PACKAGE_VERSION@ # Files that are needed by configure and should be dependencies for 'all' OBJ = $(SOURCEC:%.cxx=%.o) -ifndef RELEASED -# If this is part of an packaged and installed installation, the user cannot -# write these files, so they shouldn't be changed. Thus only set this, in -# the non installed case. -LIB = $(BOUT_LIB_PATH)/libbout++.a +ifndef RELEASE +LIB_A = $(BOUT_LIB_PATH)/libbout++.a LIB_SO = $(BOUT_LIB_PATH)/libbout++.so +LIB = @DEFLIB1@ endif MPARK_VARIANT_INCLUDE_PATH=@MPARK_VARIANT_INCLUDE_PATH@ @@ -138,7 +136,9 @@ $(DIRS): # first a potential trailing slash is removed, and after that the # directory name is extracted. This allows to e.g. 
specify a directory # as fuu/bar/ and still get an archive named bar.a - @$(MAKE) -s --no-print-directory MODULE_DIR=$(MODULE_DIR) SUB_NAME=$(shell basename $@) TARGET=sub -C $@ +# The # probably doesn't need to be escaped as everything in the +# recipy is passed to the shell, even lines starting with # + @$(MAKE) -s --no-print-directory MODULE_DIR=$(MODULE_DIR) SUB_NAME=$(shell f=$@ ; g=$${f%/} ; echo $${g##*/}) TARGET=sub -C $@ endif endif @@ -184,14 +184,14 @@ endif $(INSTALL_DATA) tools/idllib/README $(DESTDIR)@datadir@/bout++/idllib/ $(INSTALL_DATA) tools/pylib/boutdata/*.py $(DESTDIR)@datadir@/bout++/pylib/boutdata/ $(INSTALL_DATA) tools/pylib/boututils/*.py $(DESTDIR)@datadir@/bout++/pylib/boututils/ - $(INSTALL_DATA) make.config $(INSTALL_INCLUDE_PATH) + $(INSTALL_DATA) make.config $(DESTDIR)@datadir@/bout++/ for mo in $(MO_FILES); do $(MKDIR) $(DESTDIR)@localedir@/`dirname $$mo`; $(INSTALL_DATA) locale/$$mo $(DESTDIR)@localedir@/$$mo; done $(POST_INSTALL) # Post-install commands follow. @# Modify paths in the bout-config script sed -i "s|^BOUT_INCLUDE_PATH=.*|BOUT_INCLUDE_PATH=@includedir@/bout++|" $(DESTDIR)@bindir@/bout-config sed -i "s|^BOUT_LIB_PATH=.*|BOUT_LIB_PATH=@libdir@|" $(DESTDIR)@bindir@/bout-config - sed -i "s|^BOUT_CONFIG_FILE=.*|BOUT_CONFIG_FILE=@includedir@/bout++/make.config|" $(DESTDIR)@bindir@/bout-config + sed -i "s|^BOUT_CONFIG_FILE=.*|BOUT_CONFIG_FILE=@datadir@/bout++/make.config|" $(DESTDIR)@bindir@/bout-config sed -i "s|^idlpath=.*|idlpath=@datadir@/bout++/idllib/|" $(DESTDIR)@bindir@/bout-config sed -i "s|^pythonpath=.*|pythonpath=@datadir@/bout++/pylib/|" $(DESTDIR)@bindir@/bout-config ifeq ("@OWN_MPARK@", "yes") @@ -213,6 +213,7 @@ uninstall: $(PRE_UNINSTALL) # Pre-uninstall commands follow. $(NORMAL_UNINSTALL) # Normal commands follow. + $(RM) $(DESTDIR)@datadir@/bout++/make.config $(RM) -r $(DESTDIR)@datadir@/bout++/pylib/boututils/ $(RM) -r $(DESTDIR)@datadir@/bout++/pylib/boutdata/ $(RM) -r $(DESTDIR)@datadir@/bout++/idllib/ @@ -249,15 +250,11 @@ endif ifeq ("$(TARGET)", "sub") LIB=$(MODULE_DIR)/$(SUB_NAME).a -sub:$(LIB) - -$(LIB): makefile $(BOUT_CONFIG_FILE) $(BOUT_TOP)/include $(BOUT_TOP)/lib $(OBJ) +sub: makefile $(BOUT_CONFIG_FILE) $(BOUT_TOP)/include $(BOUT_TOP)/lib $(OBJ) ifneq ("$(OBJ)foo", "foo") @echo " Adding $(OBJ) to $(LIB)" - @LIBT=$(LIB).$$$$.a && \ - $(AR) $(ARFLAGS) $${LIBT} $(OBJ) && \ - $(RANLIB) $${LIBT} && \ - mv $${LIBT} $(LIB) + @$(AR) $(ARFLAGS) $(LIB) $(OBJ) + @$(RANLIB) $(LIB) endif endif @@ -286,14 +283,13 @@ DIRS_=$(DIRS:%/=%) # then we extract the directory name, in case it is a longer path # We are not in a recipe, so # needs to be escaped # $$ is an escaped $ -DIRS__=$(shell for d in $(DIRS_) ; do basename $$d;done) +DIRS__=$(shell for f in $(DIRS_) ; do echo $${f\#\#*/};done) # now we can generate a list of libraries SUB_LIBS=$(DIRS__:%=%.a) $(SUB_LIBS):$(DIRS__) $(SOURCEC): checklib $(SOURCEC:%.cxx=%.o): $(LIB) -$(TARGET): | $(DIRS) $(TARGET): makefile $(BOUT_CONFIG_FILE) $(OBJ) $(SUB_LIBS) @echo " Linking" $(TARGET) @$(LD) $(LDFLAGS) -o $(TARGET) $(OBJ) $(SUB_LIBS) $(BOUT_LIBS) @@ -337,7 +333,7 @@ endif clean:: -@$(RM) -rf $(OBJ) $(DEPS) $(TARGET) - @for pp in $(DIRS); do echo " " $$pp cleaned; $(MAKE) --no-print-directory -C $$pp clean; done + @for pp in $(DIRS) $(DIRS_CLEAN); do echo " " cleaning $$pp; $(MAKE) --no-print-directory -C $$pp clean; done @$(RM) -f $(SUB_LIBS) -@$(RM) .*.mk @test -f make.config && ( find src | grep '\.o$$' && echo "WARNING: Some object files remain - which might cause issues. 
Clean with $(MAKE) clean-remove-object-files" ) || exit 0 diff --git a/makefile b/makefile index 6b7926d240..92a7d60aec 100644 --- a/makefile +++ b/makefile @@ -2,24 +2,14 @@ BOUT_TOP = . DIRS = src +DIRS_CLEAN = tests/integrated tests/unit tests/MMS + TARGET ?= libfast include make.config +# For compatibility ignore shared shared: libfast - @echo "Creating libbout++.so" - @echo $(BOUT_FLAGS) | grep -i pic > /dev/null 2>&1 || (echo "not compiled with PIC support - reconfigure with --enable-shared" ;exit 1) - @#$(CXX) -shared -o $(LIB_SO) $(shell find $(BOUT_TOP)/src -name \*.o -type f -print 2> /dev/null) -L $(BOUT_TOP)/lib -Wl,--whole-archive -lpvode -lpvpre -Wl,--no-whole-archive - @$(RM) $(BOUT_TOP)/lib/*.so* - @$(CXX) -shared -Wl,-soname,libbout++.so.4.3.1 -o $(LIB_SO).4.3.1 $(shell find $(BOUT_TOP)/src -name \*.o -type f -print 2> /dev/null) - @$(CXX) -shared -Wl,-soname,libpvode.so.1.0.0 -o $(BOUT_TOP)/lib/libpvode_.so -L $(BOUT_TOP)/lib -Wl,--whole-archive -lpvode -Wl,--no-whole-archive - @$(CXX) -shared -Wl,-soname,libpvpre.so.1.0.0 -o $(BOUT_TOP)/lib/libpvpre_.so -L $(BOUT_TOP)/lib -Wl,--whole-archive -lpvpre -Wl,--no-whole-archive - @mv $(BOUT_TOP)/lib/libpvode_.so $(BOUT_TOP)/lib/libpvode.so.1.0.0 - @mv $(BOUT_TOP)/lib/libpvpre_.so $(BOUT_TOP)/lib/libpvpre.so.1.0.0 - @ln -s libbout++.so.4.3.1 $(LIB_SO) - @ln -s libpvode.so.1.0.0 lib/libpvode.so - @ln -s libpvpre.so.1.0.0 lib/libpvpre.so - ###################################################################### # Tests @@ -30,35 +20,27 @@ check-unit-tests: libfast check-mms-tests: libfast @cd tests/MMS; export LD_LIBRARY_PATH=${PWD}/lib:${LD_LIBRARY_PATH} ; \ - PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} \ - OMPI_MCA_rmaps_base_oversubscribe=yes ./test_suite_make + PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} ./test_suite_make @cd tests/MMS; export LD_LIBRARY_PATH=${PWD}/lib:${LD_LIBRARY_PATH} ; \ - PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} \ - OMPI_MCA_rmaps_base_oversubscribe=yes ./test_suite + PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} ./test_suite check-mms-tests-all: libfast @cd tests/MMS; export LD_LIBRARY_PATH=${PWD}/lib:${LD_LIBRARY_PATH} ; \ - PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} \ - OMPI_MCA_rmaps_base_oversubscribe=yes ./test_suite_make --all + PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} ./test_suite_make --all @cd tests/MMS; export LD_LIBRARY_PATH=${PWD}/lib:${LD_LIBRARY_PATH} ; \ - PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} \ - OMPI_MCA_rmaps_base_oversubscribe=yes ./test_suite --all + PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} ./test_suite --all check-integrated-tests: libfast @cd tests/integrated; export LD_LIBRARY_PATH=${PWD}/lib:${LD_LIBRARY_PATH} ; \ - PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} \ - OMPI_MCA_rmaps_base_oversubscribe=yes ./test_suite_make + PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} ./test_suite_make @cd tests/integrated; export LD_LIBRARY_PATH=${PWD}/lib:${LD_LIBRARY_PATH} ; \ - PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} \ - OMPI_MCA_rmaps_base_oversubscribe=yes ./test_suite + PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} ./test_suite check-integrated-tests-all: libfast @cd tests/integrated; export LD_LIBRARY_PATH=${PWD}/lib:${LD_LIBRARY_PATH} ; \ - PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} \ - OMPI_MCA_rmaps_base_oversubscribe=yes ./test_suite_make --all + PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} ./test_suite_make --all @cd tests/integrated; export LD_LIBRARY_PATH=${PWD}/lib:${LD_LIBRARY_PATH} ; \ - PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} \ - 
OMPI_MCA_rmaps_base_oversubscribe=yes ./test_suite --all + PYTHONPATH=${PWD}/tools/pylib/:${PYTHONPATH} ./test_suite --all check: check-unit-tests check-integrated-tests check-mms-tests diff --git a/src/makefile b/src/makefile.in similarity index 72% rename from src/makefile rename to src/makefile.in index 68d6fcaa28..de7d09e319 100644 --- a/src/makefile +++ b/src/makefile.in @@ -53,12 +53,29 @@ $(BOUT_TOP)/lib/.last.o.file: .dummy # rebuild is forced. libfast: .libfast -.libfast: bout++.o +.libfast: @DEFLIB2@ + +../lib/libbout++.a: bout++.o @echo "Recreating libbout++.a" @rm -f $(BOUT_TOP)/lib/libbout++.a @$(AR) $(ARFLAGS) $(LIB) $(shell find $(BOUT_TOP)/src -name \*.o -type f -print 2> /dev/null) @touch .libfast @#$(RANLIB) $(LIB) + @@STATIC_EXTRA@ + +../lib/libbout++.so: bout++.o + @echo "Creating libbout++.so" + @echo $(BOUT_FLAGS) | grep -qi pic || (echo "not compiled with PIC support - reconfigure with --enable-shared" ;exit 1) + @$(RM) $(BOUT_TOP)/lib/*.so* + @$(CXX) -shared -Wl,-soname,libbout++.so.$(BOUT_VERSION) -o $(LIB_SO).$(BOUT_VERSION) $(shell find $(BOUT_TOP)/src -name \*.o -type f -print 2> /dev/null) + @$(CXX) -shared -Wl,-soname,libpvode.so.1.0.0 -o $(BOUT_TOP)/lib/libpvode_.so -L $(BOUT_TOP)/lib -Wl,--whole-archive -lpvode -Wl,--no-whole-archive + @$(CXX) -shared -Wl,-soname,libpvpre.so.1.0.0 -o $(BOUT_TOP)/lib/libpvpre_.so -L $(BOUT_TOP)/lib -Wl,--whole-archive -lpvpre -Wl,--no-whole-archive + @mv $(BOUT_TOP)/lib/libpvode_.so $(BOUT_TOP)/lib/libpvode.so.1.0.0 + @mv $(BOUT_TOP)/lib/libpvpre_.so $(BOUT_TOP)/lib/libpvpre.so.1.0.0 + @cd ..;ln -s libbout++.so.$(BOUT_VERSION) $(LIB_SO) + @cd ..;ln -s libpvode.so.1.0.0 lib/libpvode.so + @cd ..;ln -s libpvpre.so.1.0.0 lib/libpvpre.so + @@SHARED_EXTRA@ # From the legacy build script: # Then set the last two preqrequisites for 'all' From 294393b538439b22c5d00cc723558d8b9726fb1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Fri, 20 Mar 2020 14:33:43 +0000 Subject: [PATCH 329/428] Use relative path for so file linking --- src/makefile.in | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/makefile.in b/src/makefile.in index de7d09e319..0a2fb39cc2 100644 --- a/src/makefile.in +++ b/src/makefile.in @@ -72,9 +72,9 @@ libfast: .libfast @$(CXX) -shared -Wl,-soname,libpvpre.so.1.0.0 -o $(BOUT_TOP)/lib/libpvpre_.so -L $(BOUT_TOP)/lib -Wl,--whole-archive -lpvpre -Wl,--no-whole-archive @mv $(BOUT_TOP)/lib/libpvode_.so $(BOUT_TOP)/lib/libpvode.so.1.0.0 @mv $(BOUT_TOP)/lib/libpvpre_.so $(BOUT_TOP)/lib/libpvpre.so.1.0.0 - @cd ..;ln -s libbout++.so.$(BOUT_VERSION) $(LIB_SO) - @cd ..;ln -s libpvode.so.1.0.0 lib/libpvode.so - @cd ..;ln -s libpvpre.so.1.0.0 lib/libpvpre.so + @ln -s libbout++.so.$(BOUT_VERSION) $(LIB_SO) + @ln -s libpvode.so.1.0.0 ../lib/libpvode.so + @ln -s libpvpre.so.1.0.0 ../lib/libpvpre.so @@SHARED_EXTRA@ # From the legacy build script: From 3bc72d9d7ebed27fe04851f2551eeedef4e2dd51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Mon, 23 Mar 2020 10:21:50 +0000 Subject: [PATCH 330/428] be verbose in travis script --- .travis_script.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis_script.sh b/.travis_script.sh index e2b331165e..2958d6fb8b 100755 --- a/.travis_script.sh +++ b/.travis_script.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -e +set -ex #Default flags COVERAGE=0 From 680470f42c0b21cabfaef31db645f2c1e8c88331 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Tue, 24 Mar 2020 13:17:10 +0000 Subject: [PATCH 
331/428] prefer $(BOUT_LIB_PATH) --- configure.ac | 4 ++-- src/makefile.in | 26 +++++++++++++------------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/configure.ac b/configure.ac index 612f0d16e1..78dbe9a407 100644 --- a/configure.ac +++ b/configure.ac @@ -409,13 +409,13 @@ AS_IF([test "x$enable_shared" = "xyes"], [ # for make.config DEFLIB1='$(LIB_SO)' # for src/makefile - DEFLIB2+=' ../lib/libbout++.so' + DEFLIB2+=' $(BOUT_LIB_PATH)/libbout++.so' AS_IF([test "x$enable_static" = "xauto"], [ enable_static=no ]) SHARED_EXTRA=':' AS_IF([test "x$enable_static" = "xno"], [ - SHARED_EXTRA='$(RM) -f ../lib/*.a' + SHARED_EXTRA='$(RM) -f $(BOUT_LIB_PATH)/*.a' ]) ], [ AS_IF([test "x$enable_static" = "xauto"], [ diff --git a/src/makefile.in b/src/makefile.in index 0a2fb39cc2..765d33b279 100644 --- a/src/makefile.in +++ b/src/makefile.in @@ -38,12 +38,12 @@ include $(BOUT_TOP)/make.config # directories are finished and further to make sure we actually know # whether the file needs to be rebuild - which we can only know for # certain if all directories are finished -bout++.o: $(BOUT_TOP)/lib/.last.o.file .dummy +bout++.o: $(BOUT_LIB_PATH)/.last.o.file .dummy # The recipie could be removed, as it only catches the case of # out-of-date make.config # The rest is needed to tell make that .last.o.file will be created -$(BOUT_TOP)/lib/.last.o.file: .dummy +$(BOUT_LIB_PATH)/.last.o.file: .dummy @test -f $@ || \ echo "make.config is out of date - run ./configure" @test -f $@ || \ @@ -55,26 +55,26 @@ libfast: .libfast .libfast: @DEFLIB2@ -../lib/libbout++.a: bout++.o +$(BOUT_LIB_PATH)/libbout++.a: bout++.o @echo "Recreating libbout++.a" - @rm -f $(BOUT_TOP)/lib/libbout++.a + @rm -f $(BOUT_LIB_PATH)/libbout++.a @$(AR) $(ARFLAGS) $(LIB) $(shell find $(BOUT_TOP)/src -name \*.o -type f -print 2> /dev/null) @touch .libfast @#$(RANLIB) $(LIB) @@STATIC_EXTRA@ -../lib/libbout++.so: bout++.o +$(BOUT_LIB_PATH)/libbout++.so: bout++.o @echo "Creating libbout++.so" @echo $(BOUT_FLAGS) | grep -qi pic || (echo "not compiled with PIC support - reconfigure with --enable-shared" ;exit 1) - @$(RM) $(BOUT_TOP)/lib/*.so* + @$(RM) $(BOUT_LIB_PATH)/*.so* @$(CXX) -shared -Wl,-soname,libbout++.so.$(BOUT_VERSION) -o $(LIB_SO).$(BOUT_VERSION) $(shell find $(BOUT_TOP)/src -name \*.o -type f -print 2> /dev/null) - @$(CXX) -shared -Wl,-soname,libpvode.so.1.0.0 -o $(BOUT_TOP)/lib/libpvode_.so -L $(BOUT_TOP)/lib -Wl,--whole-archive -lpvode -Wl,--no-whole-archive - @$(CXX) -shared -Wl,-soname,libpvpre.so.1.0.0 -o $(BOUT_TOP)/lib/libpvpre_.so -L $(BOUT_TOP)/lib -Wl,--whole-archive -lpvpre -Wl,--no-whole-archive - @mv $(BOUT_TOP)/lib/libpvode_.so $(BOUT_TOP)/lib/libpvode.so.1.0.0 - @mv $(BOUT_TOP)/lib/libpvpre_.so $(BOUT_TOP)/lib/libpvpre.so.1.0.0 + @$(CXX) -shared -Wl,-soname,libpvode.so.1.0.0 -o $(BOUT_LIB_PATH)/libpvode_.so -L $(BOUT_LIB_PATH) -Wl,--whole-archive -lpvode -Wl,--no-whole-archive + @$(CXX) -shared -Wl,-soname,libpvpre.so.1.0.0 -o $(BOUT_LIB_PATH)/libpvpre_.so -L $(BOUT_LIB_PATH) -Wl,--whole-archive -lpvpre -Wl,--no-whole-archive + @mv $(BOUT_LIB_PATH)/libpvode_.so $(BOUT_LIB_PATH)/libpvode.so.1.0.0 + @mv $(BOUT_LIB_PATH)/libpvpre_.so $(BOUT_LIB_PATH)/libpvpre.so.1.0.0 @ln -s libbout++.so.$(BOUT_VERSION) $(LIB_SO) - @ln -s libpvode.so.1.0.0 ../lib/libpvode.so - @ln -s libpvpre.so.1.0.0 ../lib/libpvpre.so + @ln -s libpvode.so.1.0.0 $(BOUT_LIB_PATH)/libpvode.so + @ln -s libpvpre.so.1.0.0 $(BOUT_LIB_PATH)/libpvpre.so @@SHARED_EXTRA@ # From the legacy build script: @@ -98,7 +98,7 @@ initial_message: @echo "FLAGS 
= " $(BOUT_FLAGS) @echo "CHECKSUM = " $(CHECKSUM) @echo "INCLUDE = " $(BOUT_INCLUDE) - @rm -f $(BOUT_TOP)/lib/libbout++.a + @rm -f $(BOUT_LIB_PATH)/libbout++.a # From the legacy build script end_message: From e598be5f586f6aa46234d32e46689c4f4710a6d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Fri, 27 Mar 2020 12:47:14 +0000 Subject: [PATCH 332/428] use single var for both files --- configure | 19 +++++++------------ configure.ac | 17 ++++++----------- make.config.in | 2 +- src/makefile.in | 2 +- 4 files changed, 15 insertions(+), 25 deletions(-) diff --git a/configure b/configure index fa97379d7d..59fc7b4607 100755 --- a/configure +++ b/configure @@ -649,8 +649,7 @@ GIT_REVISION BOUT_VERSION SHARED_EXTRA STATIC_EXTRA -DEFLIB2 -DEFLIB1 +LIB_TO_BUILD PYTHONCONFIGPATH IDLCONFIGPATH PREFIX @@ -6356,7 +6355,7 @@ fi # Build into shared object (pic) ############################################################# -DEFLIB2='' +LIB_TO_BUILD='' if test "x$enable_shared" = "xyes"; then : # compile as position independent code. @@ -6368,10 +6367,8 @@ if test "x$enable_shared" = "xyes"; then : # PowerPC and SPARC. # Therfore use -fPIC for now CXXFLAGS="$CXXFLAGS -fPIC" - # for make.config - DEFLIB1='$(LIB_SO)' # for src/makefile - DEFLIB2+=' ../lib/libbout++.so' + LIB_TO_BUILD+=' $(BOUT_LIB_PATH)/libbout++.so' if test "x$enable_static" = "xauto"; then : enable_static=no @@ -6380,11 +6377,10 @@ fi SHARED_EXTRA=':' if test "x$enable_static" = "xno"; then : - SHARED_EXTRA='$(RM) -f ../lib/*.a' + SHARED_EXTRA='$(RM) -f $(BOUT_LIB_PATH)/*.a' fi - else if test "x$enable_static" = "xauto"; then : @@ -6397,19 +6393,18 @@ fi if test "x$enable_static" = "xyes"; then : - DEFLIB1='$(LIB_A)' - DEFLIB2+=' ../lib/libbout++.a' + LIB_TO_BUILD+=' $(BOUT_LIB_PATH)/libbout++.a' STATIC_EXTRA=':' # In case we only build static, make sure shared libs are removed if ! test "x$enable_shared" = "xyes"; then : - STATIC_EXTRA='$(RM) -f ../lib/*.so*' + STATIC_EXTRA='$(RM) -f $(BOUT_LIB_PATH)/*.so*' fi fi -if test "x$DEFLIB2" = x ; then : +if test "x$LIB_TO_BUILD" = x ; then : as_fn_error $? "Need to enable at least on of static or shared!" "$LINENO" 5 diff --git a/configure.ac b/configure.ac index 78dbe9a407..e78398bc5c 100644 --- a/configure.ac +++ b/configure.ac @@ -395,7 +395,7 @@ AS_IF([test "x$enable_backtrace" = "xyes" || test "x$enable_backtrace" = "xmaybe # Build into shared object (pic) ############################################################# -DEFLIB2='' +LIB_TO_BUILD='' AS_IF([test "x$enable_shared" = "xyes"], [ # compile as position independent code. # -fpic is apparently faster then -fPIC, but -fPIC works always. @@ -406,10 +406,7 @@ AS_IF([test "x$enable_shared" = "xyes"], [ # PowerPC and SPARC. # Therfore use -fPIC for now CXXFLAGS="$CXXFLAGS -fPIC" - # for make.config - DEFLIB1='$(LIB_SO)' - # for src/makefile - DEFLIB2+=' $(BOUT_LIB_PATH)/libbout++.so' + LIB_TO_BUILD+=' $(BOUT_LIB_PATH)/libbout++.so' AS_IF([test "x$enable_static" = "xauto"], [ enable_static=no ]) @@ -424,16 +421,15 @@ AS_IF([test "x$enable_shared" = "xyes"], [ ]) AS_IF([test "x$enable_static" = "xyes"], [ - DEFLIB1='$(LIB_A)' - DEFLIB2+=' ../lib/libbout++.a' + LIB_TO_BUILD+=' $(BOUT_LIB_PATH)/libbout++.a' STATIC_EXTRA=':' # In case we only build static, make sure shared libs are removed AS_IF([! 
test "x$enable_shared" = "xyes"], [ - STATIC_EXTRA='$(RM) -f ../lib/*.so*' + STATIC_EXTRA='$(RM) -f $(BOUT_LIB_PATH)/*.so*' ]) ]) -AS_IF([test "x$DEFLIB2" = x ], [ +AS_IF([test "x$LIB_TO_BUILD" = x ], [ AC_MSG_ERROR([Need to enable at least on of static or shared!]) ]) @@ -1411,8 +1407,7 @@ AC_SUBST(OWN_MPARK) AC_SUBST(PREFIX) AC_SUBST(IDLCONFIGPATH) AC_SUBST(PYTHONCONFIGPATH) -AC_SUBST(DEFLIB1) -AC_SUBST(DEFLIB2) +AC_SUBST(LIB_TO_BUILD) AC_SUBST(STATIC_EXTRA) AC_SUBST(SHARED_EXTRA) diff --git a/make.config.in b/make.config.in index 1e2454d725..88e232ef96 100644 --- a/make.config.in +++ b/make.config.in @@ -86,7 +86,7 @@ OBJ = $(SOURCEC:%.cxx=%.o) ifndef RELEASE LIB_A = $(BOUT_LIB_PATH)/libbout++.a LIB_SO = $(BOUT_LIB_PATH)/libbout++.so -LIB = @DEFLIB1@ +LIB = @LIB_TO_BUILD@ endif MPARK_VARIANT_INCLUDE_PATH=@MPARK_VARIANT_INCLUDE_PATH@ diff --git a/src/makefile.in b/src/makefile.in index 765d33b279..64f57f3986 100644 --- a/src/makefile.in +++ b/src/makefile.in @@ -53,7 +53,7 @@ $(BOUT_LIB_PATH)/.last.o.file: .dummy # rebuild is forced. libfast: .libfast -.libfast: @DEFLIB2@ +.libfast: @LIB_TO_BUILD@ $(BOUT_LIB_PATH)/libbout++.a: bout++.o @echo "Recreating libbout++.a" From 0b6bb63758b503172fac2a9f57ca8c25cc4efa18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Wed, 1 Apr 2020 18:15:40 +0100 Subject: [PATCH 333/428] Only remove libbout++.a --- configure | 3 +-- configure.ac | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/configure b/configure index 59fc7b4607..b1c5e0b857 100755 --- a/configure +++ b/configure @@ -6367,7 +6367,6 @@ if test "x$enable_shared" = "xyes"; then : # PowerPC and SPARC. # Therfore use -fPIC for now CXXFLAGS="$CXXFLAGS -fPIC" - # for src/makefile LIB_TO_BUILD+=' $(BOUT_LIB_PATH)/libbout++.so' if test "x$enable_static" = "xauto"; then : @@ -6377,7 +6376,7 @@ fi SHARED_EXTRA=':' if test "x$enable_static" = "xno"; then : - SHARED_EXTRA='$(RM) -f $(BOUT_LIB_PATH)/*.a' + SHARED_EXTRA='$(RM) -f $(BOUT_LIB_PATH)/libbout++.a' fi diff --git a/configure.ac b/configure.ac index e78398bc5c..4e34e969c3 100644 --- a/configure.ac +++ b/configure.ac @@ -412,7 +412,7 @@ AS_IF([test "x$enable_shared" = "xyes"], [ ]) SHARED_EXTRA=':' AS_IF([test "x$enable_static" = "xno"], [ - SHARED_EXTRA='$(RM) -f $(BOUT_LIB_PATH)/*.a' + SHARED_EXTRA='$(RM) -f $(BOUT_LIB_PATH)/libbout++.a' ]) ], [ AS_IF([test "x$enable_static" = "xauto"], [ From 6e0cb5d2bb686e65cd1f2e09535e513d3be4a404 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 29 Jul 2021 13:15:24 +0100 Subject: [PATCH 334/428] Use CXX to compile instead of MPICXX MPICXX sets the MPI compiler, but this is used to set CXX. 
We should use CXX because some of the macros add flags directly to CXX --- configure | 10 +++++----- configure.ac | 10 +++++----- make.config.in | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/configure b/configure index b1c5e0b857..225f6428a7 100755 --- a/configure +++ b/configure @@ -12545,7 +12545,7 @@ else # Set a compile-time flag CXXFLAGS="$CXXFLAGS -DBOUT_HAS_SCOREP" - MPICXX="$SCOREPPATH --user --nocompiler $MPICXX" + CXX="$SCOREPPATH --user --nocompiler $CXX" HAS_SCOREP="yes" { $as_echo "$as_me:${as_lineno-$LINENO}: Scorep support enabled" >&5 $as_echo "$as_me: Scorep support enabled" >&6;} @@ -12940,16 +12940,16 @@ $as_echo "$as_me: PVODE being built without OpenMP support" >&6;} fi # Clean PVODE - CXX="$MPICXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE clean -C externalpackages/PVODE/precon/ >> config-build.log 2>&1 - CXX="$MPICXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE clean -C externalpackages/PVODE/source/ >> config-build.log 2>&1 + CXX="$CXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE clean -C externalpackages/PVODE/precon/ >> config-build.log 2>&1 + CXX="$CXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE clean -C externalpackages/PVODE/source/ >> config-build.log 2>&1 { $as_echo "$as_me:${as_lineno-$LINENO}: Building PVODE" >&5 $as_echo "$as_me: Building PVODE" >&6;} echo "* Building PVODE" >> config-build.log echo "*************************************************************" >> config-build.log - CXX="$MPICXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE -C externalpackages/PVODE/precon/ >> config-build.log 2>&1 - CXX="$MPICXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE -C externalpackages/PVODE/source/ >> config-build.log 2>&1 + CXX="$CXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE -C externalpackages/PVODE/precon/ >> config-build.log 2>&1 + CXX="$CXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE -C externalpackages/PVODE/source/ >> config-build.log 2>&1 if test -f externalpackages/PVODE/lib/libpvode.a && test -f externalpackages/PVODE/lib/libpvpre.a; then : diff --git a/configure.ac b/configure.ac index 4e34e969c3..0628181656 100644 --- a/configure.ac +++ b/configure.ac @@ -1163,7 +1163,7 @@ Please supply the path using --with-scorep=/path/to/scorep]) ],[ # Set a compile-time flag CXXFLAGS="$CXXFLAGS -DBOUT_HAS_SCOREP" - MPICXX="$SCOREPPATH --user --nocompiler $MPICXX" + CXX="$SCOREPPATH --user --nocompiler $CXX" HAS_SCOREP="yes" AC_MSG_NOTICE([Scorep support enabled]) ]) @@ -1253,15 +1253,15 @@ AS_IF([test "$with_pvode" != "no"], [ AC_MSG_NOTICE([PVODE being built without OpenMP support]) ]) # Clean PVODE - CXX="$MPICXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE clean -C externalpackages/PVODE/precon/ >> config-build.log 2>&1 - CXX="$MPICXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE clean -C externalpackages/PVODE/source/ >> config-build.log 2>&1 + CXX="$CXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE clean -C externalpackages/PVODE/precon/ >> config-build.log 2>&1 + CXX="$CXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE clean -C externalpackages/PVODE/source/ >> config-build.log 2>&1 AC_MSG_NOTICE([Building PVODE]) echo "* Building PVODE" >> config-build.log echo "*************************************************************" >> config-build.log - CXX="$MPICXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" 
$MAKE -C externalpackages/PVODE/precon/ >> config-build.log 2>&1 - CXX="$MPICXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE -C externalpackages/PVODE/source/ >> config-build.log 2>&1 + CXX="$CXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE -C externalpackages/PVODE/precon/ >> config-build.log 2>&1 + CXX="$CXX" CXXFLAGS=$PVODE_FLAGS MKDIR="$MKDIR_P" RANLIB="$RANLIB" $MAKE -C externalpackages/PVODE/source/ >> config-build.log 2>&1 AS_IF([test -f externalpackages/PVODE/lib/libpvode.a && test -f externalpackages/PVODE/lib/libpvpre.a], [ AC_MSG_NOTICE([Successfully built PVODE]) diff --git a/make.config.in b/make.config.in index 88e232ef96..5fb15790b4 100644 --- a/make.config.in +++ b/make.config.in @@ -46,7 +46,7 @@ BOUT_FLAGS := $(BOUT_FLAGS) -DBOUT_FLAGS_STRING="$(BOUT_FLAGS)" # Specify the MPI C++ compiler in CXX -CXX = @MPICXX@ +CXX = @CXX@ CC = $(CXX) AR = ar From 5eaf3725663debffc50453c216e052a1577ce93d Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 1 Nov 2019 12:34:24 +0000 Subject: [PATCH 335/428] Use @CXX@ instead of @MPICXX@ in bout-config.in --- bin/bout-config.in | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/bout-config.in b/bin/bout-config.in index 9ebbf87c5f..80ae42c467 100755 --- a/bin/bout-config.in +++ b/bin/bout-config.in @@ -15,9 +15,9 @@ BOUT_INCLUDE_PATH=@BOUT_INCLUDE_PATH@ MPARK_VARIANT_INCLUDE_PATH=@MPARK_VARIANT_INCLUDE_PATH@ BOUT_CONFIG_FILE=@PREFIX@/make.config -cc="@MPICXX@" -cxx="@MPICXX@" -ld="@MPICXX@" +cc="@CXX@" +cxx="@CXX@" +ld="@CXX@" checks="@CHECK_LEVEL@" cflags="@CONFIG_CFLAGS@" libs="@CONFIG_LDFLAGS@" From f987a263470ff69b1e50c220e867be63c35fdb08 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 29 Jul 2021 13:21:06 +0100 Subject: [PATCH 336/428] CMake: Fix for v4.4 without build_defines header --- CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 914d76a06a..b49b5febef 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -805,6 +805,9 @@ else() set(CONFIG_LDFLAGS "${CONFIG_LDFLAGS}") endif() +# For v4.4 without build_defines.hxx +set(CONFIG_CFLAGS "${CONFIG_CFLAGS} ${BOUT_COMPILE_DEFINITIONS}") + # This version of the file allows the build directory to be used directly configure_file(bin/bout-config.in bin/bout-config @ONLY) From a7403b5031ad459d6dbcc8842df791b586feab06 Mon Sep 17 00:00:00 2001 From: David Date: Sun, 27 Sep 2020 17:07:57 +0200 Subject: [PATCH 337/428] Fix comparison is compares whether the strings are identically, not whether they contain the same data --- src/field/gen_fieldops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/field/gen_fieldops.py b/src/field/gen_fieldops.py index 3a5bd97a93..ea9329ea01 100755 --- a/src/field/gen_fieldops.py +++ b/src/field/gen_fieldops.py @@ -47,7 +47,7 @@ def smart_open(filename, mode='r'): try: yield fh finally: - if filename is not '-': + if filename != '-': fh.close() From 9472a62d05e3d7e44ba357f753f882ec2a9e561d Mon Sep 17 00:00:00 2001 From: David Date: Sun, 27 Sep 2020 17:08:16 +0200 Subject: [PATCH 338/428] Add CMake code to generate generated_fieldops --- CMakeLists.txt | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index b49b5febef..b61408b2d0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -303,6 +303,16 @@ set(BOUT_SOURCES ./src/sys/utils.cxx ) + +ADD_CUSTOM_COMMAND( OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/src/field/generated_fieldops.cxx + COMMAND python gen_fieldops.py --filename 
generated_fieldops.cxx.tmp + COMMAND clang-format generated_fieldops.cxx.tmp -i + COMMAND mv generated_fieldops.cxx.tmp generated_fieldops.cxx + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/field/gen_fieldops.jinja ${CMAKE_CURRENT_SOURCE_DIR}/src/field/gen_fieldops.py + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/src/field/ + COMMENT "Generating source code" ) + + add_library(bout++ ${BOUT_SOURCES} ) From 938d142f00740fe72b32ed5ab6c60c831204ef91 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 21 Oct 2020 11:35:15 +0100 Subject: [PATCH 339/428] CMake: Add module to find clang-format --- cmake/FindClangFormat.cmake | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 cmake/FindClangFormat.cmake diff --git a/cmake/FindClangFormat.cmake b/cmake/FindClangFormat.cmake new file mode 100644 index 0000000000..c940002c3b --- /dev/null +++ b/cmake/FindClangFormat.cmake @@ -0,0 +1,29 @@ +# Find Clang format +# +# Taken from https://github.com/ttroy50/cmake-examples commit 64bd54a +# This file is under MIT Licence + +if (NOT ClangFormat_BIN_NAME) + set(ClangFormat_BIN_NAME clang-format) +endif() + +# if custom path check there first +if (ClangFormat_ROOT_DIR) + find_program(ClangFormat_BIN + NAMES + ${ClangFormat_BIN_NAME} + PATHS + "${ClangFormat_ROOT_DIR}" + NO_DEFAULT_PATH) +endif() + +find_program(ClangFormat_BIN NAMES ${ClangFormat_BIN_NAME}) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args( + ClangFormat + DEFAULT_MSG + ClangFormat_BIN) + +mark_as_advanced( + ClangFormat_BIN) From 11aca91c1dc35b33c13a8a3c56aa82de96746613 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 21 Oct 2020 11:37:53 +0100 Subject: [PATCH 340/428] CMake: Make generated_fieldops.cxx command more portable Find both Python and clang-format executables, rather than hard-coding them, and skip generating this file if either is not available --- CMakeLists.txt | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b61408b2d0..a236a55ef1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -303,15 +303,23 @@ set(BOUT_SOURCES ./src/sys/utils.cxx ) - -ADD_CUSTOM_COMMAND( OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/src/field/generated_fieldops.cxx - COMMAND python gen_fieldops.py --filename generated_fieldops.cxx.tmp - COMMAND clang-format generated_fieldops.cxx.tmp -i - COMMAND mv generated_fieldops.cxx.tmp generated_fieldops.cxx - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/field/gen_fieldops.jinja ${CMAKE_CURRENT_SOURCE_DIR}/src/field/gen_fieldops.py - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/src/field/ - COMMENT "Generating source code" ) - +find_package(Python) +find_package(ClangFormat) +if (Python_FOUND AND ClangFormat_FOUND) + add_custom_command( OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/src/field/generated_fieldops.cxx + COMMAND ${Python_EXECUTABLE} gen_fieldops.py --filename generated_fieldops.cxx.tmp + COMMAND ${ClangFormat_BIN} generated_fieldops.cxx.tmp -i + COMMAND ${CMAKE_COMMAND} -E rename generated_fieldops.cxx.tmp generated_fieldops.cxx + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/field/gen_fieldops.jinja ${CMAKE_CURRENT_SOURCE_DIR}/src/field/gen_fieldops.py + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/src/field/ + COMMENT "Generating source code" ) +else() + message(AUTHOR_WARNING "'src/field/generated_fieldops.cxx' will not be \ +regenerated when you make changes to either \ +'src/field/gen_fieldops.py' or 'src/field/gen_fieldops.jinja'. 
\ +This is because either Python or clang-format is missing \ +(see above messages for more information)") +endif() add_library(bout++ ${BOUT_SOURCES} From fe2f099a85549523d6693ff9098376c2105230f0 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 21 Oct 2020 11:52:16 +0100 Subject: [PATCH 341/428] CMake: Add option to skip generating fieldops --- CMakeLists.txt | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a236a55ef1..35b173f41f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -303,9 +303,13 @@ set(BOUT_SOURCES ./src/sys/utils.cxx ) +option(BOUT_GENERATE_FIELDOPS "Automatically re-generate the Field arithmetic operators from the Python templates. \ +Requires Python, clang-format, and Jinja2. Turn this OFF to skip generating them if, for example, \ +you are unable to install the Jinja2 Python module. This is only important for BOUT++ developers." ON) + find_package(Python) find_package(ClangFormat) -if (Python_FOUND AND ClangFormat_FOUND) +if (BOUT_GENERATE_FIELDOPS AND Python_FOUND AND ClangFormat_FOUND) add_custom_command( OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/src/field/generated_fieldops.cxx COMMAND ${Python_EXECUTABLE} gen_fieldops.py --filename generated_fieldops.cxx.tmp COMMAND ${ClangFormat_BIN} generated_fieldops.cxx.tmp -i @@ -318,7 +322,10 @@ else() regenerated when you make changes to either \ 'src/field/gen_fieldops.py' or 'src/field/gen_fieldops.jinja'. \ This is because either Python or clang-format is missing \ -(see above messages for more information)") +(see above messages for more information) \ +or BOUT_GENERATE_FIELDOPS is OFF (current value: ${BOUT_GENERATE_FIELDOPS}). \ +This warning is only important for BOUT++ developers and can otherwise be \ +safely ignored.") endif() add_library(bout++ From ddbe218069ab8d024421ba9e8bc660d7e65e90ac Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 29 Jul 2021 16:21:35 +0100 Subject: [PATCH 342/428] Autotools: Bump soversion --- makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/makefile b/makefile index 529de3444b..59fb5ba21b 100644 --- a/makefile +++ b/makefile @@ -16,7 +16,7 @@ shared: libfast @$(CXX) -shared -Wl,-soname,libpvpre.so.1.0.0 -o $(BOUT_TOP)/lib/libpvpre_.so -L $(BOUT_TOP)/lib -Wl,--whole-archive -lpvpre -Wl,--no-whole-archive @mv $(BOUT_TOP)/lib/libpvode_.so $(BOUT_TOP)/lib/libpvode.so.1.0.0 @mv $(BOUT_TOP)/lib/libpvpre_.so $(BOUT_TOP)/lib/libpvpre.so.1.0.0 - @ln -s libbout++.so.4.3.3 $(LIB_SO) + @ln -s libbout++.so.4.4.0 $(LIB_SO) @ln -s libpvode.so.1.0.0 lib/libpvode.so @ln -s libpvpre.so.1.0.0 lib/libpvpre.so From e029b767cb168eed22470769b04ea2cb4ea35bfa Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 29 Jul 2021 16:28:41 +0100 Subject: [PATCH 343/428] Update changelog --- CHANGELOG.md | 92 ++++++++++++++++++++++++++++++++-------------------- 1 file changed, 57 insertions(+), 35 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index be3280af44..1e85ac9723 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,41 +23,63 @@ **Merged pull requests:** -- Add new contributors [\#2386](https://github.com/boutproject/BOUT-dev/pulls/2386) ([dschwoerer](https://github.com/users/dschwoerer)) -- Update locale [\#2385](https://github.com/boutproject/BOUT-dev/pulls/2385) ([dschwoerer](https://github.com/users/dschwoerer)) -- Fix RTD [\#2384](https://github.com/boutproject/BOUT-dev/pulls/2384) ([dschwoerer](https://github.com/users/dschwoerer)) -- Rename `max_nonlinear_it` to `max_nonlinear_iterations` 
[\#2339](https://github.com/boutproject/BOUT-dev/pulls/2339) ([johnomotani](https://github.com/users/johnomotani)) -- CVODE constraints and max_noinlinear_iterations options (4.4) [\#2304](https://github.com/boutproject/BOUT-dev/pulls/2304) ([johnomotani](https://github.com/users/johnomotani)) -- Fix some Solvers not always using user preconditioner/Jacobian (v4.4) [\#2284](https://github.com/boutproject/BOUT-dev/pulls/2284) ([ZedThree](https://github.com/users/ZedThree)) -- Fix formatting strings for pre-fmt in beuler solver [\#2278](https://github.com/boutproject/BOUT-dev/pulls/2278) ([bendudson](https://github.com/users/bendudson)) -- Backport of Backward Euler solver to v4.4 [\#2265](https://github.com/boutproject/BOUT-dev/pulls/2265) ([bendudson](https://github.com/users/bendudson)) -- Fix use of uninitialised value in Delp2 (4.4) [\#2263](https://github.com/boutproject/BOUT-dev/pulls/2263) ([johnomotani](https://github.com/users/johnomotani)) -- Save provenance tracking info from grid file (4.4) [\#2231](https://github.com/boutproject/BOUT-dev/pulls/2231) ([johnomotani](https://github.com/users/johnomotani)) -- Generate random run ID, track restarts (4.4) [\#2224](https://github.com/boutproject/BOUT-dev/pulls/2224) ([johnomotani](https://github.com/users/johnomotani)) -- Generate report for Timers (4.4) [\#2216](https://github.com/boutproject/BOUT-dev/pulls/2216) ([johnomotani](https://github.com/users/johnomotani)) -- Replace boutdata and boututils directories with submodules (v4.4) [\#2198](https://github.com/boutproject/BOUT-dev/pulls/2198) ([johnomotani](https://github.com/users/johnomotani)) -- Use bout_type="string" for strings in H5Format (4.4) [\#2194](https://github.com/boutproject/BOUT-dev/pulls/2194) ([johnomotani](https://github.com/users/johnomotani)) -- Write descriptions for std::vector and std::string variables [\#2191](https://github.com/boutproject/BOUT-dev/pulls/2191) ([johnomotani](https://github.com/users/johnomotani)) -- Fix reading of char* in Ncxx4 (4.4) [\#2189](https://github.com/boutproject/BOUT-dev/pulls/2189) ([johnomotani](https://github.com/users/johnomotani)) -- Merge master into v4.4.0-alpha [\#2174](https://github.com/boutproject/BOUT-dev/pulls/2174) ([ZedThree](https://github.com/users/ZedThree)) -- I/O for std::vector and std::string (4.4) [\#2155](https://github.com/boutproject/BOUT-dev/pulls/2155) ([johnomotani](https://github.com/users/johnomotani)) -- Check DataFile grid sizes match those in existing mesh (v4.4) [\#2148](https://github.com/boutproject/BOUT-dev/pulls/2148) ([johnomotani](https://github.com/users/johnomotani)) -- Call checkData() before returning result in Laplace inversions (v4.4) [\#2134](https://github.com/boutproject/BOUT-dev/pulls/2134) ([johnomotani](https://github.com/users/johnomotani)) -- Allow setting FFTW_EXHAUSTIVE (v4.4) [\#2132](https://github.com/boutproject/BOUT-dev/pulls/2132) ([johnomotani](https://github.com/users/johnomotani)) -- Make example relocatable [\#2127](https://github.com/boutproject/BOUT-dev/pulls/2127) ([dschwoerer](https://github.com/users/dschwoerer)) -- Merge master into v4.4.0-alpha [\#2121](https://github.com/boutproject/BOUT-dev/pulls/2121) ([ZedThree](https://github.com/users/ZedThree)) -- Handle FieldPerps in Datafile::varAdded() and Datafile::varPtr() (v4.4.0) [\#2094](https://github.com/boutproject/BOUT-dev/pulls/2094) ([johnomotani](https://github.com/users/johnomotani)) -- Staggered grids in InvertPar (v4.4.0) [\#2088](https://github.com/boutproject/BOUT-dev/pulls/2088) 
([johnomotani](https://github.com/users/johnomotani)) -- Allow descriptions of output variables; save some diagnostics for solvers (v4.4) [\#2086](https://github.com/boutproject/BOUT-dev/pulls/2086) ([johnomotani](https://github.com/users/johnomotani)) -- Correct Grad2_par2 implementation in InvertParCR (v4.4.0) [\#2077](https://github.com/boutproject/BOUT-dev/pulls/2077) ([johnomotani](https://github.com/users/johnomotani)) -- Enable staggered versions of SplitFluxDerivativeType (4.4) [\#2059](https://github.com/boutproject/BOUT-dev/pulls/2059) ([johnomotani](https://github.com/users/johnomotani)) -- Merge master into v4.4.0-alpha [\#1998](https://github.com/boutproject/BOUT-dev/pulls/1998) ([ZedThree](https://github.com/users/ZedThree)) -- LaplaceXY: finite difference option (v4.4) [\#1924](https://github.com/boutproject/BOUT-dev/pulls/1924) ([johnomotani](https://github.com/users/johnomotani)) -- Backport of Laplace performance test [\#1910](https://github.com/boutproject/BOUT-dev/pulls/1910) ([JosephThomasParker](https://github.com/users/JosephThomasParker)) -- Macro for creating enum classes (v4.4) [\#1895](https://github.com/boutproject/BOUT-dev/pulls/1895) ([johnomotani](https://github.com/users/johnomotani)) -- Implement toFieldAligned and fromFieldAligned for Vector3D (v4.4) [\#1878](https://github.com/boutproject/BOUT-dev/pulls/1878) ([johnomotani](https://github.com/users/johnomotani)) -- Remove 3-element list indexers for collect() [\#1862](https://github.com/boutproject/BOUT-dev/pulls/1862) ([johnomotani](https://github.com/users/johnomotani)) -- Allow user to override library option defaults (v4.4) [\#1849](https://github.com/boutproject/BOUT-dev/pulls/1849) ([johnomotani](https://github.com/users/johnomotani)) +- Add new contributors [\#2386](https://github.com/boutproject/BOUT-dev/pull/2386) ([dschwoerer](https://github.com/users/dschwoerer)) +- Update locale [\#2385](https://github.com/boutproject/BOUT-dev/pull/2385) ([dschwoerer](https://github.com/users/dschwoerer)) +- Fix RTD [\#2384](https://github.com/boutproject/BOUT-dev/pull/2384) ([dschwoerer](https://github.com/users/dschwoerer)) +- Rename `max_nonlinear_it` to `max_nonlinear_iterations` [\#2339](https://github.com/boutproject/BOUT-dev/pull/2339) ([johnomotani](https://github.com/users/johnomotani)) +- CVODE constraints and max_noinlinear_iterations options (4.4) [\#2304](https://github.com/boutproject/BOUT-dev/pull/2304) ([johnomotani](https://github.com/users/johnomotani)) +- Fix some Solvers not always using user preconditioner/Jacobian (v4.4) [\#2284](https://github.com/boutproject/BOUT-dev/pull/2284) ([ZedThree](https://github.com/users/ZedThree)) +- Fix formatting strings for pre-fmt in beuler solver [\#2278](https://github.com/boutproject/BOUT-dev/pull/2278) ([bendudson](https://github.com/users/bendudson)) +- Backport of Backward Euler solver to v4.4 [\#2265](https://github.com/boutproject/BOUT-dev/pull/2265) ([bendudson](https://github.com/users/bendudson)) +- Fix use of uninitialised value in Delp2 (4.4) [\#2263](https://github.com/boutproject/BOUT-dev/pull/2263) ([johnomotani](https://github.com/users/johnomotani)) +- Save provenance tracking info from grid file (4.4) [\#2231](https://github.com/boutproject/BOUT-dev/pull/2231) ([johnomotani](https://github.com/users/johnomotani)) +- Generate random run ID, track restarts (4.4) [\#2224](https://github.com/boutproject/BOUT-dev/pull/2224) ([johnomotani](https://github.com/users/johnomotani)) +- Generate report for Timers (4.4) 
[\#2216](https://github.com/boutproject/BOUT-dev/pull/2216) ([johnomotani](https://github.com/users/johnomotani)) +- Replace boutdata and boututils directories with submodules (v4.4) [\#2198](https://github.com/boutproject/BOUT-dev/pull/2198) ([johnomotani](https://github.com/users/johnomotani)) +- Use bout_type="string" for strings in H5Format (4.4) [\#2194](https://github.com/boutproject/BOUT-dev/pull/2194) ([johnomotani](https://github.com/users/johnomotani)) +- Write descriptions for std::vector and std::string variables [\#2191](https://github.com/boutproject/BOUT-dev/pull/2191) ([johnomotani](https://github.com/users/johnomotani)) +- Fix reading of char* in Ncxx4 (4.4) [\#2189](https://github.com/boutproject/BOUT-dev/pull/2189) ([johnomotani](https://github.com/users/johnomotani)) +- Merge master into v4.4.0-alpha [\#2174](https://github.com/boutproject/BOUT-dev/pull/2174) ([ZedThree](https://github.com/users/ZedThree)) +- I/O for std::vector and std::string (4.4) [\#2155](https://github.com/boutproject/BOUT-dev/pull/2155) ([johnomotani](https://github.com/users/johnomotani)) +- Check DataFile grid sizes match those in existing mesh (v4.4) [\#2148](https://github.com/boutproject/BOUT-dev/pull/2148) ([johnomotani](https://github.com/users/johnomotani)) +- Call checkData() before returning result in Laplace inversions (v4.4) [\#2134](https://github.com/boutproject/BOUT-dev/pull/2134) ([johnomotani](https://github.com/users/johnomotani)) +- Allow setting FFTW_EXHAUSTIVE (v4.4) [\#2132](https://github.com/boutproject/BOUT-dev/pull/2132) ([johnomotani](https://github.com/users/johnomotani)) +- Make example relocatable [\#2127](https://github.com/boutproject/BOUT-dev/pull/2127) ([dschwoerer](https://github.com/users/dschwoerer)) +- Merge master into v4.4.0-alpha [\#2121](https://github.com/boutproject/BOUT-dev/pull/2121) ([ZedThree](https://github.com/users/ZedThree)) +- Handle FieldPerps in Datafile::varAdded() and Datafile::varPtr() (v4.4.0) [\#2094](https://github.com/boutproject/BOUT-dev/pull/2094) ([johnomotani](https://github.com/users/johnomotani)) +- Staggered grids in InvertPar (v4.4.0) [\#2088](https://github.com/boutproject/BOUT-dev/pull/2088) ([johnomotani](https://github.com/users/johnomotani)) +- Allow descriptions of output variables; save some diagnostics for solvers (v4.4) [\#2086](https://github.com/boutproject/BOUT-dev/pull/2086) ([johnomotani](https://github.com/users/johnomotani)) +- Correct Grad2_par2 implementation in InvertParCR (v4.4.0) [\#2077](https://github.com/boutproject/BOUT-dev/pull/2077) ([johnomotani](https://github.com/users/johnomotani)) +- Enable staggered versions of SplitFluxDerivativeType (4.4) [\#2059](https://github.com/boutproject/BOUT-dev/pull/2059) ([johnomotani](https://github.com/users/johnomotani)) +- Merge master into v4.4.0-alpha [\#1998](https://github.com/boutproject/BOUT-dev/pull/1998) ([ZedThree](https://github.com/users/ZedThree)) +- LaplaceXY: finite difference option (v4.4) [\#1924](https://github.com/boutproject/BOUT-dev/pull/1924) ([johnomotani](https://github.com/users/johnomotani)) +- Backport of Laplace performance test [\#1910](https://github.com/boutproject/BOUT-dev/pull/1910) ([JosephThomasParker](https://github.com/users/JosephThomasParker)) +- Macro for creating enum classes (v4.4) [\#1895](https://github.com/boutproject/BOUT-dev/pull/1895) ([johnomotani](https://github.com/users/johnomotani)) +- Implement toFieldAligned and fromFieldAligned for Vector3D (v4.4) [\#1878](https://github.com/boutproject/BOUT-dev/pull/1878) 
([johnomotani](https://github.com/users/johnomotani)) +- Remove 3-element list indexers for collect() [\#1862](https://github.com/boutproject/BOUT-dev/pull/1862) ([johnomotani](https://github.com/users/johnomotani)) +- Allow user to override library option defaults (v4.4) [\#1849](https://github.com/boutproject/BOUT-dev/pull/1849) ([johnomotani](https://github.com/users/johnomotani)) + +The following were backported from v5.0.0 in [\#2389](https://github.com/boutproject/BOUT-dev/pull/2389): + +- Add remaining integrated tests to CMake build [\#1833](https://github.com/boutproject/BOUT-dev/pull/1833) ([ZedThree](https://github.com/users/ZedThree)) +- Fixes for Windows [\#1874](https://github.com/boutproject/BOUT-dev/pull/1874) ([ZedThree](https://github.com/users/ZedThree)) +- CMake: fix FindSlepc for non-make generators [\#1881](https://github.com/boutproject/BOUT-dev/pull/1881) ([ZedThree](https://github.com/users/ZedThree)) +- CMake: enable using external mpark.variant [\#1900](https://github.com/boutproject/BOUT-dev/pull/1900) ([ZedThree](https://github.com/users/ZedThree)) +- Cmake fixes [\#1909](https://github.com/boutproject/BOUT-dev/pull/1909) ([dschwoerer](https://github.com/users/dschwoerer)) +- CMake: find package fixes [\#1912](https://github.com/boutproject/BOUT-dev/pull/1912) ([ZedThree](https://github.com/users/ZedThree)) +- Fix runtest for non-make generators [\#1952](https://github.com/boutproject/BOUT-dev/pull/1952) ([ZedThree](https://github.com/users/ZedThree)) +- CMake: Generate Field arithmetic operators [\#2119](https://github.com/boutproject/BOUT-dev/pull/2119) ([dschwoerer](https://github.com/users/dschwoerer)) +- CMake: Add option to download and build netCDF C++ API [\#2138](https://github.com/boutproject/BOUT-dev/pull/2138) ([ZedThree](https://github.com/users/ZedThree)) +- Fix CMake config file following change to netCDF cmake module [\#2162](https://github.com/boutproject/BOUT-dev/pull/2162) ([ZedThree](https://github.com/users/ZedThree)) +- Make input Options case sensitive and error on unused Options [\#2210](https://github.com/boutproject/BOUT-dev/pull/2210) ([ZedThree](https://github.com/users/ZedThree)) +- Next CMake fixes [\#2275](https://github.com/boutproject/BOUT-dev/pull/2275) ([bendudson](https://github.com/users/bendudson)) +- Add MMS tests to CMake build [\#2286](https://github.com/boutproject/BOUT-dev/pull/2286) ([ZedThree](https://github.com/users/ZedThree)) +- CMake: Build documentation [\#2300](https://github.com/boutproject/BOUT-dev/pull/2300) ([ZedThree](https://github.com/users/ZedThree)) +- Fix for FindPETSc not being reentrant if pkg-config used (next) [\#2318](https://github.com/boutproject/BOUT-dev/pull/2318) ([ZedThree](https://github.com/users/ZedThree)) +- CMake: Fix a few minor issues, generate `bout-config` [\#2328](https://github.com/boutproject/BOUT-dev/pull/2328) ([ZedThree](https://github.com/users/ZedThree)) +- CMake: Add option to download SUNDIALS at configure time [\#2331](https://github.com/boutproject/BOUT-dev/pull/2331) ([ZedThree](https://github.com/users/ZedThree)) +- CMake rename PACKAGE_TESTS to BOUT_TESTS [\#2347](https://github.com/boutproject/BOUT-dev/pull/2347) ([bendudson](https://github.com/users/bendudson)) +- CMake: Add SOVERSION; fix FindSUNDIALS [\#2358](https://github.com/boutproject/BOUT-dev/pull/2358) ([dschwoerer](https://github.com/users/dschwoerer)) ## [v4.3.3](https://github.com/boutproject/BOUT-dev/tree/v4.3.2) (2021-07-28) From 3c68b241028cf89baa09801f794495a2ac4742d9 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?David=20Schw=C3=B6rer?= Date: Thu, 2 Apr 2020 19:00:41 +0100 Subject: [PATCH 344/428] Linking fixes for .so * Add $(LDFLAGS) to the so creation * LIB_A should be created as static archive, never the so * If creating the lib fails, we don't want to keep the broken lib * LDFLAGS should be at the end when linking --- make.config.in | 4 ++-- src/makefile.in | 16 +++++++++++----- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/make.config.in b/make.config.in index 92a600d9ca..86ff4503a1 100644 --- a/make.config.in +++ b/make.config.in @@ -242,7 +242,7 @@ ifeq ("$(TARGET)", "lib") lib: makefile $(BOUT_CONFIG_FILE) $(BOUT_TOP)/include $(BOUT_TOP)/lib $(OBJ) ifneq ("$(OBJ)foo", "foo") @echo " Adding $(OBJ) to libbout++.a" - @$(AR) $(ARFLAGS) $(LIB) $(OBJ) + @$(AR) $(ARFLAGS) $(LIB_A) $(OBJ) @$(RANLIB) $(LIB) endif endif @@ -292,7 +292,7 @@ $(SOURCEC): checklib $(SOURCEC:%.cxx=%.o): $(LIB) $(TARGET): makefile $(BOUT_CONFIG_FILE) $(OBJ) $(SUB_LIBS) @echo " Linking" $(TARGET) - @$(LD) $(LDFLAGS) -o $(TARGET) $(OBJ) $(SUB_LIBS) $(BOUT_LIBS) + @$(LD) -o $(TARGET) $(OBJ) $(SUB_LIBS) $(BOUT_LIBS) $(LDFLAGS) checklib: ifneq ("$(CHANGED)foo", "foo") diff --git a/src/makefile.in b/src/makefile.in index 64f57f3986..ecf86f2481 100644 --- a/src/makefile.in +++ b/src/makefile.in @@ -57,8 +57,8 @@ libfast: .libfast $(BOUT_LIB_PATH)/libbout++.a: bout++.o @echo "Recreating libbout++.a" - @rm -f $(BOUT_LIB_PATH)/libbout++.a - @$(AR) $(ARFLAGS) $(LIB) $(shell find $(BOUT_TOP)/src -name \*.o -type f -print 2> /dev/null) + @rm -f $(LIB_A) + @$(AR) $(ARFLAGS) $(LIB_A) $(shell find $(BOUT_TOP)/src -name \*.o -type f -print 2> /dev/null) || rm -f $(LIB_A) @touch .libfast @#$(RANLIB) $(LIB) @@STATIC_EXTRA@ @@ -67,9 +67,15 @@ $(BOUT_LIB_PATH)/libbout++.so: bout++.o @echo "Creating libbout++.so" @echo $(BOUT_FLAGS) | grep -qi pic || (echo "not compiled with PIC support - reconfigure with --enable-shared" ;exit 1) @$(RM) $(BOUT_LIB_PATH)/*.so* - @$(CXX) -shared -Wl,-soname,libbout++.so.$(BOUT_VERSION) -o $(LIB_SO).$(BOUT_VERSION) $(shell find $(BOUT_TOP)/src -name \*.o -type f -print 2> /dev/null) - @$(CXX) -shared -Wl,-soname,libpvode.so.1.0.0 -o $(BOUT_LIB_PATH)/libpvode_.so -L $(BOUT_LIB_PATH) -Wl,--whole-archive -lpvode -Wl,--no-whole-archive - @$(CXX) -shared -Wl,-soname,libpvpre.so.1.0.0 -o $(BOUT_LIB_PATH)/libpvpre_.so -L $(BOUT_LIB_PATH) -Wl,--whole-archive -lpvpre -Wl,--no-whole-archive + @$(CXX) -shared -Wl,-soname,libbout++.so.$(BOUT_VERSION) -o $(LIB_SO).$(BOUT_VERSION) \ + $(shell find $(BOUT_TOP)/src -name \*.o -type f -print 2> /dev/null) \ + $(LDFLAGS) + @$(CXX) -shared -Wl,-soname,libpvode.so.1.0.0 -o $(BOUT_LIB_PATH)/libpvode_.so \ + -L $(BOUT_LIB_PATH) -Wl,--whole-archive -lpvode -Wl,--no-whole-archive \ + $(LDFLAGS) + @$(CXX) -shared -Wl,-soname,libpvpre.so.1.0.0 -o $(BOUT_LIB_PATH)/libpvpre_.so \ + -L $(BOUT_LIB_PATH) -Wl,--whole-archive -lpvpre -Wl,--no-whole-archive \ + $(LDFLAGS) @mv $(BOUT_LIB_PATH)/libpvode_.so $(BOUT_LIB_PATH)/libpvode.so.1.0.0 @mv $(BOUT_LIB_PATH)/libpvpre_.so $(BOUT_LIB_PATH)/libpvpre.so.1.0.0 @ln -s libbout++.so.$(BOUT_VERSION) $(LIB_SO) From 2696b913d210a33a7b38fc566b4600d9b6e3a3fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Wed, 28 Apr 2021 10:24:49 +0200 Subject: [PATCH 345/428] Faster recompile By splitting the __TIME__ and __DATE__ to a different header, it makes it much faster to compile. This is especially beneficial in a edit-compile-debug cycle. 
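
The pattern is small enough to sketch in isolation. The file and symbol names below are
illustrative only (the patch itself uses bout++-time.cxx/hxx and
boutcompiledate/boutcompiletime); the point is that __DATE__ and __TIME__ live in a
two-line translation unit, so each rebuild only recompiles that tiny file and relinks,
instead of recompiling the large bout++.cxx:

    // build_stamp.hxx -- declarations only, so including this header does not
    // pull the timestamp into every translation unit
    extern const char* build_date;
    extern const char* build_time;

    // build_stamp.cxx -- the only file that must be recompiled on every build
    const char* build_date{__DATE__};
    const char* build_time{__TIME__};

    // main.cxx -- uses the stamp without mentioning __DATE__/__TIME__ itself
    #include <cstdio>
    #include "build_stamp.hxx"
    int main() { std::printf("compiled on %s at %s\n", build_date, build_time); }
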
--- make.config.in | 2 +- src/bout++-time.cxx | 2 ++ src/bout++-time.hxx | 2 ++ src/bout++.cxx | 3 ++- src/makefile.in | 10 +++++----- 5 files changed, 12 insertions(+), 7 deletions(-) create mode 100644 src/bout++-time.cxx create mode 100644 src/bout++-time.hxx diff --git a/make.config.in b/make.config.in index 86ff4503a1..8e612acae4 100644 --- a/make.config.in +++ b/make.config.in @@ -321,7 +321,7 @@ endif @echo " Compiling " $(@:.o=.cxx) @$(CXX) $(BOUT_INCLUDE) $(BOUT_FLAGS) -c $(@:.o=.cxx) -o $@ ifeq ("$(TARGET)","libfast") - test "$@" = "bout++.o" || touch $(BOUT_TOP)/lib/.last.o.file + test "$@" = "bout++-time.o" || touch $(BOUT_TOP)/lib/.last.o.file endif #################################################################### diff --git a/src/bout++-time.cxx b/src/bout++-time.cxx new file mode 100644 index 0000000000..d9fb269b41 --- /dev/null +++ b/src/bout++-time.cxx @@ -0,0 +1,2 @@ +const char * boutcompiledate{__DATE__}; +const char * boutcompiletime{__TIME__}; diff --git a/src/bout++-time.hxx b/src/bout++-time.hxx new file mode 100644 index 0000000000..14173cc62a --- /dev/null +++ b/src/bout++-time.hxx @@ -0,0 +1,2 @@ +extern const char * boutcompiledate; +extern const char * boutcompiletime; diff --git a/src/bout++.cxx b/src/bout++.cxx index bba2e27496..9ae2395bd3 100644 --- a/src/bout++.cxx +++ b/src/bout++.cxx @@ -51,6 +51,7 @@ const char DEFAULT_DIR[] = "data"; #include "bout/slepclib.hxx" #include "bout/solver.hxx" #include "bout/sys/timer.hxx" +#include "bout++-time.hxx" #define BOUT_NO_USING_NAMESPACE_BOUTGLOBALS #include "bout.hxx" @@ -375,7 +376,7 @@ void printStartupHeader(int MYPE, int NPES) { #ifdef MD5SUM output_progress.write("MD5 checksum: %s\n", BUILDFLAG(MD5SUM)); #endif - output_progress.write(_("Code compiled on %s at %s\n\n"), __DATE__, __TIME__); + output_progress.write(_("Code compiled on %s at %s\n\n"), boutcompiletime, boutcompiledate); output_info.write("B.Dudson (University of York), M.Umansky (LLNL) 2007\n"); output_info.write("Based on BOUT by Xueqiao Xu, 1999\n\n"); diff --git a/src/makefile.in b/src/makefile.in index ecf86f2481..2629630cb7 100644 --- a/src/makefile.in +++ b/src/makefile.in @@ -3,7 +3,7 @@ BOUT_TOP = .. 
DIRS = field fileio invert mesh physics solver sys -SOURCEC = bout++.cxx +SOURCEC = bout++.cxx bout++-time.cxx SOURCEH = bout.hxx CXXFLAGS += -DMD5SUM=$(CHECKSUM) -DREVISION="$(REVISION)" TARGET ?= lib @@ -38,7 +38,7 @@ include $(BOUT_TOP)/make.config # directories are finished and further to make sure we actually know # whether the file needs to be rebuild - which we can only know for # certain if all directories are finished -bout++.o: $(BOUT_LIB_PATH)/.last.o.file .dummy +bout++-time.o: $(BOUT_LIB_PATH)/.last.o.file .dummy # The recipie could be removed, as it only catches the case of # out-of-date make.config @@ -55,7 +55,7 @@ libfast: .libfast .libfast: @LIB_TO_BUILD@ -$(BOUT_LIB_PATH)/libbout++.a: bout++.o +$(BOUT_LIB_PATH)/libbout++.a: bout++-time.o @echo "Recreating libbout++.a" @rm -f $(LIB_A) @$(AR) $(ARFLAGS) $(LIB_A) $(shell find $(BOUT_TOP)/src -name \*.o -type f -print 2> /dev/null) || rm -f $(LIB_A) @@ -63,7 +63,7 @@ $(BOUT_LIB_PATH)/libbout++.a: bout++.o @#$(RANLIB) $(LIB) @@STATIC_EXTRA@ -$(BOUT_LIB_PATH)/libbout++.so: bout++.o +$(BOUT_LIB_PATH)/libbout++.so: bout++-time.o @echo "Creating libbout++.so" @echo $(BOUT_FLAGS) | grep -qi pic || (echo "not compiled with PIC support - reconfigure with --enable-shared" ;exit 1) @$(RM) $(BOUT_LIB_PATH)/*.so* @@ -90,7 +90,7 @@ all: lib end_message # Only needed for legacy build script ifneq ("$(TARGET)","libfast") # This to make sure build time is always printed correctly -.PHONY: bout++.cxx +.PHONY: bout++-time.cxx endif checksum: From 62155e35963653c2677d80696278deac42e35648 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Schw=C3=B6rer?= Date: Wed, 28 Apr 2021 11:13:08 +0200 Subject: [PATCH 346/428] Fix compilation * CMake needed new file * avoid race condition in autotools --- CMakeLists.txt | 1 + src/makefile.in | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b8ffddda72..e8af27b43b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -141,6 +141,7 @@ set(BOUT_SOURCES ./include/vector3d.hxx ./include/where.hxx ./src/bout++.cxx + ./src/bout++-time.cxx ./src/field/field.cxx ./src/field/field2d.cxx ./src/field/field3d.cxx diff --git a/src/makefile.in b/src/makefile.in index 2629630cb7..c6afa2b576 100644 --- a/src/makefile.in +++ b/src/makefile.in @@ -38,7 +38,7 @@ include $(BOUT_TOP)/make.config # directories are finished and further to make sure we actually know # whether the file needs to be rebuild - which we can only know for # certain if all directories are finished -bout++-time.o: $(BOUT_LIB_PATH)/.last.o.file .dummy +bout++-time.o: $(BOUT_LIB_PATH)/.last.o.file | $(DIRS) bout++.o # The recipie could be removed, as it only catches the case of # out-of-date make.config From 4bfed9ca51e69498c3733acf929cd07520e81bbc Mon Sep 17 00:00:00 2001 From: dschwoerer Date: Sat, 1 May 2021 20:42:30 +0200 Subject: [PATCH 347/428] fix order of date and time Co-authored-by: Peter Hill --- src/bout++.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bout++.cxx b/src/bout++.cxx index 9ae2395bd3..b2d59c9cee 100644 --- a/src/bout++.cxx +++ b/src/bout++.cxx @@ -376,7 +376,7 @@ void printStartupHeader(int MYPE, int NPES) { #ifdef MD5SUM output_progress.write("MD5 checksum: %s\n", BUILDFLAG(MD5SUM)); #endif - output_progress.write(_("Code compiled on %s at %s\n\n"), boutcompiletime, boutcompiledate); + output_progress.write(_("Code compiled on %s at %s\n\n"), boutcompiledate, boutcompiletime); output_info.write("B.Dudson (University of York), M.Umansky (LLNL) 2007\n"); 
output_info.write("Based on BOUT by Xueqiao Xu, 1999\n\n"); From 4ec28dfb25b490581d825b4d9e3f3223e8928deb Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 29 Jul 2021 16:55:13 +0100 Subject: [PATCH 348/428] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e85ac9723..99ec0f0997 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -80,6 +80,7 @@ The following were backported from v5.0.0 in [\#2389](https://github.com/boutpro - CMake: Add option to download SUNDIALS at configure time [\#2331](https://github.com/boutproject/BOUT-dev/pull/2331) ([ZedThree](https://github.com/users/ZedThree)) - CMake rename PACKAGE_TESTS to BOUT_TESTS [\#2347](https://github.com/boutproject/BOUT-dev/pull/2347) ([bendudson](https://github.com/users/bendudson)) - CMake: Add SOVERSION; fix FindSUNDIALS [\#2358](https://github.com/boutproject/BOUT-dev/pull/2358) ([dschwoerer](https://github.com/users/dschwoerer)) +- Faster recompile [\#2294](https://github.com/boutproject/BOUT-dev/pull/2294) ([dschwoerer](https://github.com/users/dschwoerer)) ## [v4.3.3](https://github.com/boutproject/BOUT-dev/tree/v4.3.2) (2021-07-28) From 5e05d0c181417c4115d634bc9772609dc5dbfe2c Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 23 Sep 2019 19:17:57 +0100 Subject: [PATCH 349/428] Allow Petsc options to be passed from BOUT.inp --- include/bout/petsclib.hxx | 6 ++++-- src/sys/petsclib.cxx | 38 +++++++++++++++++++++++++++++++++++++- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/include/bout/petsclib.hxx b/include/bout/petsclib.hxx index 36fbcc2ff2..c9fbf07fbf 100644 --- a/include/bout/petsclib.hxx +++ b/include/bout/petsclib.hxx @@ -47,6 +47,8 @@ class PetscLib; #ifdef BOUT_HAS_PETSC +#include "options.hxx" + #include #include @@ -61,7 +63,7 @@ public: /*! * Ensure that PETSc has been initialised */ - PetscLib(); + PetscLib(Options* opt = nullptr); /*! * Calls PetscFinalize when all PetscLib instances are destroyed @@ -109,7 +111,7 @@ private: class PetscLib { public: - PetscLib() {} + PetscLib(Options* UNUSED(opt) = nullptr) {} ~PetscLib() {} static void setArgs(int &UNUSED(c), char** &UNUSED(v)) {} diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index 5e49a2f1d5..e7357cd652 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -13,7 +13,7 @@ int *PetscLib::pargc = nullptr; char ***PetscLib::pargv = nullptr; PetscLogEvent PetscLib::USER_EVENT = 0; -PetscLib::PetscLib() { +PetscLib::PetscLib(Options* opt) { if(count == 0) { // Initialise PETSc @@ -23,6 +23,42 @@ PetscLib::PetscLib() { PetscLogEventRegister("Total BOUT++",0,&USER_EVENT); PetscLogEventBegin(USER_EVENT,0,0,0,0); } + + if (count == 0 or opt != nullptr) { + // Pass options to Petsc's global options database. + // (PetscOptions type exists for non-global options, but its use is not discussed in + // the Petsc manual, so ignoring the possibility here.) 
+ + if (opt == nullptr) { + // Options read by default from the [petsc] section of the input + opt = Options::getRoot()->getSection("petsc"); + } + + Options& options = *opt; + + // Pass all options in the section to Petsc + for (auto& i : options.getChildren()) { + if (not i.second.isValue()) { + throw BoutException("Found subsection %s in %s when reading Petsc options - only " + "values are allowed in the Petsc options, not subsections", + i.first.c_str(), options.str().c_str()); + } + // Note, option names in the input file don't start with "-", but need to be passed + // to Petsc with "-" prepended + PetscErrorCode ierr; + if (lowercase(i.second) == "true") { + // Petsc flag with no value + ierr = PetscOptionsSetValue(nullptr, ("-"+i.first).c_str(), nullptr); + } else { + // Option with actual value to pass + ierr = PetscOptionsSetValue(nullptr, ("-"+i.first).c_str(), + i.second.as().c_str()); + } + if (ierr) { + throw BoutException("PetscOptionsSetValue returned error code %i", ierr); + } + } + } count++; } From 878d74c27a2c58854ccca8e5a0e356153a4274b1 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 23 Sep 2019 23:29:58 +0100 Subject: [PATCH 350/428] Use unique prefixes to allow passing object-specific options to PETSc --- include/bout/invertable_operator.hxx | 6 +- include/bout/petsclib.hxx | 12 ++- .../laplace/impls/petsc/petsc_laplace.cxx | 5 +- src/invert/laplacexy/laplacexy.cxx | 7 +- .../laplacexz/impls/petsc/laplacexz-petsc.cxx | 6 +- src/sys/petsclib.cxx | 85 ++++++++++++------- 6 files changed, 77 insertions(+), 44 deletions(-) diff --git a/include/bout/invertable_operator.hxx b/include/bout/invertable_operator.hxx index 962ab68d59..2854686ce0 100644 --- a/include/bout/invertable_operator.hxx +++ b/include/bout/invertable_operator.hxx @@ -134,7 +134,8 @@ public: : operatorFunction(func), preconditionerFunction(func), opt(optIn == nullptr ? Options::getRoot()->getSection("invertableOperator") : optIn), - localmesh(localmeshIn == nullptr ? bout::globals::mesh : localmeshIn) { + localmesh(localmeshIn == nullptr ? bout::globals::mesh : localmeshIn), + lib(opt) { AUTO_TRACE(); }; @@ -338,8 +339,7 @@ public: CHKERRQ(ierr); /// Now create and setup the linear solver with the matrix - ierr = KSPCreate(BoutComm::get(), &ksp); - CHKERRQ(ierr); + lib.createKSPWithOptions(BoutComm::get(), &ksp); #if PETSC_VERSION_LT(3, 5, 0) /// Need to provide a MatStructure flag in versions <3.5. This details if we expect diff --git a/include/bout/petsclib.hxx b/include/bout/petsclib.hxx index c9fbf07fbf..40a3e635cd 100644 --- a/include/bout/petsclib.hxx +++ b/include/bout/petsclib.hxx @@ -77,7 +77,10 @@ public: * The arguments will be passed to PetscInitialize() */ static void setArgs(int &c, char** &v) { pargc = &c; pargv = &v;} - + + /// Create a KSP linear solver that uses the options specific to this PetscLib + void createKSPWithOptions(MPI_Comm& comm, KSP& ksp); + /*! * Force cleanup. 
This will call PetscFinalize, printing a warning * if any instances of PetscLib still exist @@ -91,7 +94,12 @@ private: static int* pargc; static char*** pargv; + // Prefix for object-specific options + std::string options_prefix; + static PetscLogEvent USER_EVENT; + + void setPetscOptions(Options& options, std::string pass_options_prefix); }; #ifndef PETSC_VERSION_GE @@ -116,6 +124,8 @@ public: static void setArgs(int &UNUSED(c), char** &UNUSED(v)) {} + void createKSPWithOptions(MPI_Comm& comm, KSP& ksp); + static void cleanup() {} }; diff --git a/src/invert/laplace/impls/petsc/petsc_laplace.cxx b/src/invert/laplace/impls/petsc/petsc_laplace.cxx index 2399520093..943968bc68 100644 --- a/src/invert/laplace/impls/petsc/petsc_laplace.cxx +++ b/src/invert/laplace/impls/petsc/petsc_laplace.cxx @@ -61,7 +61,8 @@ static PetscErrorCode laplacePCapply(PC pc,Vec x,Vec y) { LaplacePetsc::LaplacePetsc(Options *opt, const CELL_LOC loc, Mesh *mesh_in) : Laplacian(opt, loc, mesh_in), A(0.0), C1(1.0), C2(1.0), D(1.0), Ex(0.0), Ez(0.0), - issetD(false), issetC(false), issetE(false) + issetD(false), issetC(false), issetE(false), + lib(opt==nullptr ? &(Options::root()["laplace"]) : opt) { A.setLocation(location); C1.setLocation(location); @@ -273,7 +274,7 @@ LaplacePetsc::LaplacePetsc(Options *opt, const CELL_LOC loc, Mesh *mesh_in) : MatSetUp(MatA); // Declare KSP Context (abstract PETSc object that manages all Krylov methods) - KSPCreate( comm, &ksp ); + lib.createKSPWithOptions(comm, ksp); // Get KSP Solver Type (Generalizes Minimal RESidual is the default) ksptype = (*opts)["ksptype"].doc("KSP solver type").withDefault(KSP_GMRES); diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index e19fd2368b..a09c212fc8 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -31,8 +31,9 @@ static PetscErrorCode laplacePCapply(PC pc,Vec x,Vec y) { int LaplaceXY::instance_count = 0; -LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) - : localmesh(m==nullptr ? bout::globals::mesh : m), location(loc), monitor(*this) { +LaplaceXY::LaplaceXY(Mesh* m, Options* opt, const CELL_LOC loc) + : lib(opt == nullptr ? &(Options::root()["laplacexy"]) : opt), + localmesh(m == nullptr ? bout::globals::mesh : m), location(loc), monitor(*this) { Timer timer("invert"); instance_count++; @@ -492,7 +493,7 @@ LaplaceXY::LaplaceXY(Mesh *m, Options *opt, const CELL_LOC loc) // Set up KSP // Declare KSP Context - KSPCreate( comm, &ksp ); + lib.createKSPWithOptions(comm, ksp); // Configure Linear Solver diff --git a/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx b/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx index cde72ae4ab..6ac515c029 100644 --- a/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx +++ b/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx @@ -19,7 +19,9 @@ #include LaplaceXZpetsc::LaplaceXZpetsc(Mesh *m, Options *opt, const CELL_LOC loc) - : LaplaceXZ(m, opt, loc), coefs_set(false) { + : LaplaceXZ(m, opt, loc), lib(opt==nullptr ? 
&(Options::root()["laplacexz"]) : opt), + coefs_set(false) { + /* Constructor: LaplaceXZpetsc * Purpose: - Setting inversion solver options * - Setting the solver method @@ -225,7 +227,7 @@ LaplaceXZpetsc::LaplaceXZpetsc(Mesh *m, Options *opt, const CELL_LOC loc) ////////////////////////////////////////////////// // Declare KSP Context - KSPCreate( comm, &data.ksp ); + lib.createKSPWithOptions(comm, data.ksp); // Set KSP type KSPSetType( data.ksp, ksptype.c_str() ); diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index e7357cd652..0b93a692d9 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -13,7 +13,7 @@ int *PetscLib::pargc = nullptr; char ***PetscLib::pargv = nullptr; PetscLogEvent PetscLib::USER_EVENT = 0; -PetscLib::PetscLib(Options* opt) { +PetscLib::PetscLib(Options* opt) : options_prefix("") { if(count == 0) { // Initialise PETSc @@ -22,42 +22,24 @@ PetscLib::PetscLib(Options* opt) { PetscInitialize(pargc,pargv,PETSC_NULL,help); PetscLogEventRegister("Total BOUT++",0,&USER_EVENT); PetscLogEventBegin(USER_EVENT,0,0,0,0); + + // Load global PETSc options from the [petsc] section of the input + setPetscOptions(Options::root()["petsc"], ""); } - if (count == 0 or opt != nullptr) { - // Pass options to Petsc's global options database. - // (PetscOptions type exists for non-global options, but its use is not discussed in - // the Petsc manual, so ignoring the possibility here.) + if (opt != nullptr and opt->isSection("petsc")) { + // Use options specific to this PetscLib + // Pass options to PETSc's global options database, with a unique prefix, that will be + // passed to a KSP later. + // (PetscOptions type exists for non-global options, but apparently is only for user + // options, and cannot be passed to KSP, etc. Non-global options can be passed by + // defining a custom prefix for the options string, and then passing that to the KSP.) 
- if (opt == nullptr) { - // Options read by default from the [petsc] section of the input - opt = Options::getRoot()->getSection("petsc"); - } + options_prefix = "boutpetsclib" + std::to_string(count) + "_"; - Options& options = *opt; - - // Pass all options in the section to Petsc - for (auto& i : options.getChildren()) { - if (not i.second.isValue()) { - throw BoutException("Found subsection %s in %s when reading Petsc options - only " - "values are allowed in the Petsc options, not subsections", - i.first.c_str(), options.str().c_str()); - } - // Note, option names in the input file don't start with "-", but need to be passed - // to Petsc with "-" prepended - PetscErrorCode ierr; - if (lowercase(i.second) == "true") { - // Petsc flag with no value - ierr = PetscOptionsSetValue(nullptr, ("-"+i.first).c_str(), nullptr); - } else { - // Option with actual value to pass - ierr = PetscOptionsSetValue(nullptr, ("-"+i.first).c_str(), - i.second.as().c_str()); - } - if (ierr) { - throw BoutException("PetscOptionsSetValue returned error code %i", ierr); - } - } + Options& options = (*opt)["petsc"]; + + setPetscOptions(options, options_prefix); } count++; } @@ -72,6 +54,18 @@ PetscLib::~PetscLib() { } } +void PetscLib::createKSPWithOptions(MPI_Comm& comm, KSP& ksp) { + auto ierr = KSPCreate(comm, &ksp); + if (ierr) { + throw BoutException("KSPCreate failed with error %i", ierr); + } + + ierr = KSPSetOptionsPrefix(ksp, options_prefix.c_str()); + if (ierr) { + throw BoutException("KSPSetOptionsPrefix failed with error %i", ierr); + } +} + void PetscLib::cleanup() { if(count == 0) return; // Either never initialised, or already cleaned up @@ -83,5 +77,30 @@ void PetscLib::cleanup() { count = 0; // ensure that finalise is not called again later } +void PetscLib::setPetscOptions(Options& options, std::string pass_options_prefix) { + // Pass all options in the section to PETSc + for (auto& i : options.getChildren()) { + if (not i.second.isValue()) { + throw BoutException("Found subsection %s in %s when reading PETSc options - only " + "values are allowed in the PETSc options, not subsections", + i.first.c_str(), options.str().c_str()); + } + // Note, option names in the input file don't start with "-", but need to be passed + // to PETSc with "-" prepended + PetscErrorCode ierr; + if (lowercase(i.second) == "true") { + // PETSc flag with no value + ierr = PetscOptionsSetValue(nullptr, ("-"+pass_options_prefix+i.first).c_str(), + nullptr); + } else { + // Option with actual value to pass + ierr = PetscOptionsSetValue(nullptr, ("-"+pass_options_prefix+i.first).c_str(), + i.second.as().c_str()); + } + if (ierr) { + throw BoutException("PetscOptionsSetValue returned error code %i", ierr); + } + } +} #endif // BOUT_HAS_PETSC From 187decdc377c770425428cfd04448c0e8d7c03c7 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Mon, 23 Sep 2019 23:57:59 +0100 Subject: [PATCH 351/428] Manual entry on PETSc options --- manual/sphinx/index.rst | 1 + manual/sphinx/user_docs/petsc.rst | 37 +++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 manual/sphinx/user_docs/petsc.rst diff --git a/manual/sphinx/index.rst b/manual/sphinx/index.rst index 1923087d98..a6e5626942 100644 --- a/manual/sphinx/index.rst +++ b/manual/sphinx/index.rst @@ -69,6 +69,7 @@ The documentation is divided into the following sections: user_docs/staggered_grids user_docs/eigenvalue_solver user_docs/nonlocal + user_docs/petsc .. 
toctree:: :maxdepth: 1 diff --git a/manual/sphinx/user_docs/petsc.rst b/manual/sphinx/user_docs/petsc.rst new file mode 100644 index 0000000000..27141e8822 --- /dev/null +++ b/manual/sphinx/user_docs/petsc.rst @@ -0,0 +1,37 @@ +.. default-role:: math + +.. _sec-petsc: + + +PETSc solvers +============= + +Options for PETSc solvers can be passed in the input file (or on the command line). +Global options are set in the ``[petsc]`` section. To set options specific to a +particular PETSc-based solver, the options can be set in a ``petsc`` subsection of the +solver's options, e.g. for a LaplaceXY solver (using the default options section) use the +``[laplacexy:petsc]`` section. Note that the global options, including any +passed on the command line [*]_, will be ignored for that solver if the subsection +is created. + +Any options that can be passed on the command line to PETSc can be set, with no preceding +hyphen. Any PETSc options that are passed as flags with no value set should be given the +value ``true`` so BOUT++ knows to read them. So for example, if the command line options +would be:: + + -ksp_monitor -ksp_type gmres + +in the input file you would put:: + + [petsc] + ksp_monitor = true + ksp_type = gmres + + +.. [*] The object-specific options are passed to PETSc by creating an object-specific + prefix ``boutpetsclib#_``, where ``#`` is replaced with an integer counter, + counting the number of PetscLib instances. So an option could in principle be + passed to a particular solver if you work out what the counter is for that solver, + e.g.:: + + -boutpetsclib1_ksp_type gmres From d4540d098fd63e1762b7f3b15c2c88a11bb1429c Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 24 Sep 2019 10:10:17 +0100 Subject: [PATCH 352/428] Add 'Invertable operators' section to the contents list of the manual --- manual/sphinx/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/manual/sphinx/index.rst b/manual/sphinx/index.rst index a6e5626942..02684922ec 100644 --- a/manual/sphinx/index.rst +++ b/manual/sphinx/index.rst @@ -69,6 +69,7 @@ The documentation is divided into the following sections: user_docs/staggered_grids user_docs/eigenvalue_solver user_docs/nonlocal + user_docs/invertable_operator user_docs/petsc .. toctree:: From ad3107ef491548075561361e9f5e4563a3570b7c Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 24 Sep 2019 10:10:56 +0100 Subject: [PATCH 353/428] PetscOptionsSetValue had a different signature before PETSc-3.7 The first argument 'PetscOptions options' was introduced in 3.7. Add \#if..\#else to handle the different versions. 
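
As a standalone illustration of the guard (this helper is not part of BOUT++; the real
code wraps the same #if/#else inside PetscLib::setPetscOptions, as in the diff below),
the version difference can be hidden behind a small function:

    #include <petscsys.h>
    #include <string>

    // Set an option in PETSc's global options database, hiding the signature
    // change: PETSc >= 3.7 takes a leading PetscOptions argument (nullptr
    // selects the global database), while older versions do not.
    PetscErrorCode setPetscOption(const std::string& name, const char* value) {
    #if PETSC_VERSION_GE(3, 7, 0)
      return PetscOptionsSetValue(nullptr, name.c_str(), value);
    #else
      return PetscOptionsSetValue(name.c_str(), value);
    #endif
    }

This assumes PETSC_VERSION_GE is available; petsclib.hxx already carries a fallback
definition for PETSc versions that lack it.
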
--- src/sys/petsclib.cxx | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index 0b93a692d9..e68e932def 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -90,12 +90,24 @@ void PetscLib::setPetscOptions(Options& options, std::string pass_options_prefix PetscErrorCode ierr; if (lowercase(i.second) == "true") { // PETSc flag with no value +#if PETSC_VERSION_GE(3, 7, 0) ierr = PetscOptionsSetValue(nullptr, ("-"+pass_options_prefix+i.first).c_str(), nullptr); +#else +// no PetscOptions as first argument + ierr = PetscOptionsSetValue(("-"+pass_options_prefix+i.first).c_str(), + nullptr); +#endif } else { // Option with actual value to pass +#if PETSC_VERSION_GE(3, 7, 0) ierr = PetscOptionsSetValue(nullptr, ("-"+pass_options_prefix+i.first).c_str(), i.second.as().c_str()); +#else +// no PetscOptions as first argument + ierr = PetscOptionsSetValue(("-"+pass_options_prefix+i.first).c_str(), + i.second.as().c_str()); +#endif } if (ierr) { throw BoutException("PetscOptionsSetValue returned error code %i", ierr); From a154a67bb1d72447f7abccda4f1d055086daf864 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 24 Sep 2019 10:54:39 +0100 Subject: [PATCH 354/428] Fix createKSPWithOptions call in InvertableOperator Previously passed pointer to ksp instead of ksp. --- include/bout/invertable_operator.hxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/bout/invertable_operator.hxx b/include/bout/invertable_operator.hxx index 2854686ce0..ff97b1711b 100644 --- a/include/bout/invertable_operator.hxx +++ b/include/bout/invertable_operator.hxx @@ -339,7 +339,7 @@ public: CHKERRQ(ierr); /// Now create and setup the linear solver with the matrix - lib.createKSPWithOptions(BoutComm::get(), &ksp); + lib.createKSPWithOptions(BoutComm::get(), ksp); #if PETSC_VERSION_LT(3, 5, 0) /// Need to provide a MatStructure flag in versions <3.5. 
This details if we expect From b9216c4c230cc3c23a64889cbbf77924b37a746f Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 24 Sep 2019 11:46:54 +0100 Subject: [PATCH 355/428] Minor tidy of PetscLib changes --- include/bout/petsclib.hxx | 2 +- src/sys/petsclib.cxx | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/include/bout/petsclib.hxx b/include/bout/petsclib.hxx index 40a3e635cd..3d98757b85 100644 --- a/include/bout/petsclib.hxx +++ b/include/bout/petsclib.hxx @@ -99,7 +99,7 @@ private: static PetscLogEvent USER_EVENT; - void setPetscOptions(Options& options, std::string pass_options_prefix); + void setPetscOptions(Options& options, const std::string& pass_options_prefix); }; #ifndef PETSC_VERSION_GE diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index e68e932def..3f66034eef 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -13,7 +13,7 @@ int *PetscLib::pargc = nullptr; char ***PetscLib::pargv = nullptr; PetscLogEvent PetscLib::USER_EVENT = 0; -PetscLib::PetscLib(Options* opt) : options_prefix("") { +PetscLib::PetscLib(Options* opt) { if(count == 0) { // Initialise PETSc @@ -77,7 +77,7 @@ void PetscLib::cleanup() { count = 0; // ensure that finalise is not called again later } -void PetscLib::setPetscOptions(Options& options, std::string pass_options_prefix) { +void PetscLib::setPetscOptions(Options& options, const std::string& prefix) { // Pass all options in the section to PETSc for (auto& i : options.getChildren()) { if (not i.second.isValue()) { @@ -88,29 +88,29 @@ void PetscLib::setPetscOptions(Options& options, std::string pass_options_prefix // Note, option names in the input file don't start with "-", but need to be passed // to PETSc with "-" prepended PetscErrorCode ierr; + auto petsc_option_name = "-"+prefix+i.first; if (lowercase(i.second) == "true") { // PETSc flag with no value #if PETSC_VERSION_GE(3, 7, 0) - ierr = PetscOptionsSetValue(nullptr, ("-"+pass_options_prefix+i.first).c_str(), - nullptr); + ierr = PetscOptionsSetValue(nullptr, petsc_option_name.c_str(), nullptr); #else // no PetscOptions as first argument - ierr = PetscOptionsSetValue(("-"+pass_options_prefix+i.first).c_str(), - nullptr); + ierr = PetscOptionsSetValue(petsc_option_name.c_str(), nullptr); #endif } else { // Option with actual value to pass #if PETSC_VERSION_GE(3, 7, 0) - ierr = PetscOptionsSetValue(nullptr, ("-"+pass_options_prefix+i.first).c_str(), - i.second.as().c_str()); + ierr = PetscOptionsSetValue(nullptr, petsc_option_name.c_str(), + i.second.as().c_str()); #else // no PetscOptions as first argument - ierr = PetscOptionsSetValue(("-"+pass_options_prefix+i.first).c_str(), - i.second.as().c_str()); + ierr = PetscOptionsSetValue(petsc_option_name.c_str(), + i.second.as().c_str()); #endif } if (ierr) { - throw BoutException("PetscOptionsSetValue returned error code %i", ierr); + throw BoutException("PetscOptionsSetValue returned error code %i when setting %s", + ierr, petsc_option_name); } } } From 7ddb068ff067958dd615edf6b8b5cd2a76ad2ccf Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 24 Sep 2019 11:50:15 +0100 Subject: [PATCH 356/428] Return KSP from PetscLib::createKSPWithOptions() Instead of modifying a KSP argument (like the PETSc interface KSPCreate), create and return a KSP from the method. 
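
Whichever way the KSP is handed back, the underlying mechanism is PETSc's options
prefix: the KSP is created, given the per-instance prefix (e.g. "boutpetsclib1_"), and
KSPSetFromOptions then only picks up options carrying that prefix. A rough standalone
sketch of that flow (the function name is illustrative and error checking is omitted;
in these patches createKSPWithOptions sets the prefix while the callers invoke
KSPSetFromOptions themselves):

    #include <petscksp.h>

    // Create a Krylov solver whose options are namespaced by 'prefix', so that
    // e.g. "-boutpetsclib1_ksp_type gmres" configures only this solver.
    KSP createPrefixedKSP(MPI_Comm comm, const char* prefix) {
      KSP ksp;
      KSPCreate(comm, &ksp);
      KSPSetOptionsPrefix(ksp, prefix); // e.g. "boutpetsclib1_"
      KSPSetFromOptions(ksp);           // reads -<prefix>ksp_* from the options database
      return ksp;
    }
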
--- include/bout/invertable_operator.hxx | 2 +- include/bout/petsclib.hxx | 4 ++-- src/invert/laplace/impls/petsc/petsc_laplace.cxx | 2 +- src/invert/laplacexy/laplacexy.cxx | 2 +- src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx | 2 +- src/sys/petsclib.cxx | 6 +++++- 6 files changed, 11 insertions(+), 7 deletions(-) diff --git a/include/bout/invertable_operator.hxx b/include/bout/invertable_operator.hxx index ff97b1711b..7b52884301 100644 --- a/include/bout/invertable_operator.hxx +++ b/include/bout/invertable_operator.hxx @@ -339,7 +339,7 @@ public: CHKERRQ(ierr); /// Now create and setup the linear solver with the matrix - lib.createKSPWithOptions(BoutComm::get(), ksp); + ksp = lib.createKSPWithOptions(BoutComm::get()); #if PETSC_VERSION_LT(3, 5, 0) /// Need to provide a MatStructure flag in versions <3.5. This details if we expect diff --git a/include/bout/petsclib.hxx b/include/bout/petsclib.hxx index 3d98757b85..08ad01e8f9 100644 --- a/include/bout/petsclib.hxx +++ b/include/bout/petsclib.hxx @@ -79,7 +79,7 @@ public: static void setArgs(int &c, char** &v) { pargc = &c; pargv = &v;} /// Create a KSP linear solver that uses the options specific to this PetscLib - void createKSPWithOptions(MPI_Comm& comm, KSP& ksp); + KSP createKSPWithOptions(MPI_Comm& comm); /*! * Force cleanup. This will call PetscFinalize, printing a warning @@ -124,7 +124,7 @@ public: static void setArgs(int &UNUSED(c), char** &UNUSED(v)) {} - void createKSPWithOptions(MPI_Comm& comm, KSP& ksp); + KSP createKSPWithOptions(MPI_Comm& comm); static void cleanup() {} }; diff --git a/src/invert/laplace/impls/petsc/petsc_laplace.cxx b/src/invert/laplace/impls/petsc/petsc_laplace.cxx index 943968bc68..6adb5e23cd 100644 --- a/src/invert/laplace/impls/petsc/petsc_laplace.cxx +++ b/src/invert/laplace/impls/petsc/petsc_laplace.cxx @@ -274,7 +274,7 @@ LaplacePetsc::LaplacePetsc(Options *opt, const CELL_LOC loc, Mesh *mesh_in) : MatSetUp(MatA); // Declare KSP Context (abstract PETSc object that manages all Krylov methods) - lib.createKSPWithOptions(comm, ksp); + ksp = lib.createKSPWithOptions(comm); // Get KSP Solver Type (Generalizes Minimal RESidual is the default) ksptype = (*opts)["ksptype"].doc("KSP solver type").withDefault(KSP_GMRES); diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index a09c212fc8..01d280546b 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -493,7 +493,7 @@ LaplaceXY::LaplaceXY(Mesh* m, Options* opt, const CELL_LOC loc) // Set up KSP // Declare KSP Context - lib.createKSPWithOptions(comm, ksp); + ksp = lib.createKSPWithOptions(comm); // Configure Linear Solver diff --git a/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx b/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx index 6ac515c029..eeeb41e94e 100644 --- a/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx +++ b/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx @@ -227,7 +227,7 @@ LaplaceXZpetsc::LaplaceXZpetsc(Mesh *m, Options *opt, const CELL_LOC loc) ////////////////////////////////////////////////// // Declare KSP Context - lib.createKSPWithOptions(comm, data.ksp); + data.ksp = lib.createKSPWithOptions(comm); // Set KSP type KSPSetType( data.ksp, ksptype.c_str() ); diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index 3f66034eef..bac69b7077 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -54,7 +54,9 @@ PetscLib::~PetscLib() { } } -void PetscLib::createKSPWithOptions(MPI_Comm& comm, KSP& ksp) { +KSP 
PetscLib::createKSPWithOptions(MPI_Comm& comm) { + KSP ksp; + auto ierr = KSPCreate(comm, &ksp); if (ierr) { throw BoutException("KSPCreate failed with error %i", ierr); @@ -64,6 +66,8 @@ void PetscLib::createKSPWithOptions(MPI_Comm& comm, KSP& ksp) { if (ierr) { throw BoutException("KSPSetOptionsPrefix failed with error %i", ierr); } + + return ksp; } void PetscLib::cleanup() { From 11bd33159286c9afbe5e15186d0eeffb1f32ae9d Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 24 Sep 2019 11:59:31 +0100 Subject: [PATCH 357/428] Pass "petsc" options subsection directly to PetscLib constructor --- include/bout/invertable_operator.hxx | 2 +- src/invert/laplace/impls/petsc/petsc_laplace.cxx | 2 +- src/invert/laplacexy/laplacexy.cxx | 2 +- src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx | 3 ++- src/sys/petsclib.cxx | 6 ++---- 5 files changed, 7 insertions(+), 8 deletions(-) diff --git a/include/bout/invertable_operator.hxx b/include/bout/invertable_operator.hxx index 7b52884301..39756b1cb7 100644 --- a/include/bout/invertable_operator.hxx +++ b/include/bout/invertable_operator.hxx @@ -135,7 +135,7 @@ public: opt(optIn == nullptr ? Options::getRoot()->getSection("invertableOperator") : optIn), localmesh(localmeshIn == nullptr ? bout::globals::mesh : localmeshIn), - lib(opt) { + lib(&(*opt)["petsc"]) { AUTO_TRACE(); }; diff --git a/src/invert/laplace/impls/petsc/petsc_laplace.cxx b/src/invert/laplace/impls/petsc/petsc_laplace.cxx index 6adb5e23cd..a20bf2f968 100644 --- a/src/invert/laplace/impls/petsc/petsc_laplace.cxx +++ b/src/invert/laplace/impls/petsc/petsc_laplace.cxx @@ -62,7 +62,7 @@ LaplacePetsc::LaplacePetsc(Options *opt, const CELL_LOC loc, Mesh *mesh_in) : Laplacian(opt, loc, mesh_in), A(0.0), C1(1.0), C2(1.0), D(1.0), Ex(0.0), Ez(0.0), issetD(false), issetC(false), issetE(false), - lib(opt==nullptr ? &(Options::root()["laplace"]) : opt) + lib(opt==nullptr ? &(Options::root()["laplace"]["petsc"]) : &(*opt)["petsc"]) { A.setLocation(location); C1.setLocation(location); diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index 01d280546b..7ade2bdb8d 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -32,7 +32,7 @@ static PetscErrorCode laplacePCapply(PC pc,Vec x,Vec y) { int LaplaceXY::instance_count = 0; LaplaceXY::LaplaceXY(Mesh* m, Options* opt, const CELL_LOC loc) - : lib(opt == nullptr ? &(Options::root()["laplacexy"]) : opt), + : lib(opt == nullptr ? &(Options::root()["laplacexy"]["petsc"]) : &(*opt)["petsc"]), localmesh(m == nullptr ? bout::globals::mesh : m), location(loc), monitor(*this) { Timer timer("invert"); diff --git a/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx b/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx index eeeb41e94e..4e53a4d9c7 100644 --- a/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx +++ b/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx @@ -19,7 +19,8 @@ #include LaplaceXZpetsc::LaplaceXZpetsc(Mesh *m, Options *opt, const CELL_LOC loc) - : LaplaceXZ(m, opt, loc), lib(opt==nullptr ? &(Options::root()["laplacexz"]) : opt), + : LaplaceXZ(m, opt, loc), + lib(opt==nullptr ? 
&(Options::root()["laplacexz"]["petsc"]) : &(*opt)["petsc"]), coefs_set(false) { /* Constructor: LaplaceXZpetsc diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index bac69b7077..2ce738d6d7 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -27,7 +27,7 @@ PetscLib::PetscLib(Options* opt) { setPetscOptions(Options::root()["petsc"], ""); } - if (opt != nullptr and opt->isSection("petsc")) { + if (opt != nullptr and opt->isSection()) { // Use options specific to this PetscLib // Pass options to PETSc's global options database, with a unique prefix, that will be // passed to a KSP later. @@ -37,9 +37,7 @@ PetscLib::PetscLib(Options* opt) { options_prefix = "boutpetsclib" + std::to_string(count) + "_"; - Options& options = (*opt)["petsc"]; - - setPetscOptions(options, options_prefix); + setPetscOptions(*opt, options_prefix); } count++; } From c95b1d47622e298e9f5d74fbbe452247b2c99c53 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 24 Sep 2019 16:20:28 +0100 Subject: [PATCH 358/428] Note usefulness of PETSc's -options_view and -options_left in manual --- manual/sphinx/user_docs/petsc.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/manual/sphinx/user_docs/petsc.rst b/manual/sphinx/user_docs/petsc.rst index 27141e8822..63118d8720 100644 --- a/manual/sphinx/user_docs/petsc.rst +++ b/manual/sphinx/user_docs/petsc.rst @@ -35,3 +35,6 @@ in the input file you would put:: e.g.:: -boutpetsclib1_ksp_type gmres + + The PETSc arguments ``-options_view`` and ``options_left`` might be helpful for + this - they will show what options have been set, so will show the prefixes used. From 4270fea087c7387d31cf488266e12fec83c4e0c4 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 24 Sep 2019 17:45:50 +0100 Subject: [PATCH 359/428] Fix non-unique 'count' bug in PetscLib Was using 'count' as a unique identifier, but 'count' is decremented when a PetscLib is destroyed. This commit adds 'unique_id' which is incremented each time a PetscLib is created, and never decremented. Also some small tidy-ups: forward-declare Options; make sure KSP is defined in the no-PETSc branch; add an empty body to createKSPWithOptions in the no-PETSc branch. --- include/bout/petsclib.hxx | 9 +++++++-- src/sys/petsclib.cxx | 5 ++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/include/bout/petsclib.hxx b/include/bout/petsclib.hxx index 08ad01e8f9..d30a9090a5 100644 --- a/include/bout/petsclib.hxx +++ b/include/bout/petsclib.hxx @@ -47,7 +47,7 @@ class PetscLib; #ifdef BOUT_HAS_PETSC -#include "options.hxx" +class Options; #include #include @@ -88,6 +88,7 @@ public: static void cleanup(); private: static int count; ///< How many instances? + static int unique_id; ///< Unique identifier for each created instance static char help[]; ///< Help string // Command-line arguments @@ -117,6 +118,10 @@ private: #include "unused.hxx" +// PETSc not available, so KSP not already defined. KSP should never be called, so forward +// declaration OK here. 
+class KSP; + class PetscLib { public: PetscLib(Options* UNUSED(opt) = nullptr) {} @@ -124,7 +129,7 @@ public: static void setArgs(int &UNUSED(c), char** &UNUSED(v)) {} - KSP createKSPWithOptions(MPI_Comm& comm); + KSP createKSPWithOptions(MPI_Comm& comm) {} static void cleanup() {} }; diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index 2ce738d6d7..035a2090ba 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -2,12 +2,14 @@ #ifdef BOUT_HAS_PETSC #include "boutcomm.hxx" +#include "options.hxx" #include #include // Define all the static member variables int PetscLib::count = 0; +int PetscLib::unique_id = 0; char PetscLib::help[] = "BOUT++: Uses finite difference methods to solve plasma fluid problems in curvilinear coordinates"; int *PetscLib::pargc = nullptr; char ***PetscLib::pargv = nullptr; @@ -35,11 +37,12 @@ PetscLib::PetscLib(Options* opt) { // options, and cannot be passed to KSP, etc. Non-global options can be passed by // defining a custom prefix for the options string, and then passing that to the KSP.) - options_prefix = "boutpetsclib" + std::to_string(count) + "_"; + options_prefix = "boutpetsclib" + std::to_string(unique_id) + "_"; setPetscOptions(*opt, options_prefix); } count++; + unique_id++; } PetscLib::~PetscLib() { From 08f23d6cb72b0c9208feb57c90a436afaff4a42a Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 24 Sep 2019 18:05:02 +0100 Subject: [PATCH 360/428] Pass-by-value in createKSPWithOptions() Otherwise createKSPWithOptions(BoutComm::get()) does not compile. --- include/bout/petsclib.hxx | 2 +- src/sys/petsclib.cxx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/bout/petsclib.hxx b/include/bout/petsclib.hxx index d30a9090a5..b2f1e4772a 100644 --- a/include/bout/petsclib.hxx +++ b/include/bout/petsclib.hxx @@ -79,7 +79,7 @@ public: static void setArgs(int &c, char** &v) { pargc = &c; pargv = &v;} /// Create a KSP linear solver that uses the options specific to this PetscLib - KSP createKSPWithOptions(MPI_Comm& comm); + KSP createKSPWithOptions(MPI_Comm comm); /*! * Force cleanup. 
This will call PetscFinalize, printing a warning diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index 035a2090ba..0efca92d43 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -55,7 +55,7 @@ PetscLib::~PetscLib() { } } -KSP PetscLib::createKSPWithOptions(MPI_Comm& comm) { +KSP PetscLib::createKSPWithOptions(MPI_Comm comm) { KSP ksp; auto ierr = KSPCreate(comm, &ksp); From 21d8cb51ff745e83374084cb827f15f7441705bb Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 24 Sep 2019 21:06:38 +0100 Subject: [PATCH 361/428] Add missing .c_str() --- src/sys/petsclib.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index 0efca92d43..11fead3f2e 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -115,7 +115,7 @@ void PetscLib::setPetscOptions(Options& options, const std::string& prefix) { } if (ierr) { throw BoutException("PetscOptionsSetValue returned error code %i when setting %s", - ierr, petsc_option_name); + ierr, petsc_option_name.c_str()); } } } From 0b069ec534a8ff10915e1688674c11c3a82798e1 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 28 Sep 2019 22:49:44 +0100 Subject: [PATCH 362/428] Pass by const reference in createKSPWithOptions() --- include/bout/petsclib.hxx | 2 +- src/sys/petsclib.cxx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/bout/petsclib.hxx b/include/bout/petsclib.hxx index b2f1e4772a..0b200577f8 100644 --- a/include/bout/petsclib.hxx +++ b/include/bout/petsclib.hxx @@ -79,7 +79,7 @@ public: static void setArgs(int &c, char** &v) { pargc = &c; pargv = &v;} /// Create a KSP linear solver that uses the options specific to this PetscLib - KSP createKSPWithOptions(MPI_Comm comm); + KSP createKSPWithOptions(const MPI_Comm& comm); /*! * Force cleanup. This will call PetscFinalize, printing a warning diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index 11fead3f2e..5e5248b9fe 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -55,7 +55,7 @@ PetscLib::~PetscLib() { } } -KSP PetscLib::createKSPWithOptions(MPI_Comm comm) { +KSP PetscLib::createKSPWithOptions(const MPI_Comm& comm) { KSP ksp; auto ierr = KSPCreate(comm, &ksp); From 1683fb32daad5a2e1e1e03abb844c8613b715658 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 28 Sep 2019 23:07:47 +0100 Subject: [PATCH 363/428] Remove KSPSetOptionsPrefix(ksp, "invertable_") in InvertableOperator Setting the prefix conflicts with setting Petsc options in the input file. --- include/bout/invertable_operator.hxx | 3 --- 1 file changed, 3 deletions(-) diff --git a/include/bout/invertable_operator.hxx b/include/bout/invertable_operator.hxx index 39756b1cb7..5380afffa9 100644 --- a/include/bout/invertable_operator.hxx +++ b/include/bout/invertable_operator.hxx @@ -361,9 +361,6 @@ public: ierr = KSPSetInitialGuessNonzero(ksp, PETSC_TRUE); CHKERRQ(ierr); - /// Allow options to be set on command line using a --invertable_ksp_* prefix. - ierr = KSPSetOptionsPrefix(ksp, "invertable_"); - CHKERRQ(ierr); ierr = KSPSetFromOptions(ksp); CHKERRQ(ierr); From f802aafb267089bbcfd5f28d28617025dc7efc84 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Sat, 28 Sep 2019 23:42:00 +0100 Subject: [PATCH 364/428] PetscLib method for setting options instead of one for creating KSP Change the PetscLib method that sets the options prefix from createKSPWithOptions (that also calls KSPCreate()) to setOptionsFromInputFile (that also calls KSPSetFromOptions()). 
This gives more logical naming, and more logical grouping of functionality. --- include/bout/invertable_operator.hxx | 7 +++++-- include/bout/petsclib.hxx | 9 ++++++--- src/invert/laplace/impls/petsc/petsc_laplace.cxx | 4 ++-- src/invert/laplacexy/laplacexy.cxx | 4 ++-- .../laplacexz/impls/petsc/laplacexz-petsc.cxx | 4 ++-- src/sys/petsclib.cxx | 14 +++++--------- 6 files changed, 22 insertions(+), 20 deletions(-) diff --git a/include/bout/invertable_operator.hxx b/include/bout/invertable_operator.hxx index 5380afffa9..099591b2a4 100644 --- a/include/bout/invertable_operator.hxx +++ b/include/bout/invertable_operator.hxx @@ -339,7 +339,8 @@ public: CHKERRQ(ierr); /// Now create and setup the linear solver with the matrix - ksp = lib.createKSPWithOptions(BoutComm::get()); + ierr = KSPCreate(BoutComm::get(), &ksp); + CHKERRQ(ierr); #if PETSC_VERSION_LT(3, 5, 0) /// Need to provide a MatStructure flag in versions <3.5. This details if we expect @@ -361,7 +362,9 @@ public: ierr = KSPSetInitialGuessNonzero(ksp, PETSC_TRUE); CHKERRQ(ierr); - ierr = KSPSetFromOptions(ksp); + ierr = KSPSetOptionsPrefix(ksp, "invertable_"); + CHKERRQ(ierr); + lib.setOptionsFromInputFile(ksp); CHKERRQ(ierr); /// Do required setup so solve can proceed in invert diff --git a/include/bout/petsclib.hxx b/include/bout/petsclib.hxx index 0b200577f8..9614677413 100644 --- a/include/bout/petsclib.hxx +++ b/include/bout/petsclib.hxx @@ -78,8 +78,11 @@ public: */ static void setArgs(int &c, char** &v) { pargc = &c; pargv = &v;} - /// Create a KSP linear solver that uses the options specific to this PetscLib - KSP createKSPWithOptions(const MPI_Comm& comm); + /// Set options for a KSP linear solver that uses the options specific to this PetscLib, + /// by setting an options prefix for the KSP, and adding that prefix to all the options + /// set in the [petsc] section, or [petsc] subsection of the options, if non-null 'opt' + /// was passed to the constructor. + void setOptionsFromInputFile(KSP& ksp); /*! * Force cleanup. 
This will call PetscFinalize, printing a warning @@ -129,7 +132,7 @@ public: static void setArgs(int &UNUSED(c), char** &UNUSED(v)) {} - KSP createKSPWithOptions(MPI_Comm& comm) {} + KSP setOptionsFromInputFile(KSP& ksp) {} static void cleanup() {} }; diff --git a/src/invert/laplace/impls/petsc/petsc_laplace.cxx b/src/invert/laplace/impls/petsc/petsc_laplace.cxx index a20bf2f968..64251e3a6f 100644 --- a/src/invert/laplace/impls/petsc/petsc_laplace.cxx +++ b/src/invert/laplace/impls/petsc/petsc_laplace.cxx @@ -274,7 +274,7 @@ LaplacePetsc::LaplacePetsc(Options *opt, const CELL_LOC loc, Mesh *mesh_in) : MatSetUp(MatA); // Declare KSP Context (abstract PETSc object that manages all Krylov methods) - ksp = lib.createKSPWithOptions(comm); + KSPCreate(comm, &ksp); // Get KSP Solver Type (Generalizes Minimal RESidual is the default) ksptype = (*opts)["ksptype"].doc("KSP solver type").withDefault(KSP_GMRES); @@ -783,7 +783,7 @@ FieldPerp LaplacePetsc::solve(const FieldPerp& b, const FieldPerp& x0) { //ierr = KSPSetPCSide(ksp, PC_RIGHT);CHKERRQ(ierr); } - KSPSetFromOptions( ksp ); + lib.setOptionsFromInputFile(ksp); } } diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index 7ade2bdb8d..b9acbc1117 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -493,7 +493,7 @@ LaplaceXY::LaplaceXY(Mesh* m, Options* opt, const CELL_LOC loc) // Set up KSP // Declare KSP Context - ksp = lib.createKSPWithOptions(comm); + KSPCreate(comm, &ksp); // Configure Linear Solver @@ -550,7 +550,7 @@ LaplaceXY::LaplaceXY(Mesh* m, Options* opt, const CELL_LOC loc) } } - KSPSetFromOptions( ksp ); + lib.setOptionsFromInputFile(ksp); /////////////////////////////////////////////////// // Including Y derivatives? diff --git a/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx b/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx index 4e53a4d9c7..3967676f4e 100644 --- a/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx +++ b/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx @@ -228,7 +228,7 @@ LaplaceXZpetsc::LaplaceXZpetsc(Mesh *m, Options *opt, const CELL_LOC loc) ////////////////////////////////////////////////// // Declare KSP Context - data.ksp = lib.createKSPWithOptions(comm); + KSPCreate(comm, &data.ksp); // Set KSP type KSPSetType( data.ksp, ksptype.c_str() ); @@ -245,7 +245,7 @@ LaplaceXZpetsc::LaplaceXZpetsc(Mesh *m, Options *opt, const CELL_LOC loc) PCFactorSetMatSolverPackage(pc,factor_package.c_str()); #endif - KSPSetFromOptions( data.ksp ); + lib.setOptionsFromInputFile(data.ksp); /// Add to slice vector slice.push_back(data); diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index 5e5248b9fe..a5abbe9450 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -55,20 +55,16 @@ PetscLib::~PetscLib() { } } -KSP PetscLib::createKSPWithOptions(const MPI_Comm& comm) { - KSP ksp; - - auto ierr = KSPCreate(comm, &ksp); +void PetscLib::setOptionsFromInputFile(KSP& ksp) { + auto ierr = KSPSetOptionsPrefix(ksp, options_prefix.c_str()); if (ierr) { - throw BoutException("KSPCreate failed with error %i", ierr); + throw BoutException("KSPSetOptionsPrefix failed with error %i", ierr); } - ierr = KSPSetOptionsPrefix(ksp, options_prefix.c_str()); + ierr = KSPSetFromOptions(ksp); if (ierr) { - throw BoutException("KSPSetOptionsPrefix failed with error %i", ierr); + throw BoutException("KSPSetFromOptions failed with error %i", ierr); } - - return ksp; } void PetscLib::cleanup() { From 9df655e935892885e17cc2fba554510bd3c2d4a1 Mon Sep 17 
00:00:00 2001 From: John Omotani Date: Tue, 1 Oct 2019 14:35:46 +0100 Subject: [PATCH 365/428] Tidy up null-value option passing in PetscLib --- manual/sphinx/user_docs/petsc.rst | 13 ++++++++----- src/sys/petsclib.cxx | 30 ++++++++++++++---------------- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/manual/sphinx/user_docs/petsc.rst b/manual/sphinx/user_docs/petsc.rst index 63118d8720..a338a75adb 100644 --- a/manual/sphinx/user_docs/petsc.rst +++ b/manual/sphinx/user_docs/petsc.rst @@ -15,17 +15,20 @@ passed on the command line [*]_, will be ignored for that solver if the subsecti is created. Any options that can be passed on the command line to PETSc can be set, with no preceding -hyphen. Any PETSc options that are passed as flags with no value set should be given the -value ``true`` so BOUT++ knows to read them. So for example, if the command line options -would be:: +hyphen. Flags passed with no value can be passed as options with no value. Internally, +BOUT++ sets options passed with no value to ``"true"``, and ``PetscLib`` passes a null +pointer signifying no value when the option value is ``"true"``. As a workaround, to pass +the string ``"true"`` to the PETSc option, set the option to ``petsc_true``. So for +example, if the command line options would be:: - -ksp_monitor -ksp_type gmres + -ksp_monitor -ksp_type gmres -some_boolean_option true in the input file you would put:: [petsc] - ksp_monitor = true + ksp_monitor ksp_type = gmres + some_boolean_option = petsc_true .. [*] The object-specific options are passed to PETSc by creating an object-specific diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index a5abbe9450..1cbffa95c6 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -86,29 +86,27 @@ void PetscLib::setPetscOptions(Options& options, const std::string& prefix) { "values are allowed in the PETSc options, not subsections", i.first.c_str(), options.str().c_str()); } + // Note, option names in the input file don't start with "-", but need to be passed // to PETSc with "-" prepended - PetscErrorCode ierr; auto petsc_option_name = "-"+prefix+i.first; - if (lowercase(i.second) == "true") { - // PETSc flag with no value -#if PETSC_VERSION_GE(3, 7, 0) - ierr = PetscOptionsSetValue(nullptr, petsc_option_name.c_str(), nullptr); -#else -// no PetscOptions as first argument - ierr = PetscOptionsSetValue(petsc_option_name.c_str(), nullptr); -#endif - } else { - // Option with actual value to pass + const char* value = + // "true" is the value given to an option with no value, when read from BOUT.inp + lowercase(i.second) == "true" ? nullptr : + // workaround to allow passing "true" to the petsc option + lowercase(i.second) == "petsc_true" ? "true" : + // allow this for symmetry with "petsc_true" + lowercase(i.second) == "petsc_false" ? 
"false" : + // default case: pass the value of the option + i.second.as().c_str(); + + PetscErrorCode ierr; #if PETSC_VERSION_GE(3, 7, 0) - ierr = PetscOptionsSetValue(nullptr, petsc_option_name.c_str(), - i.second.as().c_str()); + ierr = PetscOptionsSetValue(nullptr, petsc_option_name.c_str(), value); #else // no PetscOptions as first argument - ierr = PetscOptionsSetValue(petsc_option_name.c_str(), - i.second.as().c_str()); + ierr = PetscOptionsSetValue(petsc_option_name.c_str(), value); #endif - } if (ierr) { throw BoutException("PetscOptionsSetValue returned error code %i when setting %s", ierr, petsc_option_name.c_str()); From b1e2c19b95e3d22e98070063847912c224fdfda6 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 1 Oct 2019 15:26:25 +0100 Subject: [PATCH 366/428] Remove unneeded work-around for passing "true" to PETSc options --- manual/sphinx/user_docs/petsc.rst | 10 +++------- src/sys/petsclib.cxx | 13 +++++-------- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/manual/sphinx/user_docs/petsc.rst b/manual/sphinx/user_docs/petsc.rst index a338a75adb..02c9865255 100644 --- a/manual/sphinx/user_docs/petsc.rst +++ b/manual/sphinx/user_docs/petsc.rst @@ -15,20 +15,16 @@ passed on the command line [*]_, will be ignored for that solver if the subsecti is created. Any options that can be passed on the command line to PETSc can be set, with no preceding -hyphen. Flags passed with no value can be passed as options with no value. Internally, -BOUT++ sets options passed with no value to ``"true"``, and ``PetscLib`` passes a null -pointer signifying no value when the option value is ``"true"``. As a workaround, to pass -the string ``"true"`` to the PETSc option, set the option to ``petsc_true``. So for -example, if the command line options would be:: +hyphen. Flags passed with no value can be passed as options with no value. So +for example, if the command line options would be:: - -ksp_monitor -ksp_type gmres -some_boolean_option true + -ksp_monitor -ksp_type gmres in the input file you would put:: [petsc] ksp_monitor ksp_type = gmres - some_boolean_option = petsc_true .. [*] The object-specific options are passed to PETSc by creating an object-specific diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index 1cbffa95c6..b86c424d40 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -90,15 +90,12 @@ void PetscLib::setPetscOptions(Options& options, const std::string& prefix) { // Note, option names in the input file don't start with "-", but need to be passed // to PETSc with "-" prepended auto petsc_option_name = "-"+prefix+i.first; + + // "true" is the value given to an option with no value, when read from BOUT.inp. Also + // when nullptr is passed to PetscOptionsSetValue for a boolean option, it defaults to + // true so we should always be OK passing nullptr for null or "true". const char* value = - // "true" is the value given to an option with no value, when read from BOUT.inp - lowercase(i.second) == "true" ? nullptr : - // workaround to allow passing "true" to the petsc option - lowercase(i.second) == "petsc_true" ? "true" : - // allow this for symmetry with "petsc_true" - lowercase(i.second) == "petsc_false" ? "false" : - // default case: pass the value of the option - i.second.as().c_str(); + lowercase(i.second) == "true" ? 
nullptr : i.second.as().c_str(); PetscErrorCode ierr; #if PETSC_VERSION_GE(3, 7, 0) From 1fd0c2c88281ae67aa1e16e7df7a51873b125e8c Mon Sep 17 00:00:00 2001 From: johnomotani Date: Wed, 20 May 2020 21:35:08 +0100 Subject: [PATCH 367/428] Tidy up PetscLib (suggestions from code review) Co-authored-by: Peter Hill --- include/bout/petsclib.hxx | 6 +++--- src/sys/petsclib.cxx | 6 ++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/include/bout/petsclib.hxx b/include/bout/petsclib.hxx index 9614677413..3de619be98 100644 --- a/include/bout/petsclib.hxx +++ b/include/bout/petsclib.hxx @@ -63,7 +63,7 @@ public: /*! * Ensure that PETSc has been initialised */ - PetscLib(Options* opt = nullptr); + explicit PetscLib(Options* opt = nullptr); /*! * Calls PetscFinalize when all PetscLib instances are destroyed @@ -127,12 +127,12 @@ class KSP; class PetscLib { public: - PetscLib(Options* UNUSED(opt) = nullptr) {} + explicit PetscLib(Options* UNUSED(opt) = nullptr) {} ~PetscLib() {} static void setArgs(int &UNUSED(c), char** &UNUSED(v)) {} - KSP setOptionsFromInputFile(KSP& ksp) {} + void setOptionsFromInputFile(KSP& ksp) {} static void cleanup() {} }; diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index b86c424d40..e1236e9201 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -97,12 +97,11 @@ void PetscLib::setPetscOptions(Options& options, const std::string& prefix) { const char* value = lowercase(i.second) == "true" ? nullptr : i.second.as().c_str(); - PetscErrorCode ierr; #if PETSC_VERSION_GE(3, 7, 0) - ierr = PetscOptionsSetValue(nullptr, petsc_option_name.c_str(), value); + const auto ierr = PetscOptionsSetValue(nullptr, petsc_option_name.c_str(), value); #else // no PetscOptions as first argument - ierr = PetscOptionsSetValue(petsc_option_name.c_str(), value); + const auto ierr = PetscOptionsSetValue(petsc_option_name.c_str(), value); #endif if (ierr) { throw BoutException("PetscOptionsSetValue returned error code %i when setting %s", @@ -111,4 +110,3 @@ void PetscLib::setPetscOptions(Options& options, const std::string& prefix) { } } #endif // BOUT_HAS_PETSC - From df6cb0ba00f8098dd8942d6c086ca4bee4813bfd Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 20 May 2020 21:58:32 +0100 Subject: [PATCH 368/428] Remove PetscLib::unique_id, use options section name instead Might happen that options section is used by more than one solver, but then the same options will just be (re-)set more than once, which should not be an error, so options section name is 'unique enough'. --- include/bout/petsclib.hxx | 1 - src/sys/petsclib.cxx | 15 +++++++-------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/include/bout/petsclib.hxx b/include/bout/petsclib.hxx index 3de619be98..3452710cfb 100644 --- a/include/bout/petsclib.hxx +++ b/include/bout/petsclib.hxx @@ -91,7 +91,6 @@ public: static void cleanup(); private: static int count; ///< How many instances? 
- static int unique_id; ///< Unique identifier for each created instance static char help[]; ///< Help string // Command-line arguments diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index e1236e9201..3cda4eedb1 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -9,21 +9,20 @@ // Define all the static member variables int PetscLib::count = 0; -int PetscLib::unique_id = 0; char PetscLib::help[] = "BOUT++: Uses finite difference methods to solve plasma fluid problems in curvilinear coordinates"; int *PetscLib::pargc = nullptr; char ***PetscLib::pargv = nullptr; PetscLogEvent PetscLib::USER_EVENT = 0; PetscLib::PetscLib(Options* opt) { - if(count == 0) { + if (count == 0) { // Initialise PETSc - + output << "Initialising PETSc\n"; PETSC_COMM_WORLD = BoutComm::getInstance()->getComm(); - PetscInitialize(pargc,pargv,PETSC_NULL,help); - PetscLogEventRegister("Total BOUT++",0,&USER_EVENT); - PetscLogEventBegin(USER_EVENT,0,0,0,0); + PetscInitialize(pargc, pargv, PETSC_NULL, help); + PetscLogEventRegister("Total BOUT++", 0, &USER_EVENT); + PetscLogEventBegin(USER_EVENT, 0, 0, 0, 0); // Load global PETSc options from the [petsc] section of the input setPetscOptions(Options::root()["petsc"], ""); @@ -37,12 +36,12 @@ PetscLib::PetscLib(Options* opt) { // options, and cannot be passed to KSP, etc. Non-global options can be passed by // defining a custom prefix for the options string, and then passing that to the KSP.) - options_prefix = "boutpetsclib" + std::to_string(unique_id) + "_"; + options_prefix = "boutpetsclib_" + opt->str() + "_"; setPetscOptions(*opt, options_prefix); } + count++; - unique_id++; } PetscLib::~PetscLib() { From 1d4369a0d9e7ba840d6167766c0558db8a888b75 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Wed, 20 May 2020 22:05:35 +0100 Subject: [PATCH 369/428] Get "petsc" options section inside PetscLib Simplifies calls to PetscLib constructor in the library. --- include/bout/invertable_operator.hxx | 2 +- src/invert/laplace/impls/petsc/petsc_laplace.cxx | 2 +- src/invert/laplacexy/laplacexy.cxx | 2 +- src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx | 2 +- src/sys/petsclib.cxx | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/bout/invertable_operator.hxx b/include/bout/invertable_operator.hxx index 099591b2a4..83127e27a3 100644 --- a/include/bout/invertable_operator.hxx +++ b/include/bout/invertable_operator.hxx @@ -135,7 +135,7 @@ public: opt(optIn == nullptr ? Options::getRoot()->getSection("invertableOperator") : optIn), localmesh(localmeshIn == nullptr ? bout::globals::mesh : localmeshIn), - lib(&(*opt)["petsc"]) { + lib(opt) { AUTO_TRACE(); }; diff --git a/src/invert/laplace/impls/petsc/petsc_laplace.cxx b/src/invert/laplace/impls/petsc/petsc_laplace.cxx index 64251e3a6f..d24b135741 100644 --- a/src/invert/laplace/impls/petsc/petsc_laplace.cxx +++ b/src/invert/laplace/impls/petsc/petsc_laplace.cxx @@ -62,7 +62,7 @@ LaplacePetsc::LaplacePetsc(Options *opt, const CELL_LOC loc, Mesh *mesh_in) : Laplacian(opt, loc, mesh_in), A(0.0), C1(1.0), C2(1.0), D(1.0), Ex(0.0), Ez(0.0), issetD(false), issetC(false), issetE(false), - lib(opt==nullptr ? &(Options::root()["laplace"]["petsc"]) : &(*opt)["petsc"]) + lib(opt==nullptr ? 
&(Options::root()["laplace"]) : opt) { A.setLocation(location); C1.setLocation(location); diff --git a/src/invert/laplacexy/laplacexy.cxx b/src/invert/laplacexy/laplacexy.cxx index b9acbc1117..4c4276c1c0 100644 --- a/src/invert/laplacexy/laplacexy.cxx +++ b/src/invert/laplacexy/laplacexy.cxx @@ -32,7 +32,7 @@ static PetscErrorCode laplacePCapply(PC pc,Vec x,Vec y) { int LaplaceXY::instance_count = 0; LaplaceXY::LaplaceXY(Mesh* m, Options* opt, const CELL_LOC loc) - : lib(opt == nullptr ? &(Options::root()["laplacexy"]["petsc"]) : &(*opt)["petsc"]), + : lib(opt == nullptr ? &(Options::root()["laplacexy"]) : opt), localmesh(m == nullptr ? bout::globals::mesh : m), location(loc), monitor(*this) { Timer timer("invert"); diff --git a/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx b/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx index 3967676f4e..4900ab4698 100644 --- a/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx +++ b/src/invert/laplacexz/impls/petsc/laplacexz-petsc.cxx @@ -20,7 +20,7 @@ LaplaceXZpetsc::LaplaceXZpetsc(Mesh *m, Options *opt, const CELL_LOC loc) : LaplaceXZ(m, opt, loc), - lib(opt==nullptr ? &(Options::root()["laplacexz"]["petsc"]) : &(*opt)["petsc"]), + lib(opt==nullptr ? &(Options::root()["laplacexz"]) : opt), coefs_set(false) { /* Constructor: LaplaceXZpetsc diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index 3cda4eedb1..4deaba65c9 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -38,7 +38,7 @@ PetscLib::PetscLib(Options* opt) { options_prefix = "boutpetsclib_" + opt->str() + "_"; - setPetscOptions(*opt, options_prefix); + setPetscOptions((*opt)["petsc"], options_prefix); } count++; From 720be854dd5a8b5a8a78f274893b4cdab5569fbe Mon Sep 17 00:00:00 2001 From: johnomotani Date: Fri, 22 May 2020 14:20:24 +0100 Subject: [PATCH 370/428] Fix unused parameter warning --- include/bout/petsclib.hxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/bout/petsclib.hxx b/include/bout/petsclib.hxx index 3452710cfb..8262f6cfeb 100644 --- a/include/bout/petsclib.hxx +++ b/include/bout/petsclib.hxx @@ -131,7 +131,7 @@ public: static void setArgs(int &UNUSED(c), char** &UNUSED(v)) {} - void setOptionsFromInputFile(KSP& ksp) {} + void setOptionsFromInputFile(KSP& UNUSED(ksp)) {} static void cleanup() {} }; From 341666276a0f4045450fd3a5aef07fc1a3d09131 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 22 May 2020 16:37:35 +0200 Subject: [PATCH 371/428] Remove underscore from PetscLib prefix Petsc sometimes prints the prefix to identify the solver (e.g. when using -ksp_monitor). This looks nicer without a trailing underscore. --- src/sys/petsclib.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index 4deaba65c9..47359e067a 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -36,7 +36,7 @@ PetscLib::PetscLib(Options* opt) { // options, and cannot be passed to KSP, etc. Non-global options can be passed by // defining a custom prefix for the options string, and then passing that to the KSP.) 
- options_prefix = "boutpetsclib_" + opt->str() + "_"; + options_prefix = "boutpetsclib_" + opt->str(); setPetscOptions((*opt)["petsc"], options_prefix); } From 16df85c56ec8715f59034b4d9588a0ea21e51f4f Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 22 May 2020 17:43:57 +0200 Subject: [PATCH 372/428] Avoid double-read of petsc options --- src/sys/petsclib.cxx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index 47359e067a..0971ee49b6 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -90,11 +90,12 @@ void PetscLib::setPetscOptions(Options& options, const std::string& prefix) { // to PETSc with "-" prepended auto petsc_option_name = "-"+prefix+i.first; + auto str_value = i.second.as(); // "true" is the value given to an option with no value, when read from BOUT.inp. Also // when nullptr is passed to PetscOptionsSetValue for a boolean option, it defaults to // true so we should always be OK passing nullptr for null or "true". const char* value = - lowercase(i.second) == "true" ? nullptr : i.second.as().c_str(); + str_value == "true" ? nullptr : str_value.c_str(); #if PETSC_VERSION_GE(3, 7, 0) const auto ierr = PetscOptionsSetValue(nullptr, petsc_option_name.c_str(), value); From 607fbf41aabb7938866bc6a8859a2408096bcd51 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Fri, 22 May 2020 17:55:51 +0200 Subject: [PATCH 373/428] Update manual for PETSc options --- manual/sphinx/user_docs/petsc.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/manual/sphinx/user_docs/petsc.rst b/manual/sphinx/user_docs/petsc.rst index 02c9865255..1cb05758d3 100644 --- a/manual/sphinx/user_docs/petsc.rst +++ b/manual/sphinx/user_docs/petsc.rst @@ -12,7 +12,8 @@ particular PETSc-based solver, the options can be set in a ``petsc`` subsection solver's options, e.g. for a LaplaceXY solver (using the default options section) use the ``[laplacexy:petsc]`` section. Note that the global options, including any passed on the command line [*]_, will be ignored for that solver if the subsection -is created. +is created. To set options from the command line, it is recommended to use the BOUT++ +options system rather than PETSc's, e.g. ``./mymodel laplacexy:petsc:type=gmres``. Any options that can be passed on the command line to PETSc can be set, with no preceding hyphen. Flags passed with no value can be passed as options with no value. So @@ -20,20 +21,19 @@ for example, if the command line options would be:: -ksp_monitor -ksp_type gmres -in the input file you would put:: +to set for the LaplaceXY solver, in the input file you would put:: - [petsc] + [laplacexy:petsc] ksp_monitor ksp_type = gmres .. [*] The object-specific options are passed to PETSc by creating an object-specific - prefix ``boutpetsclib#_``, where ``#`` is replaced with an integer counter, - counting the number of PetscLib instances. So an option could in principle be - passed to a particular solver if you work out what the counter is for that solver, - e.g.:: + prefix ``boutpetsclib_``, where ```` is the name of the + options section used to create the PetscLib. So an option could in principle be + passed to a particular solver if you use the section name, e.g.:: - -boutpetsclib1_ksp_type gmres + -boutpetsclib_laplacexyksp_type gmres The PETSc arguments ``-options_view`` and ``options_left`` might be helpful for this - they will show what options have been set, so will show the prefixes used. 
From 2733d2fed9b3810b59f080b89abe6132acc1c133 Mon Sep 17 00:00:00 2001 From: John Omotani Date: Tue, 26 May 2020 10:31:36 +0100 Subject: [PATCH 374/428] Move forward-declare of class Options outside "#if BOUT_HAS_PETSC" The forward-declare is also needed in the #else branch. --- include/bout/petsclib.hxx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/bout/petsclib.hxx b/include/bout/petsclib.hxx index 8262f6cfeb..6b1c26b873 100644 --- a/include/bout/petsclib.hxx +++ b/include/bout/petsclib.hxx @@ -45,10 +45,10 @@ class PetscLib; #ifndef __PETSCLIB_H__ #define __PETSCLIB_H__ -#ifdef BOUT_HAS_PETSC - class Options; +#ifdef BOUT_HAS_PETSC + #include #include From 075e9fd486331b6d1063f2c17f6d0ad58bfad7e2 Mon Sep 17 00:00:00 2001 From: David Dickinson Date: Fri, 22 Nov 2019 13:41:29 +0000 Subject: [PATCH 375/428] Adding an adaptive, arbitrary order, Adams-Bashforth solver Currently performs similarly to other adaptive explicit solvers such as rkgeneric. Has the advantage that in certain cases we need the minimal number of rhs calls per step possible. Could be modified to add adaptive order as well as adaptive timestep. This may help improve performance in cases where solver stability can be limiting. --- .../impls/adams_bashforth/adams_bashforth.cxx | 390 ++++++++++++++++++ .../impls/adams_bashforth/adams_bashforth.hxx | 375 +++++++++++++++++ src/solver/impls/adams_bashforth/makefile | 8 + src/solver/impls/makefile | 3 +- src/solver/solverfactory.cxx | 1 + 5 files changed, 776 insertions(+), 1 deletion(-) create mode 100644 src/solver/impls/adams_bashforth/adams_bashforth.cxx create mode 100644 src/solver/impls/adams_bashforth/adams_bashforth.hxx create mode 100644 src/solver/impls/adams_bashforth/makefile diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx new file mode 100644 index 0000000000..b6c33f4d0e --- /dev/null +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -0,0 +1,390 @@ +#include "adams_bashforth.hxx" + +#include +#include +#include +#include + +#include + +AdamsBashforthSolver::AdamsBashforthSolver(Options* options) : Solver(options) { + AUTO_TRACE(); + canReset = true; +} + +void AdamsBashforthSolver::setMaxTimestep(BoutReal dt) { + AUTO_TRACE(); + if (dt > timestep) + return; // Already less than this + + if (adaptive) // Should we throw if we're not adaptive as we've tried to set a timestep + // limit but couldn't? 
+ timestep = dt; // Won't be used this time, but next +} + +int AdamsBashforthSolver::init(int nout, BoutReal tstep) { + + TRACE("Initialising AdamsBashforth solver"); + + /// Call the generic initialisation first + if (Solver::init(nout, tstep)) + return 1; + + output << "\n\tAdams-Bashforth (explicit) multistep solver\n"; + + nsteps = nout; // Save number of output steps + out_timestep = tstep; + max_dt = tstep; + + // Calculate number of variables + nlocal = getLocalN(); + + // Get total problem size + int ntmp; + if (MPI_Allreduce(&nlocal, &ntmp, 1, MPI_INT, MPI_SUM, BoutComm::get())) { + throw BoutException("MPI_Allreduce failed!"); + } + neq = ntmp; + + output.write("\t3d fields = %d, 2d fields = %d neq=%d, local_N=%d\n", n3Dvars(), + n2Dvars(), neq, nlocal); + + // Get options + atol = (*options)["atol"] + .doc("Absolute tolerance") + .withDefault(1.e-5); // Not used, just here for parity + rtol = (*options)["rtol"].doc("Relative tolerance").withDefault(1.e-5); + dtFac = (*options)["dtFac"] + .doc("Factor by which we scale timestep estimate when adapating") + .withDefault(0.75); + max_timestep = (*options)["max_timestep"].doc("Maximum timestep").withDefault(tstep); + timestep = (*options)["timestep"].doc("Starting timestep").withDefault(max_timestep); + mxstep = (*options)["mxstep"] + .doc("Maximum number of steps taken between outputs") + .withDefault(50000); + adaptive = + (*options)["adaptive"].doc("Adapt internal timestep using rtol.").withDefault(true); + maximum_order = + (*options)["order"].doc("The requested maximum order of the scheme").withDefault(5); + followHighOrder = + (*options)["followHighOrder"] + .doc("If true and adaptive then use the more accurate solution as result.") + .withDefault(true); + + // Check if the requested timestep in the non-adaptive case would lead to us + // effectively violating the MXSTEP specified. + if (not adaptive and (out_timestep / timestep > mxstep)) { + throw BoutException("ERROR: Requested timestep would lead to MXSTEP being exceeded. " + "timestep = %e, MXSTEP=%i\n", + timestep, mxstep); + } + + // Put starting values into states + state.reallocate(nlocal); + nextState.reallocate(nlocal); + std::fill(std::begin(nextState), std::end(nextState), 0.0); + save_vars(std::begin(state)); + + // Set the starting order + current_order = 1; + + return 0; +} + +void AdamsBashforthSolver::resetInternalFields() { + AUTO_TRACE(); + + // History and times + history.clear(); + times.clear(); + + // Order + current_order = 1; + + // States + std::fill(std::begin(nextState), std::end(nextState), 0.0); + save_vars(std::begin(state)); +} + +int AdamsBashforthSolver::run() { + AUTO_TRACE(); + + // Just for developer diagnostics + int nwasted = 0; + int nwasted_following_fail = 0; + + for (int s = 0; s < nsteps; s++) { + BoutReal target = simtime + out_timestep; + + bool running = true; + int internal_steps = 0; + + // Take a single output time step + while (running) { + // Here's the derivative calculation at the current time + // Find d state/dt and store in history -- this doesn't + // need repeating whilst adapting timestep + run_rhs(simtime); + history.emplace_front(nlocal); + save_derivs(std::begin(history[0])); + times.emplace_front(simtime); + + // Just for developer diagnostics - set to true when the previous + // attempt at a time step failed. 
+ bool previous_fail = false; + + BoutReal dt; + + // Take a single internal time step + while (true) { + // Limit the timestep to the specified maximum + timestep = std::min(timestep, max_timestep); + + // We actually use dt to reflect the timestep actually used for an advance + // as we may modify it. + dt = timestep; + + // If running is true then we haven't yet finished this output step + running = true; + + // Check if we're going to reach our target time and adjust + // the timestep to ensure we don't go past it. Note this means + // that even when non-adaptive we may end up changing the + // timestep occassionally. This is ok here as the timestep + // code is completely general, but could potentially be an + // issue for other solvers. + if ((simtime + dt) >= target) { + dt = target - simtime; + running = false; + } + + // Take a step and get the error if adaptive + const BoutReal err = take_step(simtime, dt, current_order, state, nextState); + + // Calculate and check error if adaptive + if (adaptive) { + // Really the following should apply to both adaptive and non-adaptive + // approaches, but the non-adaptive can be determined without needing + // to do any solves so we check during init instead. + internal_steps++; + if (internal_steps > mxstep) + throw BoutException("ERROR: MXSTEP exceeded. timestep = %e, err=%e\n", + timestep, err); + + // Estimate the limiting timestep and update. This is + // really the estimate of the timestep for this current step + // that would just satisfy the tolerance. In cases where we + // move to the next step we actually end up using this new + // timestep for the next step. + const BoutReal dt_lim = dt * exp(-log(err / rtol) / current_order); + + if (err < rtol) { + // Try to limit increases in the timestep to no more than 5%. + timestep = std::min(timestep * 1.05, dt_lim * 0.75); + previous_fail = false; + break; + } else { + // Be more conservative if we've failed; + timestep = 0.75 * dt_lim; + if (previous_fail) { + nwasted_following_fail++; + } + previous_fail = true; + nwasted++; + } + } + } + + // Taken an internal step, update times + simtime += dt; + + if (current_order == maximum_order) { + // Ditch last history point + times.pop_back(); + history.pop_back(); + } else { + // Here we unconditionally increase the order if we've yet to + // reach the maximum. In general it is probably better to + // consider an adaptive order scheme to try to maximise the + // timestep we can take. This is something to explore in the + // future. + current_order++; + } + + // Call the per internal timestep monitors + call_timestep_monitors(simtime, dt); + + // Put the new state into state. + swap(state, nextState); + }; + + // Put result into variables + load_vars(std::begin(state)); + + // Ensure aux. variables are up to date. In the future it would be nice to + // provide a calc_aux(simtime) method on PhysicsModel (that could default to + // calling rhs) which ensures the aux. variables are up to date in order to + // avoid any additional unrequired work associated with run_rhs. + run_rhs(simtime); + + // Advance iteration number + iteration++; + + // Call the output step monitor function + if (call_monitors(simtime, s, nsteps)) + break; // Stop simulation + } + +#if CHECK > 4 + output << "\nNumber of wasted steps = " << nwasted << " and following a fail " + << nwasted_following_fail << "\n\n" + << endl; +#endif + return 0; +} + +// Updates the internal state (?) along with an error estimate? 
+// Should probably just try taking a step of given size with given +// order, leaving the error calculation for calling code +BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal dt, + const int order, Array& current, + Array& result) { + AUTO_TRACE(); + + // The initial error is 0.0 + BoutReal err = 0.0; + + // Calculate the coefficients for a single step of size dt + const auto coefs = + coefficients_calculator.get_adams_bashforth_coefficients(timeIn + dt, times, order); + + // Create some storage for the update to the state (i.e. state(timeIn + dt) = current + + // full_update). + Array full_update(nlocal); + + // Note we split the work here into initialisation with std::fill + // and a separate double loop to calculate the update. This is + // to ensure we can operate on the contiguous arrays in history + // in order. + std::fill(std::begin(full_update), std::end(full_update), 0.0); + + for (int j = 0; j < order; j++) { + const BoutReal factor = coefs[j]; + + BOUT_OMP(parallel for); + for (int i = 0; i < nlocal; i++) { + full_update[i] += history[j][i] * factor; + } + } + + // Calculate the new state given the history and current state. + // Could possibly skip the following calculation if adaptive and following the high + // order method. + // Possible to write this using algorithms, but until c++ 17 probably prefer the + // explicit loop as + // clearer, compatible with OMP and empirically slightly faster. + // std::transform(std::begin(current), std::end(current), std::begin(full_update), + // std::begin(result), std::plus{}); + BOUT_OMP(parallel for); + for (int i = 0; i < nlocal; i++) { + result[i] = current[i] + full_update[i]; + }; + + if (adaptive) { + + // Create some storage for the small step update and corresponding resulting state + Array result2(nlocal); + Array half_update(nlocal); + + // Use this variable to say how big the first small timestep should be as a fraction + // of the large timestep, dt. Here fixed to 0.5 to take two equally sized half steps + // but left here to enable developer experimentation. + constexpr BoutReal firstPart = 0.5; + + // ------------------------------------------- + // Take a small time step - note we don't need to call the rhs again just yet + // ------------------------------------------- + + // Calculate the coefficients to get to timeIn + dt * firstPart + const auto coefsFirstStep = coefficients_calculator.get_adams_bashforth_coefficients( + timeIn + dt * firstPart, times, order); + + // Initialise the update array to 0. + std::fill(std::begin(half_update), std::end(half_update), 0.0); + + for (int j = 0; j < order; j++) { + const BoutReal factor = coefsFirstStep[j]; + + BOUT_OMP(parallel for); + for (int i = 0; i < nlocal; i++) { + half_update[i] += history[j][i] * factor; + } + } + + // Now we have to calculate the state after the first small step as we will need to + // use this to calculate the derivatives at this point. 
+ // std::transform(std::begin(current), std::end(current), std::begin(half_update), + // std::begin(result2), std::plus{}); + BOUT_OMP(parallel for); + for (int i = 0; i < nlocal; i++) { + result2[i] = current[i] + half_update[i]; + }; + + // ------------------------------------------- + // Now do the second small timestep -- note we need to call rhs again + // ------------------------------------------- + + // Add storage to history and the current time to times + history.emplace_front(nlocal); + times.emplace_front(timeIn + dt * firstPart); + + // Put intermediate result into variables, call rhs and save the derivatives + load_vars(std::begin(result2)); + run_rhs(timeIn + + firstPart + * dt); // This is typically the most expensive part of this routine + save_derivs(std::begin(history[0])); + + // Calculate the coefficients to get to timeIn + dt + const auto coefsSecondStep = coefficients_calculator.get_adams_bashforth_coefficients( + timeIn + dt, times, order); + + for (int j = 0; j < order; j++) { + const BoutReal factor = coefsSecondStep[j]; + + BOUT_OMP(parallel for); + for (int i = 0; i < nlocal; i++) { + half_update[i] += history[j][i] * factor; + } + } + + // Restore fields to the original state + load_vars(std::begin(current)); + + // Drop the temporary history information + history.pop_front(); + times.pop_front(); + + // Note here we don't add a small change onto result2, we recalculate using the + // "full" two half step half_update + // std::transform(std::begin(current), std::end(current), std::begin(half_update), + // std::begin(result2), std::plus{}); + BOUT_OMP(parallel for); + for (int i = 0; i < nlocal; i++) { + result2[i] = current[i] + half_update[i]; + }; + + // Here we calculate the error by comparing the updates rather than output states + // this is to avoid issues where we have large fields but small derivatives (i.e. to + // avoid possible numerical issues at looking at the difference between two large + // numbers). + err = get_error(full_update, half_update); + + // Swap the result to use the more accurate value if requested + if (followHighOrder) { + swap(result, result2); + } + } + + return err; +} diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.hxx b/src/solver/impls/adams_bashforth/adams_bashforth.hxx new file mode 100644 index 0000000000..1686c67ceb --- /dev/null +++ b/src/solver/impls/adams_bashforth/adams_bashforth.hxx @@ -0,0 +1,375 @@ +/************************************************************************** + * Generic Adams Bashforth multistep scheme + * + * Always available, since doesn't depend on external library + * + ************************************************************************** + * Written by D Dickinson 2019 + * + * This file is part of BOUT++. + * + * BOUT++ is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * BOUT++ is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with BOUT++. If not, see . 
+ * + **************************************************************************/ + +class AdamsBashforthSolver; + +#ifndef __ADAMSBASHFORTH_SOLVER_H__ +#define __ADAMSBASHFORTH_SOLVER_H__ + +#include "mpi.h" + +#include +#include +#include + +#include + +namespace { +RegisterSolver registersolveradamsbashforth("adams-bashforth"); +} + +struct AdamsBashforthHelper { + BoutReal lagrange_at_position_denominator(const std::deque& grid, + const int position, const int order) const { + AUTO_TRACE(); + ASSERT2(position < order); + ASSERT2(order <= grid.size()); + + const auto xj = grid[position]; + + BoutReal result = 1.0; + for (int i = 0; i < order; i++) { + result /= (i != position) ? (xj - grid[i]) : 1.0; + } + return result; + }; + + BoutReal lagrange_at_position_numerator(const BoutReal varX, + const std::deque& grid, + const int position, const int order) const { + AUTO_TRACE(); + ASSERT2(position < order); + ASSERT2(order <= grid.size()); + BoutReal result = 1.0; + for (int i = 0; i < order; i++) { + result *= (i != position) ? (varX - grid[i]) : 1.0; + } + return result; + + // // Above could be rewritten as following but not sure this is more readable and + // // the floating comparison that seems to be required is less nice. Possibly that + // // we could use std::iota(grid.size()) as the iterator args and then use that value + // // to index grid (which we'd have to capture) instead but again that seems more + // complex. + // const auto tmp = grid[position]; + // return std::accumulate(std::begin(grid), std::end(grid), 1.0, + // [varX, tmp](BoutReal current, BoutReal gridVal) { + // return current * ((gridVal != tmp)? (varX - gridVal) : + // 1.0); + // }); + }; + + // Integrate using newton-cotes 9 rule + BoutReal integrate_lagrange_curve_nc9(const BoutReal theStart, const BoutReal theEnd, + const std::deque& points, + const int position, const int order) const { + AUTO_TRACE(); + constexpr int size = 9; + constexpr BoutReal fac = 4.0 / 14175.0; + constexpr std::array facs{989.0 * fac, 5888.0 * fac, -928.0 * fac, + 10496.0 * fac, -4540.0 * fac, 10496.0 * fac, + -928.0 * fac, 5888.0 * fac, 989.0 * fac}; + constexpr BoutReal stepFac = 1.0 / (size - 1.0); + const BoutReal stepSize = (theEnd - theStart) * stepFac; + + BoutReal result{0.0}; + for (int i = 0; i < size; i++) { + result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, + position, order); + } + return stepSize * result * lagrange_at_position_denominator(points, position, order); + }; + + // Integrate using newton-cotes 8 rule + BoutReal integrate_lagrange_curve_nc8(const BoutReal theStart, const BoutReal theEnd, + const std::deque& points, + const int position, const int order) const { + AUTO_TRACE(); + constexpr int size = 8; + constexpr BoutReal fac = 7.0 / 17280.0; + constexpr std::array facs{751.0 * fac, 3577.0 * fac, 1323.0 * fac, + 2989.0 * fac, 2989.0 * fac, 1323.0 * fac, + 3577.0 * fac, 751.0 * fac}; + constexpr BoutReal stepFac = 1.0 / (size - 1.0); + const BoutReal stepSize = (theEnd - theStart) * stepFac; + + BoutReal result{0.0}; + for (int i = 0; i < size; i++) { + result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, + position, order); + } + return stepSize * result * lagrange_at_position_denominator(points, position, order); + }; + + // Integrate using newton-cotes 7 rule + BoutReal integrate_lagrange_curve_nc7(const BoutReal theStart, const BoutReal theEnd, + const std::deque& points, + const int position, const int order) const { + AUTO_TRACE(); + 
constexpr int size = 7; + constexpr BoutReal fac = 1.0 / 140.0; + constexpr std::array facs{41.0 * fac, 216.0 * fac, 27.0 * fac, + 272.0 * fac, 27.0 * fac, 216.0 * fac, + 41.0 * fac}; + constexpr BoutReal stepFac = 1.0 / (size - 1.0); + const BoutReal stepSize = (theEnd - theStart) * stepFac; + + BoutReal result{0.0}; + for (int i = 0; i < size; i++) { + result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, + position, order); + } + return stepSize * result * lagrange_at_position_denominator(points, position, order); + }; + + // Integrate using newton-cotes 6 rule + BoutReal integrate_lagrange_curve_nc6(const BoutReal theStart, const BoutReal theEnd, + const std::deque& points, + const int position, const int order) const { + AUTO_TRACE(); + constexpr int size = 6; + constexpr BoutReal fac = 5.0 / 288.0; + constexpr std::array facs{19.0 * fac, 75.0 * fac, 50.0 * fac, + 50.0 * fac, 75.0 * fac, 19.0 * fac}; + constexpr BoutReal stepFac = 1.0 / (size - 1.0); + const BoutReal stepSize = (theEnd - theStart) * stepFac; + + BoutReal result{0}; + for (int i = 0; i < size; i++) { + result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, + position, order); + } + return stepSize * result * lagrange_at_position_denominator(points, position, order); + }; + + // Integrate using newton-cotes 5 rule (Boole) + BoutReal integrate_lagrange_curve_nc5(const BoutReal theStart, const BoutReal theEnd, + const std::deque& points, + const int position, const int order) const { + AUTO_TRACE(); + constexpr int size = 5; + constexpr BoutReal fac = 2.0 / 45.0; + constexpr std::array facs{7.0 * fac, 32.0 * fac, 12.0 * fac, + 32.0 * fac, 7.0 * fac}; + constexpr BoutReal stepFac = 1.0 / (size - 1.0); + const BoutReal stepSize = (theEnd - theStart) * stepFac; + + BoutReal result{0.0}; + for (int i = 0; i < size; i++) { + result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, + position, order); + } + return stepSize * result * lagrange_at_position_denominator(points, position, order); + }; + + // Integrate using newton-cotes 4 rule (Simpson 3/8) + BoutReal integrate_lagrange_curve_nc4(const BoutReal theStart, const BoutReal theEnd, + const std::deque& points, + const int position, const int order) const { + AUTO_TRACE(); + constexpr int size = 4; + constexpr BoutReal fac = 3.0 / 8.0; + constexpr std::array facs{1.0 * fac, 3.0 * fac, 3.0 * fac, 1.0 * fac}; + constexpr BoutReal stepFac = 1.0 / (size - 1.0); + const BoutReal stepSize = (theEnd - theStart) * stepFac; + + BoutReal result{0.0}; + for (int i = 0; i < size; i++) { + result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, + position, order); + } + return stepSize * result * lagrange_at_position_denominator(points, position, order); + }; + + // Integrate using newton-cotes 3 rule (Simpson) + BoutReal integrate_lagrange_curve_nc3(const BoutReal theStart, const BoutReal theEnd, + const std::deque& points, + const int position, const int order) const { + AUTO_TRACE(); + constexpr int size = 3; + constexpr BoutReal fac = 1.0 / 3.0; + constexpr std::array facs{1.0 * fac, 4.0 * fac, 1.0 * fac}; + constexpr BoutReal stepFac = 1.0 / (size - 1.0); + const BoutReal stepSize = (theEnd - theStart) * stepFac; + + BoutReal result{0.0}; + for (int i = 0; i < size; i++) { + result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, + position, order); + } + return stepSize * result * lagrange_at_position_denominator(points, position, order); + 
}; + + // Integrate using newton-cotes 2 rule (Trap) + BoutReal integrate_lagrange_curve_nc2(const BoutReal theStart, const BoutReal theEnd, + const std::deque& points, + const int position, const int order) const { + AUTO_TRACE(); + constexpr int size = 2; + constexpr BoutReal fac = 1.0 / 2.0; + constexpr std::array facs{1.0 * fac, 1.0 * fac}; + constexpr BoutReal stepFac = 1.0 / (size - 1.0); + const BoutReal stepSize = (theEnd - theStart) * stepFac; + + BoutReal result{0.0}; + for (int i = 0; i < size; i++) { + result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, + position, order); + } + return stepSize * result * lagrange_at_position_denominator(points, position, order); + }; + + // Integrate lagrange polynomial to find the coefficienst of the requested order (note + // don't currently + // request an order just try to work it out from number of points). + BoutReal integrate_lagrange_curve(const BoutReal theStart, const BoutReal theEnd, + const std::deque& points, + const int position, const int order) const { + AUTO_TRACE(); + ASSERT2(order <= points.size()); + + switch (order) { + case 1: + return integrate_lagrange_curve_nc2(theStart, theEnd, points, position, order); + case 2: + return integrate_lagrange_curve_nc3(theStart, theEnd, points, position, order); + case 3: + return integrate_lagrange_curve_nc4(theStart, theEnd, points, position, order); + case 4: + return integrate_lagrange_curve_nc5(theStart, theEnd, points, position, order); + case 5: + return integrate_lagrange_curve_nc6(theStart, theEnd, points, position, order); + case 6: + return integrate_lagrange_curve_nc7(theStart, theEnd, points, position, order); + case 7: + return integrate_lagrange_curve_nc8(theStart, theEnd, points, position, order); + default: + return integrate_lagrange_curve_nc9(theStart, theEnd, points, position, order); + } + }; + + // Calculate the set of Adams-Bashforth coefficients required to get from t = points[0] + // to t = nextPoint + // at the requested order. + std::vector + get_adams_bashforth_coefficients(const BoutReal nextPoint, + const std::deque& points, + const int order) const { + AUTO_TRACE(); + ASSERT2(order <= points.size()); + + std::vector result; + + for (int i = 0; i < order; i++) { + result.emplace_back( + integrate_lagrange_curve(points[0], nextPoint, points, i, order)); + }; + + return result; + }; +}; + +class AdamsBashforthSolver : public Solver { +public: + AdamsBashforthSolver(Options* options = nullptr); + ~AdamsBashforthSolver() = default; + + void resetInternalFields() override; + + // Utilities only used by the CTU bracket approach + void setMaxTimestep(BoutReal dt) override; + BoutReal getCurrentTimestep() override { return timestep; } + + // Setup solver and scheme + int init(int nout, BoutReal tstep) override; + + // Actually evolve + int run() override; + +private: + // Take a single timestep of specified order. If adaptive also calculates + // and returns an error estimate. + BoutReal take_step(const BoutReal timeIn, const BoutReal dt, const int order, + Array& current, Array& result); + + // Finds the maximum absolute error, i.e. Max(Abs(stateApprox - stateAccurate)) + // over all processors. 
+ BoutReal get_error(const Array& stateApprox, + const Array& stateAccurate) const { + AUTO_TRACE(); + BoutReal local_result = 0.0; + BoutReal err = 0.0; + + for (int i = 0; i < nlocal; i++) { + local_result = std::max(std::abs(stateAccurate[i] - stateApprox[i]), local_result); + + // The below is the more typical error calculation used in other solvers. + // We prefer the above definition as it provides a way to get a reasonable + // estimate of the limiting timestep. + // local_result = std::max(std::abs(stateAccurate[i] - + // stateApprox[i]) / (std::abs(stateAccurate[i]) + + // std::abs(stateApprox[i]) + atol), local_result); + } + + // Reduce over procs + if (MPI_Allreduce(&local_result, &err, 1, MPI_DOUBLE, MPI_MAX, BoutComm::get())) { + throw BoutException("MPI_Allreduce failed"); + } + return err; + }; + + // Coefficient calculator + AdamsBashforthHelper coefficients_calculator; + + // Holds the current/next state + Array state, nextState; + + // State history - we use deque's to make it easy to add/remove from + // either end. Whilst this looks like it might be expensive for + // states (rather than say using std::rotate with a std::vector to + // just move things around) we're relying on the Array store making + // it cheap to get a "new" array. + std::deque> history; // History of d state/dt values + std::deque times; // Times at which above states calculated + + // Inputs + BoutReal atol, rtol; // Tolerances for adaptive timestepping + BoutReal max_timestep; // Maximum timestep + int mxstep; // Maximum number of internal steps between outputs + bool adaptive; // Adapt timestep? + bool + followHighOrder; // If true and adaptive the solution used is the more accurate one. + BoutReal dtFac; // Factor we scale timestep estimate by when adapting. + int maximum_order; // The maximum order scheme to use. + BoutReal timestep; // The internal timestep + + // Internal vars + BoutReal out_timestep; // The output timestep + int current_order; // The current order of the scheme + int nsteps; // Number of output steps + int nlocal, neq; // Number of variables on local processor and in total +}; + +#endif // __ADAMSBASHFORTH_SOLVER_H__ diff --git a/src/solver/impls/adams_bashforth/makefile b/src/solver/impls/adams_bashforth/makefile new file mode 100644 index 0000000000..5349d55f48 --- /dev/null +++ b/src/solver/impls/adams_bashforth/makefile @@ -0,0 +1,8 @@ + +BOUT_TOP = ../../../.. 
+ +SOURCEC = adams_bashforth.cxx +SOURCEH = $(SOURCEC:%.cxx=%.hxx) +TARGET = lib + +include $(BOUT_TOP)/make.config diff --git a/src/solver/impls/makefile b/src/solver/impls/makefile index f342840cc1..ef5fa158d1 100644 --- a/src/solver/impls/makefile +++ b/src/solver/impls/makefile @@ -5,8 +5,9 @@ DIRS = arkode \ pvode cvode ida \ petsc \ snes imex-bdf2 \ - power slepc \ + power slepc adams_bashforth \ karniadakis rk4 euler rk3-ssp rkgeneric split-rk + TARGET = lib include $(BOUT_TOP)/make.config diff --git a/src/solver/solverfactory.cxx b/src/solver/solverfactory.cxx index a9c4c6bccc..44d36a329d 100644 --- a/src/solver/solverfactory.cxx +++ b/src/solver/solverfactory.cxx @@ -1,5 +1,6 @@ #include "bout/solverfactory.hxx" +#include "impls/adams_bashforth/adams_bashforth.hxx" #include "impls/arkode/arkode.hxx" #include "impls/cvode/cvode.hxx" #include "impls/euler/euler.hxx" From 29316bd4c37cd3eb933eae0a7edb7b5e5d7f75b3 Mon Sep 17 00:00:00 2001 From: David Dickinson Date: Mon, 25 Nov 2019 19:14:19 +0000 Subject: [PATCH 376/428] Add adaptive_order to Adams-Bashforth solver Fairly crude approx that costs one extra rhs per successful internal step. For the test-drift-instability test case enabling this options leads to a factor two reduction in runtime for the longest running case (but this still remains an order of magnitude longer than with pvode). --- .../impls/adams_bashforth/adams_bashforth.cxx | 120 +++++++++++++----- .../impls/adams_bashforth/adams_bashforth.hxx | 8 ++ 2 files changed, 99 insertions(+), 29 deletions(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index b6c33f4d0e..1200c17a3a 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -64,6 +64,10 @@ int AdamsBashforthSolver::init(int nout, BoutReal tstep) { .withDefault(50000); adaptive = (*options)["adaptive"].doc("Adapt internal timestep using rtol.").withDefault(true); + adaptive_order = (*options)["adaptive_order"] + .doc("Adapt algorithm order using rtol.") + .withDefault(true); + maximum_order = (*options)["order"].doc("The requested maximum order of the scheme").withDefault(5); followHighOrder = @@ -118,6 +122,7 @@ int AdamsBashforthSolver::run() { bool running = true; int internal_steps = 0; + bool use_lower = false; // Take a single output time step while (running) { @@ -176,16 +181,68 @@ int AdamsBashforthSolver::run() { // that would just satisfy the tolerance. In cases where we // move to the next step we actually end up using this new // timestep for the next step. - const BoutReal dt_lim = dt * exp(-log(err / rtol) / current_order); + BoutReal dt_lim = dt * get_timestep_limit(err, rtol, current_order); + + if (err < rtol) { // Successful step + + // Now we can consider what result we would get at + // lower/higher order Our timestep limit gets smaller as + // the order increases for fixed error, hence we really + // want to use the lowest order that satisfies the + // tolerance. Or in other words we want to use the order + // that gives us the biggest timestep. For now we just see + // what the error is when using one order lower. + // + // For now we only do this when we've had a successful + // step, in general we might want to do this for failing + // steps as well, but as the error drops quicker with + // higher orders we might hope higher order is better when + // the error condition is not met. 
+ if (adaptive_order and current_order > 2) { + Array lowerNextState(nlocal); + // Currently we just reuse the existing code to take a + // step but just do it with lower order + // coefficients. This means we have to do another rhs + // call. We might be able to get away with reusing the + // half point derivatives from the higher order method + // here instead, which would save the rhs call. This may + // mean we don't trust the error as much and hence have + // to scale the timestep more conservatively but this + // may be worth it. + const BoutReal lowerErr = + take_step(simtime, dt, current_order - 1, state, lowerNextState); + + const BoutReal lower_dt_lim = + dt * get_timestep_limit(lowerErr, rtol, current_order - 1); + + // Decide if we want to use the lower order method based + // on which gives us the biggest timestep. + use_lower = lower_dt_lim > dt_lim; + + // If we decide the lower order is better then swap/set + // the associated values to use the lower order result. + if (use_lower) { + dt_lim = lower_dt_lim; + swap(nextState, lowerNextState); + current_order = current_order - 1; + } + } - if (err < rtol) { // Try to limit increases in the timestep to no more than 5%. + // We could/should make these numbers runtime to give more + // control to the users, just wary of option overload. timestep = std::min(timestep * 1.05, dt_lim * 0.75); + + // For developers previous_fail = false; + break; + } else { // Be more conservative if we've failed; timestep = 0.75 * dt_lim; + + // For developers if (previous_fail) { nwasted_following_fail++; } @@ -198,17 +255,16 @@ int AdamsBashforthSolver::run() { // Taken an internal step, update times simtime += dt; - if (current_order == maximum_order) { - // Ditch last history point + // Ditch last history point if we have enough + if (times.size() == maximum_order) times.pop_back(); + if (history.size() == maximum_order) history.pop_back(); - } else { - // Here we unconditionally increase the order if we've yet to - // reach the maximum. In general it is probably better to - // consider an adaptive order scheme to try to maximise the - // timestep we can take. This is something to explore in the - // future. - current_order++; + + if (current_order < maximum_order) { + // Don't increase the order if we wanted to use the lower order. + if (not use_lower) + current_order++; } // Call the per internal timestep monitors @@ -285,10 +341,12 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d // clearer, compatible with OMP and empirically slightly faster. 
// std::transform(std::begin(current), std::end(current), std::begin(full_update), // std::begin(result), std::plus{}); - BOUT_OMP(parallel for); - for (int i = 0; i < nlocal; i++) { - result[i] = current[i] + full_update[i]; - }; + if (not(adaptive and followHighOrder)) { + BOUT_OMP(parallel for); + for (int i = 0; i < nlocal; i++) { + result[i] = current[i] + full_update[i]; + }; + } if (adaptive) { @@ -340,9 +398,8 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d // Put intermediate result into variables, call rhs and save the derivatives load_vars(std::begin(result2)); - run_rhs(timeIn - + firstPart - * dt); // This is typically the most expensive part of this routine + // This is typically the most expensive part of this routine + run_rhs(timeIn + firstPart * dt); save_derivs(std::begin(history[0])); // Calculate the coefficients to get to timeIn + dt @@ -365,26 +422,31 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d history.pop_front(); times.pop_front(); - // Note here we don't add a small change onto result2, we recalculate using the - // "full" two half step half_update - // std::transform(std::begin(current), std::end(current), std::begin(half_update), - // std::begin(result2), std::plus{}); - BOUT_OMP(parallel for); - for (int i = 0; i < nlocal; i++) { - result2[i] = current[i] + half_update[i]; - }; - // Here we calculate the error by comparing the updates rather than output states // this is to avoid issues where we have large fields but small derivatives (i.e. to // avoid possible numerical issues at looking at the difference between two large // numbers). err = get_error(full_update, half_update); - // Swap the result to use the more accurate value if requested + // Note here we don't add a small change onto result, we recalculate using the + // "full" two half step half_update. Rather than using result2 we just replace + // result here as we want to use this smaller step result if (followHighOrder) { - swap(result, result2); + BOUT_OMP(parallel for); + for (int i = 0; i < nlocal; i++) { + result[i] = current[i] + half_update[i]; + }; } } return err; } + +// Free function to return an estimate of the factor by which a +// timestep giving aerror = error should be scaled to give aerror = +// tolerance when using a scheme of order = order, where aerror = +// abs(soln_accurate - soln_approx) +BoutReal get_timestep_limit(const BoutReal error, const BoutReal tolerance, + const int order) { + return exp(-log(error / tolerance) / order); +}; diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.hxx b/src/solver/impls/adams_bashforth/adams_bashforth.hxx index 1686c67ceb..f341e83d74 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.hxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.hxx @@ -359,6 +359,7 @@ private: BoutReal max_timestep; // Maximum timestep int mxstep; // Maximum number of internal steps between outputs bool adaptive; // Adapt timestep? + bool adaptive_order; // Adapt order? bool followHighOrder; // If true and adaptive the solution used is the more accurate one. BoutReal dtFac; // Factor we scale timestep estimate by when adapting. 
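For reference, the factor returned by ``get_timestep_limit`` above has a simple closed form (this is only a restatement of the expression in the patch, not a change):

    exp(-log(error / tolerance) / order) = (tolerance / error)^(1 / order)

so ``dt_lim = dt * (rtol / err)^(1 / order)`` is the timestep estimated to bring the measured error down to the tolerance, assuming the error scales like dt^order.
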
@@ -372,4 +373,11 @@ private: int nlocal, neq; // Number of variables on local processor and in total }; +// Free function to return an estimate of the factor by which a +// timestep giving aerror = error should be scaled to give aerror = +// tolerance when using a scheme of order = order, where aerror = +// abs(soln_accurate - soln_approx) +BoutReal get_timestep_limit(const BoutReal error, const BoutReal tolerance, + const int order); + #endif // __ADAMSBASHFORTH_SOLVER_H__ From e041630d8d845d83a668b8578c2b5a5885e76e10 Mon Sep 17 00:00:00 2001 From: David Dickinson Date: Mon, 25 Nov 2019 21:07:53 +0000 Subject: [PATCH 377/428] Bug fix : Ensure non-adaptive code exits inner loop --- src/solver/impls/adams_bashforth/adams_bashforth.cxx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index 1200c17a3a..dbff3a4956 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -249,6 +249,8 @@ int AdamsBashforthSolver::run() { previous_fail = true; nwasted++; } + } else { + break; } } @@ -266,7 +268,7 @@ int AdamsBashforthSolver::run() { if (not use_lower) current_order++; } - + // Call the per internal timestep monitors call_timestep_monitors(simtime, dt); From fa6d719e317c3462792e7b286c8ec6fba4fbe722 Mon Sep 17 00:00:00 2001 From: David Dickinson Date: Tue, 26 Nov 2019 13:07:38 +0000 Subject: [PATCH 378/428] Bug fix: Reduce scope of use_lower and ensure it is reset for every internal step. --- .../impls/adams_bashforth/adams_bashforth.cxx | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index dbff3a4956..eaee529de1 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -122,7 +122,6 @@ int AdamsBashforthSolver::run() { bool running = true; int internal_steps = 0; - bool use_lower = false; // Take a single output time step while (running) { @@ -138,6 +137,9 @@ int AdamsBashforthSolver::run() { // attempt at a time step failed. bool previous_fail = false; + // Flag to indicate if we want to use a lower order method + bool use_lower = false; + BoutReal dt; // Take a single internal time step @@ -254,9 +256,6 @@ int AdamsBashforthSolver::run() { } } - // Taken an internal step, update times - simtime += dt; - // Ditch last history point if we have enough if (times.size() == maximum_order) times.pop_back(); @@ -269,11 +268,14 @@ int AdamsBashforthSolver::run() { current_order++; } - // Call the per internal timestep monitors - call_timestep_monitors(simtime, dt); + // Taken an internal step, update times + simtime += dt; // Put the new state into state. 
swap(state, nextState); + // Call the per internal timestep monitors + call_timestep_monitors(simtime, dt); + }; // Put result into variables From 813ddd5f6aceb4edeadd9a933e6a5fd92d71c3f3 Mon Sep 17 00:00:00 2001 From: David Dickinson Date: Tue, 26 Nov 2019 13:08:23 +0000 Subject: [PATCH 379/428] Bug fix: Ensure we put the state into the fields before calculating the derivatives --- src/solver/impls/adams_bashforth/adams_bashforth.cxx | 1 + 1 file changed, 1 insertion(+) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index eaee529de1..1c30d6318b 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -128,6 +128,7 @@ int AdamsBashforthSolver::run() { // Here's the derivative calculation at the current time // Find d state/dt and store in history -- this doesn't // need repeating whilst adapting timestep + load_vars(std::begin(state)); run_rhs(simtime); history.emplace_front(nlocal); save_derivs(std::begin(history[0])); From 6912951a9bbd4b5a69387add4c9beb8a0c55b790 Mon Sep 17 00:00:00 2001 From: David Dickinson Date: Tue, 26 Nov 2019 13:09:39 +0000 Subject: [PATCH 380/428] Allow order to adapt down to 1 (previously 2 was the minimum allowed) Also be a little more aggressive with our timestep changes --- src/solver/impls/adams_bashforth/adams_bashforth.cxx | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index 1c30d6318b..c4c334b986 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -201,7 +201,7 @@ int AdamsBashforthSolver::run() { // steps as well, but as the error drops quicker with // higher orders we might hope higher order is better when // the error condition is not met. - if (adaptive_order and current_order > 2) { + if (adaptive_order and current_order > 1) { Array lowerNextState(nlocal); // Currently we just reuse the existing code to take a // step but just do it with lower order @@ -231,10 +231,10 @@ int AdamsBashforthSolver::run() { } } - // Try to limit increases in the timestep to no more than 5%. + // Try to limit increases in the timestep to no more than 10%. // We could/should make these numbers runtime to give more // control to the users, just wary of option overload. - timestep = std::min(timestep * 1.05, dt_lim * 0.75); + timestep = std::min(timestep * 1.1, dt_lim); // For developers previous_fail = false; @@ -243,7 +243,7 @@ int AdamsBashforthSolver::run() { } else { // Be more conservative if we've failed; - timestep = 0.75 * dt_lim; + timestep = 0.9 * dt_lim; // For developers if (previous_fail) { @@ -268,7 +268,7 @@ int AdamsBashforthSolver::run() { if (not use_lower) current_order++; } - + // Taken an internal step, update times simtime += dt; @@ -276,7 +276,6 @@ int AdamsBashforthSolver::run() { swap(state, nextState); // Call the per internal timestep monitors call_timestep_monitors(simtime, dt); - }; // Put result into variables From ad9eff4dd3e2bcf4c2102d14dce7767cb049bb29 Mon Sep 17 00:00:00 2001 From: David Dickinson Date: Tue, 26 Nov 2019 13:13:13 +0000 Subject: [PATCH 381/428] Cheat a bit when doing adaptive order Now we don't actually calculate the rhs again at half time step point with the lower order. This saves one rhs call per successful internal timestep when adaptive_order is true. 
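In outline, the cheat amounts to guarding the second rhs call so that the lower-order trial step reuses the derivatives already stored in history. A simplified sketch of the change in the diff below, using the solver's own names:

    // Inside take_step(): only the step taken at current_order pays for the
    // mid-step rhs evaluation; the lower-order trial step invoked by the
    // adaptive-order code skips it and reuses the stored derivatives.
    if (order == current_order) {
      load_vars(std::begin(result2));
      run_rhs(timeIn + firstPart * dt); // the expensive call saved once per successful step
      load_vars(std::begin(current));   // restore fields to the original state
    }
    save_derivs(std::begin(history[0]));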
--- .../impls/adams_bashforth/adams_bashforth.cxx | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index c4c334b986..bb934fa0f0 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -212,6 +212,9 @@ int AdamsBashforthSolver::run() { // mean we don't trust the error as much and hence have // to scale the timestep more conservatively but this // may be worth it. + // + // Actually currently we do skip the second rhs call + // and instead try to reuse the existing data. const BoutReal lowerErr = take_step(simtime, dt, current_order - 1, state, lowerNextState); @@ -401,9 +404,18 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d times.emplace_front(timeIn + dt * firstPart); // Put intermediate result into variables, call rhs and save the derivatives - load_vars(std::begin(result2)); - // This is typically the most expensive part of this routine - run_rhs(timeIn + firstPart * dt); + // Try to cheat for now with this HACK. If the order /= + // current_order then call must be part of the adapative_order code + // so don't recalculate just reuse stored derivatives. + if (order == current_order) { + load_vars(std::begin(result2)); + // This is typically the most expensive part of this routine. + // + run_rhs(timeIn + firstPart * dt); + + // Restore fields to the original state + load_vars(std::begin(current)); + } save_derivs(std::begin(history[0])); // Calculate the coefficients to get to timeIn + dt @@ -419,9 +431,6 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d } } - // Restore fields to the original state - load_vars(std::begin(current)); - // Drop the temporary history information history.pop_front(); times.pop_front(); From 220392fd092dbe1429973028246ad5414d1d2bf8 Mon Sep 17 00:00:00 2001 From: David Dickinson Date: Tue, 26 Nov 2019 15:02:45 +0000 Subject: [PATCH 382/428] Minor optimisation to skip some work. --- .../impls/adams_bashforth/adams_bashforth.cxx | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index bb934fa0f0..48795eff6b 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -357,8 +357,7 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d if (adaptive) { - // Create some storage for the small step update and corresponding resulting state - Array result2(nlocal); + // Create some storage for the small step update. Array half_update(nlocal); // Use this variable to say how big the first small timestep should be as a fraction @@ -386,15 +385,6 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d } } - // Now we have to calculate the state after the first small step as we will need to - // use this to calculate the derivatives at this point. 
- // std::transform(std::begin(current), std::end(current), std::begin(half_update), - // std::begin(result2), std::plus{}); - BOUT_OMP(parallel for); - for (int i = 0; i < nlocal; i++) { - result2[i] = current[i] + half_update[i]; - }; - // ------------------------------------------- // Now do the second small timestep -- note we need to call rhs again // ------------------------------------------- @@ -408,6 +398,17 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d // current_order then call must be part of the adapative_order code // so don't recalculate just reuse stored derivatives. if (order == current_order) { + Array result2(nlocal); + + // Now we have to calculate the state after the first small step as we will need to + // use this to calculate the derivatives at this point. + // std::transform(std::begin(current), std::end(current), std::begin(half_update), + // std::begin(result2), std::plus{}); + BOUT_OMP(parallel for); + for (int i = 0; i < nlocal; i++) { + result2[i] = current[i] + half_update[i]; + }; + load_vars(std::begin(result2)); // This is typically the most expensive part of this routine. // From 6ee88b560a08fa11c7dab6517fb525f34dd6e2c7 Mon Sep 17 00:00:00 2001 From: David Dickinson Date: Tue, 26 Nov 2019 15:33:23 +0000 Subject: [PATCH 383/428] Put state into variables at the end of each internal step rather than at the start. --- src/solver/impls/adams_bashforth/adams_bashforth.cxx | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index 48795eff6b..030221717b 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -128,7 +128,6 @@ int AdamsBashforthSolver::run() { // Here's the derivative calculation at the current time // Find d state/dt and store in history -- this doesn't // need repeating whilst adapting timestep - load_vars(std::begin(state)); run_rhs(simtime); history.emplace_front(nlocal); save_derivs(std::begin(history[0])); @@ -277,6 +276,10 @@ int AdamsBashforthSolver::run() { // Put the new state into state. 
swap(state, nextState); + + // Put the state into the fields + load_vars(std::begin(state)); + // Call the per internal timestep monitors call_timestep_monitors(simtime, dt); }; From 94fdef152457b38365ae4facba94764d5d072424 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 22 May 2020 16:45:53 +0100 Subject: [PATCH 384/428] Add Adams-Bashforth solver to CMake --- CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index e8af27b43b..29e8deee56 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -241,6 +241,8 @@ set(BOUT_SOURCES ./src/physics/snb.cxx ./src/physics/sourcex.cxx ./src/physics/snb.cxx + ./src/solver/impls/adams_bashforth/adams_bashforth.cxx + ./src/solver/impls/adams_bashforth/adams_bashforth.hxx ./src/solver/impls/arkode/arkode.cxx ./src/solver/impls/arkode/arkode.hxx ./src/solver/impls/cvode/cvode.cxx From cee0e220d615daa06f49c98f21094031bd4f40a5 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 28 May 2020 15:27:20 +0100 Subject: [PATCH 385/428] Move struct AdamsBashforthHelper to anonymous namespace --- .../impls/adams_bashforth/adams_bashforth.cxx | 193 ++++++++++++- .../impls/adams_bashforth/adams_bashforth.hxx | 254 ------------------ 2 files changed, 187 insertions(+), 260 deletions(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index 030221717b..453de87786 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -7,6 +7,188 @@ #include +namespace { +BoutReal lagrange_at_position_denominator(const std::deque& grid, + const int position, const int order) { + AUTO_TRACE(); + + const auto xj = grid[position]; + + BoutReal result = 1.0; + for (int i = 0; i < order; i++) { + result /= (i != position) ? (xj - grid[i]) : 1.0; + } + return result; +} + +BoutReal lagrange_at_position_numerator(const BoutReal varX, + const std::deque& grid, + const int position, const int order) { + AUTO_TRACE(); + BoutReal result = 1.0; + for (int i = 0; i < order; i++) { + result *= (i != position) ? 
(varX - grid[i]) : 1.0; + } + return result; +} + +template +BoutReal lagrange_interpolate(BoutReal start, BoutReal end, + const std::deque& points, const int position, + const std::array& facs) { + const BoutReal stepSize = (end - start) / (N - 1.0); + + BoutReal result{0.0}; + for (std::size_t i = 0; i < N; i++) { + result += + facs[i] + * lagrange_at_position_numerator(start + i * stepSize, points, position, N - 1); + } + return stepSize * result * lagrange_at_position_denominator(points, position, N - 1); +} + +// Integrate using newton-cotes 9 rule +BoutReal integrate_lagrange_curve_nc9(const BoutReal start, const BoutReal end, + const std::deque& points, + const int position) { + AUTO_TRACE(); + constexpr std::size_t size = 9; + constexpr BoutReal fac = 4.0 / 14175.0; + constexpr std::array facs{989.0 * fac, 5888.0 * fac, -928.0 * fac, + 10496.0 * fac, -4540.0 * fac, 10496.0 * fac, + -928.0 * fac, 5888.0 * fac, 989.0 * fac}; + return lagrange_interpolate(start, end, points, position, facs); +} + +// Integrate using newton-cotes 8 rule +BoutReal integrate_lagrange_curve_nc8(const BoutReal start, const BoutReal end, + const std::deque& points, + const int position) { + AUTO_TRACE(); + constexpr std::size_t size = 8; + constexpr BoutReal fac = 7.0 / 17280.0; + constexpr std::array facs{751.0 * fac, 3577.0 * fac, 1323.0 * fac, + 2989.0 * fac, 2989.0 * fac, 1323.0 * fac, + 3577.0 * fac, 751.0 * fac}; + return lagrange_interpolate(start, end, points, position, facs); +} + +// Integrate using newton-cotes 7 rule +BoutReal integrate_lagrange_curve_nc7(const BoutReal start, const BoutReal end, + const std::deque& points, + const int position) { + AUTO_TRACE(); + constexpr std::size_t size = 7; + constexpr BoutReal fac = 1.0 / 140.0; + constexpr std::array facs{41.0 * fac, 216.0 * fac, 27.0 * fac, + 272.0 * fac, 27.0 * fac, 216.0 * fac, + 41.0 * fac}; + return lagrange_interpolate(start, end, points, position, facs); +} + +// Integrate using newton-cotes 6 rule +BoutReal integrate_lagrange_curve_nc6(const BoutReal start, const BoutReal end, + const std::deque& points, + const int position) { + AUTO_TRACE(); + constexpr std::size_t size = 6; + constexpr BoutReal fac = 5.0 / 288.0; + constexpr std::array facs{19.0 * fac, 75.0 * fac, 50.0 * fac, + 50.0 * fac, 75.0 * fac, 19.0 * fac}; + return lagrange_interpolate(start, end, points, position, facs); +}; + +// Integrate using newton-cotes 5 rule (Boole) +BoutReal integrate_lagrange_curve_nc5(const BoutReal start, const BoutReal end, + const std::deque& points, + const int position) { + AUTO_TRACE(); + constexpr std::size_t size = 5; + constexpr BoutReal fac = 2.0 / 45.0; + constexpr std::array facs{7.0 * fac, 32.0 * fac, 12.0 * fac, 32.0 * fac, + 7.0 * fac}; + return lagrange_interpolate(start, end, points, position, facs); +} + +// Integrate using newton-cotes 4 rule (Simpson 3/8) +BoutReal integrate_lagrange_curve_nc4(const BoutReal start, const BoutReal end, + const std::deque& points, + const int position) { + AUTO_TRACE(); + constexpr std::size_t size = 4; + constexpr BoutReal fac = 3.0 / 8.0; + constexpr std::array facs{1.0 * fac, 3.0 * fac, 3.0 * fac, 1.0 * fac}; + return lagrange_interpolate(start, end, points, position, facs); +} + +// Integrate using newton-cotes 3 rule (Simpson) +BoutReal integrate_lagrange_curve_nc3(const BoutReal start, const BoutReal end, + const std::deque& points, + const int position) { + AUTO_TRACE(); + constexpr std::size_t size = 3; + constexpr BoutReal fac = 1.0 / 3.0; + constexpr std::array facs{1.0 * fac, 
4.0 * fac, 1.0 * fac}; + return lagrange_interpolate(start, end, points, position, facs); +} + +// Integrate using newton-cotes 2 rule (Trap) +BoutReal integrate_lagrange_curve_nc2(const BoutReal start, const BoutReal end, + const std::deque& points, + const int position) { + AUTO_TRACE(); + constexpr std::size_t size = 2; + constexpr BoutReal fac = 1.0 / 2.0; + constexpr std::array facs{1.0 * fac, 1.0 * fac}; + return lagrange_interpolate(start, end, points, position, facs); +} + +// Integrate lagrange polynomial to find the coefficienst of the requested order +BoutReal integrate_lagrange_curve(const BoutReal start, const BoutReal end, + const std::deque& points, const int position, + const int order) { + AUTO_TRACE(); + + switch (order) { + case 1: + return integrate_lagrange_curve_nc2(start, end, points, position); + case 2: + return integrate_lagrange_curve_nc3(start, end, points, position); + case 3: + return integrate_lagrange_curve_nc4(start, end, points, position); + case 4: + return integrate_lagrange_curve_nc5(start, end, points, position); + case 5: + return integrate_lagrange_curve_nc6(start, end, points, position); + case 6: + return integrate_lagrange_curve_nc7(start, end, points, position); + case 7: + return integrate_lagrange_curve_nc8(start, end, points, position); + default: + return integrate_lagrange_curve_nc9(start, end, points, position); + } +} + +// Calculate the set of Adams-Bashforth coefficients required to get from t = points[0] +// to t = nextPoint +// at the requested order. +std::vector get_adams_bashforth_coefficients(const BoutReal nextPoint, + const std::deque& points, + const int order) { + AUTO_TRACE(); + ASSERT2(order <= points.size()); + + std::vector result; + result.reserve(order); + + for (int i = 0; i < order; i++) { + result.emplace_back(integrate_lagrange_curve(points[0], nextPoint, points, i, order)); + } + + return result; +} +} // namespace + AdamsBashforthSolver::AdamsBashforthSolver(Options* options) : Solver(options) { AUTO_TRACE(); canReset = true; @@ -321,8 +503,7 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d BoutReal err = 0.0; // Calculate the coefficients for a single step of size dt - const auto coefs = - coefficients_calculator.get_adams_bashforth_coefficients(timeIn + dt, times, order); + const auto coefs = get_adams_bashforth_coefficients(timeIn + dt, times, order); // Create some storage for the update to the state (i.e. state(timeIn + dt) = current + // full_update). @@ -373,8 +554,8 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d // ------------------------------------------- // Calculate the coefficients to get to timeIn + dt * firstPart - const auto coefsFirstStep = coefficients_calculator.get_adams_bashforth_coefficients( - timeIn + dt * firstPart, times, order); + const auto coefsFirstStep = + get_adams_bashforth_coefficients(timeIn + dt * firstPart, times, order); // Initialise the update array to 0. 
std::fill(std::begin(half_update), std::end(half_update), 0.0); @@ -423,8 +604,8 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d save_derivs(std::begin(history[0])); // Calculate the coefficients to get to timeIn + dt - const auto coefsSecondStep = coefficients_calculator.get_adams_bashforth_coefficients( - timeIn + dt, times, order); + const auto coefsSecondStep = + get_adams_bashforth_coefficients(timeIn + dt, times, order); for (int j = 0; j < order; j++) { const BoutReal factor = coefsSecondStep[j]; diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.hxx b/src/solver/impls/adams_bashforth/adams_bashforth.hxx index f341e83d74..128376aaa6 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.hxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.hxx @@ -40,257 +40,6 @@ namespace { RegisterSolver registersolveradamsbashforth("adams-bashforth"); } -struct AdamsBashforthHelper { - BoutReal lagrange_at_position_denominator(const std::deque& grid, - const int position, const int order) const { - AUTO_TRACE(); - ASSERT2(position < order); - ASSERT2(order <= grid.size()); - - const auto xj = grid[position]; - - BoutReal result = 1.0; - for (int i = 0; i < order; i++) { - result /= (i != position) ? (xj - grid[i]) : 1.0; - } - return result; - }; - - BoutReal lagrange_at_position_numerator(const BoutReal varX, - const std::deque& grid, - const int position, const int order) const { - AUTO_TRACE(); - ASSERT2(position < order); - ASSERT2(order <= grid.size()); - BoutReal result = 1.0; - for (int i = 0; i < order; i++) { - result *= (i != position) ? (varX - grid[i]) : 1.0; - } - return result; - - // // Above could be rewritten as following but not sure this is more readable and - // // the floating comparison that seems to be required is less nice. Possibly that - // // we could use std::iota(grid.size()) as the iterator args and then use that value - // // to index grid (which we'd have to capture) instead but again that seems more - // complex. - // const auto tmp = grid[position]; - // return std::accumulate(std::begin(grid), std::end(grid), 1.0, - // [varX, tmp](BoutReal current, BoutReal gridVal) { - // return current * ((gridVal != tmp)? 
(varX - gridVal) : - // 1.0); - // }); - }; - - // Integrate using newton-cotes 9 rule - BoutReal integrate_lagrange_curve_nc9(const BoutReal theStart, const BoutReal theEnd, - const std::deque& points, - const int position, const int order) const { - AUTO_TRACE(); - constexpr int size = 9; - constexpr BoutReal fac = 4.0 / 14175.0; - constexpr std::array facs{989.0 * fac, 5888.0 * fac, -928.0 * fac, - 10496.0 * fac, -4540.0 * fac, 10496.0 * fac, - -928.0 * fac, 5888.0 * fac, 989.0 * fac}; - constexpr BoutReal stepFac = 1.0 / (size - 1.0); - const BoutReal stepSize = (theEnd - theStart) * stepFac; - - BoutReal result{0.0}; - for (int i = 0; i < size; i++) { - result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, - position, order); - } - return stepSize * result * lagrange_at_position_denominator(points, position, order); - }; - - // Integrate using newton-cotes 8 rule - BoutReal integrate_lagrange_curve_nc8(const BoutReal theStart, const BoutReal theEnd, - const std::deque& points, - const int position, const int order) const { - AUTO_TRACE(); - constexpr int size = 8; - constexpr BoutReal fac = 7.0 / 17280.0; - constexpr std::array facs{751.0 * fac, 3577.0 * fac, 1323.0 * fac, - 2989.0 * fac, 2989.0 * fac, 1323.0 * fac, - 3577.0 * fac, 751.0 * fac}; - constexpr BoutReal stepFac = 1.0 / (size - 1.0); - const BoutReal stepSize = (theEnd - theStart) * stepFac; - - BoutReal result{0.0}; - for (int i = 0; i < size; i++) { - result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, - position, order); - } - return stepSize * result * lagrange_at_position_denominator(points, position, order); - }; - - // Integrate using newton-cotes 7 rule - BoutReal integrate_lagrange_curve_nc7(const BoutReal theStart, const BoutReal theEnd, - const std::deque& points, - const int position, const int order) const { - AUTO_TRACE(); - constexpr int size = 7; - constexpr BoutReal fac = 1.0 / 140.0; - constexpr std::array facs{41.0 * fac, 216.0 * fac, 27.0 * fac, - 272.0 * fac, 27.0 * fac, 216.0 * fac, - 41.0 * fac}; - constexpr BoutReal stepFac = 1.0 / (size - 1.0); - const BoutReal stepSize = (theEnd - theStart) * stepFac; - - BoutReal result{0.0}; - for (int i = 0; i < size; i++) { - result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, - position, order); - } - return stepSize * result * lagrange_at_position_denominator(points, position, order); - }; - - // Integrate using newton-cotes 6 rule - BoutReal integrate_lagrange_curve_nc6(const BoutReal theStart, const BoutReal theEnd, - const std::deque& points, - const int position, const int order) const { - AUTO_TRACE(); - constexpr int size = 6; - constexpr BoutReal fac = 5.0 / 288.0; - constexpr std::array facs{19.0 * fac, 75.0 * fac, 50.0 * fac, - 50.0 * fac, 75.0 * fac, 19.0 * fac}; - constexpr BoutReal stepFac = 1.0 / (size - 1.0); - const BoutReal stepSize = (theEnd - theStart) * stepFac; - - BoutReal result{0}; - for (int i = 0; i < size; i++) { - result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, - position, order); - } - return stepSize * result * lagrange_at_position_denominator(points, position, order); - }; - - // Integrate using newton-cotes 5 rule (Boole) - BoutReal integrate_lagrange_curve_nc5(const BoutReal theStart, const BoutReal theEnd, - const std::deque& points, - const int position, const int order) const { - AUTO_TRACE(); - constexpr int size = 5; - constexpr BoutReal fac = 2.0 / 45.0; - constexpr std::array facs{7.0 * fac, 
32.0 * fac, 12.0 * fac, - 32.0 * fac, 7.0 * fac}; - constexpr BoutReal stepFac = 1.0 / (size - 1.0); - const BoutReal stepSize = (theEnd - theStart) * stepFac; - - BoutReal result{0.0}; - for (int i = 0; i < size; i++) { - result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, - position, order); - } - return stepSize * result * lagrange_at_position_denominator(points, position, order); - }; - - // Integrate using newton-cotes 4 rule (Simpson 3/8) - BoutReal integrate_lagrange_curve_nc4(const BoutReal theStart, const BoutReal theEnd, - const std::deque& points, - const int position, const int order) const { - AUTO_TRACE(); - constexpr int size = 4; - constexpr BoutReal fac = 3.0 / 8.0; - constexpr std::array facs{1.0 * fac, 3.0 * fac, 3.0 * fac, 1.0 * fac}; - constexpr BoutReal stepFac = 1.0 / (size - 1.0); - const BoutReal stepSize = (theEnd - theStart) * stepFac; - - BoutReal result{0.0}; - for (int i = 0; i < size; i++) { - result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, - position, order); - } - return stepSize * result * lagrange_at_position_denominator(points, position, order); - }; - - // Integrate using newton-cotes 3 rule (Simpson) - BoutReal integrate_lagrange_curve_nc3(const BoutReal theStart, const BoutReal theEnd, - const std::deque& points, - const int position, const int order) const { - AUTO_TRACE(); - constexpr int size = 3; - constexpr BoutReal fac = 1.0 / 3.0; - constexpr std::array facs{1.0 * fac, 4.0 * fac, 1.0 * fac}; - constexpr BoutReal stepFac = 1.0 / (size - 1.0); - const BoutReal stepSize = (theEnd - theStart) * stepFac; - - BoutReal result{0.0}; - for (int i = 0; i < size; i++) { - result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, - position, order); - } - return stepSize * result * lagrange_at_position_denominator(points, position, order); - }; - - // Integrate using newton-cotes 2 rule (Trap) - BoutReal integrate_lagrange_curve_nc2(const BoutReal theStart, const BoutReal theEnd, - const std::deque& points, - const int position, const int order) const { - AUTO_TRACE(); - constexpr int size = 2; - constexpr BoutReal fac = 1.0 / 2.0; - constexpr std::array facs{1.0 * fac, 1.0 * fac}; - constexpr BoutReal stepFac = 1.0 / (size - 1.0); - const BoutReal stepSize = (theEnd - theStart) * stepFac; - - BoutReal result{0.0}; - for (int i = 0; i < size; i++) { - result += facs[i] * lagrange_at_position_numerator(theStart + i * stepSize, points, - position, order); - } - return stepSize * result * lagrange_at_position_denominator(points, position, order); - }; - - // Integrate lagrange polynomial to find the coefficienst of the requested order (note - // don't currently - // request an order just try to work it out from number of points). 
- BoutReal integrate_lagrange_curve(const BoutReal theStart, const BoutReal theEnd, - const std::deque& points, - const int position, const int order) const { - AUTO_TRACE(); - ASSERT2(order <= points.size()); - - switch (order) { - case 1: - return integrate_lagrange_curve_nc2(theStart, theEnd, points, position, order); - case 2: - return integrate_lagrange_curve_nc3(theStart, theEnd, points, position, order); - case 3: - return integrate_lagrange_curve_nc4(theStart, theEnd, points, position, order); - case 4: - return integrate_lagrange_curve_nc5(theStart, theEnd, points, position, order); - case 5: - return integrate_lagrange_curve_nc6(theStart, theEnd, points, position, order); - case 6: - return integrate_lagrange_curve_nc7(theStart, theEnd, points, position, order); - case 7: - return integrate_lagrange_curve_nc8(theStart, theEnd, points, position, order); - default: - return integrate_lagrange_curve_nc9(theStart, theEnd, points, position, order); - } - }; - - // Calculate the set of Adams-Bashforth coefficients required to get from t = points[0] - // to t = nextPoint - // at the requested order. - std::vector - get_adams_bashforth_coefficients(const BoutReal nextPoint, - const std::deque& points, - const int order) const { - AUTO_TRACE(); - ASSERT2(order <= points.size()); - - std::vector result; - - for (int i = 0; i < order; i++) { - result.emplace_back( - integrate_lagrange_curve(points[0], nextPoint, points, i, order)); - }; - - return result; - }; -}; - class AdamsBashforthSolver : public Solver { public: AdamsBashforthSolver(Options* options = nullptr); @@ -340,9 +89,6 @@ private: return err; }; - // Coefficient calculator - AdamsBashforthHelper coefficients_calculator; - // Holds the current/next state Array state, nextState; From 3a5c7f0e10da6f8c3029b75a9270bd06e6b0679e Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 28 May 2020 15:59:00 +0100 Subject: [PATCH 386/428] Pull out helper function for AB integration --- .../impls/adams_bashforth/adams_bashforth.cxx | 92 ++++++++----------- 1 file changed, 37 insertions(+), 55 deletions(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index 453de87786..8e65acb1e9 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -187,6 +187,37 @@ std::vector get_adams_bashforth_coefficients(const BoutReal nextPoint, return result; } + +// In-place Adams-Bashforth integration +void AB_integrate_update(Array& update, BoutReal timestep, + const std::deque& times, + const std::deque>& history, int order) { + + const auto AB_coefficients = get_adams_bashforth_coefficients(timestep, times, order); + + for (std::size_t j = 0; j < static_cast(order); ++j) { + const BoutReal factor = AB_coefficients[j]; + BOUT_OMP(parallel for); + for (std::size_t i = 0; i < static_cast(update.size()); ++i) { + update[i] += history[j][i] * factor; + } + } +} + +// Integrate \p history with Adams-Bashforth of order \p order +Array AB_integrate(int nlocal, BoutReal timestep, + const std::deque& times, + const std::deque>& history, int order) { + Array update(nlocal); + + // Zero-initialise to ensure we can operate on the contiguous + // history arrays in order + std::fill(std::begin(update), std::end(update), 0.0); + + AB_integrate_update(update, timestep, times, history, order); + return update; +} + } // namespace AdamsBashforthSolver::AdamsBashforthSolver(Options* options) : Solver(options) { @@ -502,27 +533,7 @@ 
BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d // The initial error is 0.0 BoutReal err = 0.0; - // Calculate the coefficients for a single step of size dt - const auto coefs = get_adams_bashforth_coefficients(timeIn + dt, times, order); - - // Create some storage for the update to the state (i.e. state(timeIn + dt) = current + - // full_update). - Array full_update(nlocal); - - // Note we split the work here into initialisation with std::fill - // and a separate double loop to calculate the update. This is - // to ensure we can operate on the contiguous arrays in history - // in order. - std::fill(std::begin(full_update), std::end(full_update), 0.0); - - for (int j = 0; j < order; j++) { - const BoutReal factor = coefs[j]; - - BOUT_OMP(parallel for); - for (int i = 0; i < nlocal; i++) { - full_update[i] += history[j][i] * factor; - } - } + Array full_update = AB_integrate(nlocal, timeIn + dt, times, history, order); // Calculate the new state given the history and current state. // Could possibly skip the following calculation if adaptive and following the high @@ -536,38 +547,19 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d BOUT_OMP(parallel for); for (int i = 0; i < nlocal; i++) { result[i] = current[i] + full_update[i]; - }; + } } if (adaptive) { - // Create some storage for the small step update. - Array half_update(nlocal); - // Use this variable to say how big the first small timestep should be as a fraction // of the large timestep, dt. Here fixed to 0.5 to take two equally sized half steps // but left here to enable developer experimentation. constexpr BoutReal firstPart = 0.5; - // ------------------------------------------- // Take a small time step - note we don't need to call the rhs again just yet - // ------------------------------------------- - - // Calculate the coefficients to get to timeIn + dt * firstPart - const auto coefsFirstStep = - get_adams_bashforth_coefficients(timeIn + dt * firstPart, times, order); - - // Initialise the update array to 0. 
- std::fill(std::begin(half_update), std::end(half_update), 0.0); - - for (int j = 0; j < order; j++) { - const BoutReal factor = coefsFirstStep[j]; - - BOUT_OMP(parallel for); - for (int i = 0; i < nlocal; i++) { - half_update[i] += history[j][i] * factor; - } - } + Array half_update = + AB_integrate(nlocal, timeIn + (dt * firstPart), times, history, order); // ------------------------------------------- // Now do the second small timestep -- note we need to call rhs again @@ -603,18 +595,8 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d } save_derivs(std::begin(history[0])); - // Calculate the coefficients to get to timeIn + dt - const auto coefsSecondStep = - get_adams_bashforth_coefficients(timeIn + dt, times, order); - - for (int j = 0; j < order; j++) { - const BoutReal factor = coefsSecondStep[j]; - - BOUT_OMP(parallel for); - for (int i = 0; i < nlocal; i++) { - half_update[i] += history[j][i] * factor; - } - } + // Finish the time step + AB_integrate_update(half_update, timeIn + dt, times, history, order); // Drop the temporary history information history.pop_front(); From be77914eaa4efd96e4ca3c87ebeae9db72851a14 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 28 May 2020 16:03:48 +0100 Subject: [PATCH 387/428] Move helper function to anonymous namespace --- .../impls/adams_bashforth/adams_bashforth.cxx | 17 +++++++++-------- .../impls/adams_bashforth/adams_bashforth.hxx | 7 ------- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index 8e65acb1e9..b096f7fe86 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -218,6 +218,15 @@ Array AB_integrate(int nlocal, BoutReal timestep, return update; } +// Free function to return an estimate of the factor by which a +// timestep giving aerror = error should be scaled to give aerror = +// tolerance when using a scheme of order = order, where aerror = +// abs(soln_accurate - soln_approx) +BoutReal get_timestep_limit(const BoutReal error, const BoutReal tolerance, + const int order) { + return std::exp(-std::log(error / tolerance) / order); +}; + } // namespace AdamsBashforthSolver::AdamsBashforthSolver(Options* options) : Solver(options) { @@ -622,11 +631,3 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d return err; } -// Free function to return an estimate of the factor by which a -// timestep giving aerror = error should be scaled to give aerror = -// tolerance when using a scheme of order = order, where aerror = -// abs(soln_accurate - soln_approx) -BoutReal get_timestep_limit(const BoutReal error, const BoutReal tolerance, - const int order) { - return exp(-log(error / tolerance) / order); -}; diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.hxx b/src/solver/impls/adams_bashforth/adams_bashforth.hxx index 128376aaa6..10b5040993 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.hxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.hxx @@ -119,11 +119,4 @@ private: int nlocal, neq; // Number of variables on local processor and in total }; -// Free function to return an estimate of the factor by which a -// timestep giving aerror = error should be scaled to give aerror = -// tolerance when using a scheme of order = order, where aerror = -// abs(soln_accurate - soln_approx) -BoutReal get_timestep_limit(const BoutReal error, const BoutReal 
tolerance, - const int order); - #endif // __ADAMSBASHFORTH_SOLVER_H__ From b9ea4dc894d9a5cb9377dc0cf5da6a4ea6a74456 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 28 May 2020 16:26:19 +0100 Subject: [PATCH 388/428] Move another helper to anonymous namespace --- .../impls/adams_bashforth/adams_bashforth.cxx | 26 +++++++++++++++++ .../impls/adams_bashforth/adams_bashforth.hxx | 28 ------------------- 2 files changed, 26 insertions(+), 28 deletions(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index b096f7fe86..0531e5603d 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -227,6 +227,32 @@ BoutReal get_timestep_limit(const BoutReal error, const BoutReal tolerance, return std::exp(-std::log(error / tolerance) / order); }; +/// Finds the maximum absolute error, i.e. Max(Abs(stateApprox - stateAccurate)) +/// over all processors. +BoutReal get_error(const Array& stateApprox, + const Array& stateAccurate) { + AUTO_TRACE(); + BoutReal local_result = 0.0; + BoutReal err = 0.0; + + const auto nlocal = stateAccurate.size(); + for (int i = 0; i < nlocal; i++) { + local_result = std::max(std::abs(stateAccurate[i] - stateApprox[i]), local_result); + + // The below is the more typical error calculation used in other solvers. + // We prefer the above definition as it provides a way to get a reasonable + // estimate of the limiting timestep. + // local_result = std::max(std::abs(stateAccurate[i] - + // stateApprox[i]) / (std::abs(stateAccurate[i]) + + // std::abs(stateApprox[i]) + atol), local_result); + } + + // Reduce over procs + if (MPI_Allreduce(&local_result, &err, 1, MPI_DOUBLE, MPI_MAX, BoutComm::get()) != 0) { + throw BoutException("MPI_Allreduce failed"); + } + return err; +} } // namespace AdamsBashforthSolver::AdamsBashforthSolver(Options* options) : Solver(options) { diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.hxx b/src/solver/impls/adams_bashforth/adams_bashforth.hxx index 10b5040993..a0918a2e4e 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.hxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.hxx @@ -28,8 +28,6 @@ class AdamsBashforthSolver; #ifndef __ADAMSBASHFORTH_SOLVER_H__ #define __ADAMSBASHFORTH_SOLVER_H__ -#include "mpi.h" - #include #include #include @@ -63,32 +61,6 @@ private: BoutReal take_step(const BoutReal timeIn, const BoutReal dt, const int order, Array& current, Array& result); - // Finds the maximum absolute error, i.e. Max(Abs(stateApprox - stateAccurate)) - // over all processors. - BoutReal get_error(const Array& stateApprox, - const Array& stateAccurate) const { - AUTO_TRACE(); - BoutReal local_result = 0.0; - BoutReal err = 0.0; - - for (int i = 0; i < nlocal; i++) { - local_result = std::max(std::abs(stateAccurate[i] - stateApprox[i]), local_result); - - // The below is the more typical error calculation used in other solvers. - // We prefer the above definition as it provides a way to get a reasonable - // estimate of the limiting timestep. 
- // local_result = std::max(std::abs(stateAccurate[i] - - // stateApprox[i]) / (std::abs(stateAccurate[i]) + - // std::abs(stateApprox[i]) + atol), local_result); - } - - // Reduce over procs - if (MPI_Allreduce(&local_result, &err, 1, MPI_DOUBLE, MPI_MAX, BoutComm::get())) { - throw BoutException("MPI_Allreduce failed"); - } - return err; - }; - // Holds the current/next state Array state, nextState; From 5dfb45f8dc35933e3ce4de51bd682a64a6e77270 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 28 May 2020 16:27:44 +0100 Subject: [PATCH 389/428] Flip conditional to break loop early if non-adaptive Avoids else-after-break and reduces indentation --- .../impls/adams_bashforth/adams_bashforth.cxx | 161 +++++++++--------- 1 file changed, 80 insertions(+), 81 deletions(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index 0531e5603d..c058c0a04e 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -417,94 +417,93 @@ int AdamsBashforthSolver::run() { const BoutReal err = take_step(simtime, dt, current_order, state, nextState); // Calculate and check error if adaptive - if (adaptive) { - // Really the following should apply to both adaptive and non-adaptive - // approaches, but the non-adaptive can be determined without needing - // to do any solves so we check during init instead. - internal_steps++; - if (internal_steps > mxstep) - throw BoutException("ERROR: MXSTEP exceeded. timestep = %e, err=%e\n", - timestep, err); - - // Estimate the limiting timestep and update. This is - // really the estimate of the timestep for this current step - // that would just satisfy the tolerance. In cases where we - // move to the next step we actually end up using this new - // timestep for the next step. - BoutReal dt_lim = dt * get_timestep_limit(err, rtol, current_order); - - if (err < rtol) { // Successful step - - // Now we can consider what result we would get at - // lower/higher order Our timestep limit gets smaller as - // the order increases for fixed error, hence we really - // want to use the lowest order that satisfies the - // tolerance. Or in other words we want to use the order - // that gives us the biggest timestep. For now we just see - // what the error is when using one order lower. - // - // For now we only do this when we've had a successful - // step, in general we might want to do this for failing - // steps as well, but as the error drops quicker with - // higher orders we might hope higher order is better when - // the error condition is not met. - if (adaptive_order and current_order > 1) { - Array lowerNextState(nlocal); - // Currently we just reuse the existing code to take a - // step but just do it with lower order - // coefficients. This means we have to do another rhs - // call. We might be able to get away with reusing the - // half point derivatives from the higher order method - // here instead, which would save the rhs call. This may - // mean we don't trust the error as much and hence have - // to scale the timestep more conservatively but this - // may be worth it. - // - // Actually currently we do skip the second rhs call - // and instead try to reuse the existing data. 
- const BoutReal lowerErr = - take_step(simtime, dt, current_order - 1, state, lowerNextState); - - const BoutReal lower_dt_lim = - dt * get_timestep_limit(lowerErr, rtol, current_order - 1); - - // Decide if we want to use the lower order method based - // on which gives us the biggest timestep. - use_lower = lower_dt_lim > dt_lim; - - // If we decide the lower order is better then swap/set - // the associated values to use the lower order result. - if (use_lower) { - dt_lim = lower_dt_lim; - swap(nextState, lowerNextState); - current_order = current_order - 1; - } - } + if (not adaptive) { + break; + } - // Try to limit increases in the timestep to no more than 10%. - // We could/should make these numbers runtime to give more - // control to the users, just wary of option overload. - timestep = std::min(timestep * 1.1, dt_lim); + // Really the following should apply to both adaptive and non-adaptive + // approaches, but the non-adaptive can be determined without needing + // to do any solves so we check during init instead. + ++internal_steps; + if (internal_steps > mxstep) { + throw BoutException("ERROR: MXSTEP exceeded. timestep = %e, err=%e\n", + timestep, err); + } - // For developers - previous_fail = false; + // Estimate the limiting timestep and update. This is + // really the estimate of the timestep for this current step + // that would just satisfy the tolerance. In cases where we + // move to the next step we actually end up using this new + // timestep for the next step. + BoutReal dt_lim = dt * get_timestep_limit(err, rtol, current_order); + + if (err < rtol) { // Successful step + + // Now we can consider what result we would get at + // lower/higher order Our timestep limit gets smaller as + // the order increases for fixed error, hence we really + // want to use the lowest order that satisfies the + // tolerance. Or in other words we want to use the order + // that gives us the biggest timestep. For now we just see + // what the error is when using one order lower. + // + // For now we only do this when we've had a successful + // step, in general we might want to do this for failing + // steps as well, but as the error drops quicker with + // higher orders we might hope higher order is better when + // the error condition is not met. + if (adaptive_order and current_order > 1) { + Array lowerNextState(nlocal); + // Currently we just reuse the existing code to take a + // step but just do it with lower order + // coefficients. This means we have to do another rhs + // call. We might be able to get away with reusing the + // half point derivatives from the higher order method + // here instead, which would save the rhs call. This may + // mean we don't trust the error as much and hence have + // to scale the timestep more conservatively but this + // may be worth it. + // + // Actually currently we do skip the second rhs call + // and instead try to reuse the existing data. + const BoutReal lowerErr = + take_step(simtime, dt, current_order - 1, state, lowerNextState); + + const BoutReal lower_dt_lim = + dt * get_timestep_limit(lowerErr, rtol, current_order - 1); + + // Decide if we want to use the lower order method based + // on which gives us the biggest timestep. + use_lower = lower_dt_lim > dt_lim; + + // If we decide the lower order is better then swap/set + // the associated values to use the lower order result. 
+ if (use_lower) { + dt_lim = lower_dt_lim; + swap(nextState, lowerNextState); + current_order = current_order - 1; + } + } - break; + // Try to limit increases in the timestep to no more than 10%. + // We could/should make these numbers runtime to give more + // control to the users, just wary of option overload. + timestep = std::min(timestep * 1.1, dt_lim); - } else { - // Be more conservative if we've failed; - timestep = 0.9 * dt_lim; + // For developers + previous_fail = false; - // For developers - if (previous_fail) { - nwasted_following_fail++; - } - previous_fail = true; - nwasted++; - } - } else { break; } + // Be more conservative if we've failed; + timestep = 0.9 * dt_lim; + + // For developers + if (previous_fail) { + nwasted_following_fail++; + } + previous_fail = true; + nwasted++; } // Ditch last history point if we have enough From c5384511c5b313b312fd375ddc09e2ed3324e135 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 28 May 2020 16:33:59 +0100 Subject: [PATCH 390/428] Fix a few minor issues - "implicit bools" - extra semicolons - sign comparison - braced conditionals --- .../impls/adams_bashforth/adams_bashforth.cxx | 54 ++++++++++--------- 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index c058c0a04e..1c7c83bba2 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -96,7 +96,7 @@ BoutReal integrate_lagrange_curve_nc6(const BoutReal start, const BoutReal end, constexpr std::array facs{19.0 * fac, 75.0 * fac, 50.0 * fac, 50.0 * fac, 75.0 * fac, 19.0 * fac}; return lagrange_interpolate(start, end, points, position, facs); -}; +} // Integrate using newton-cotes 5 rule (Boole) BoutReal integrate_lagrange_curve_nc5(const BoutReal start, const BoutReal end, @@ -176,7 +176,7 @@ std::vector get_adams_bashforth_coefficients(const BoutReal nextPoint, const std::deque& points, const int order) { AUTO_TRACE(); - ASSERT2(order <= points.size()); + ASSERT2(static_cast(order) <= points.size()); std::vector result; result.reserve(order); @@ -188,7 +188,7 @@ std::vector get_adams_bashforth_coefficients(const BoutReal nextPoint, return result; } -// In-place Adams-Bashforth integration +/// In-place Adams-Bashforth integration void AB_integrate_update(Array& update, BoutReal timestep, const std::deque& times, const std::deque>& history, int order) { @@ -204,7 +204,7 @@ void AB_integrate_update(Array& update, BoutReal timestep, } } -// Integrate \p history with Adams-Bashforth of order \p order +/// Integrate \p history with Adams-Bashforth of order \p order Array AB_integrate(int nlocal, BoutReal timestep, const std::deque& times, const std::deque>& history, int order) { @@ -218,14 +218,14 @@ Array AB_integrate(int nlocal, BoutReal timestep, return update; } -// Free function to return an estimate of the factor by which a -// timestep giving aerror = error should be scaled to give aerror = -// tolerance when using a scheme of order = order, where aerror = -// abs(soln_accurate - soln_approx) +/// Free function to return an estimate of the factor by which a +/// timestep giving aerror = error should be scaled to give aerror = +/// tolerance when using a scheme of order = order, where aerror = +/// abs(soln_accurate - soln_approx) BoutReal get_timestep_limit(const BoutReal error, const BoutReal tolerance, const int order) { return std::exp(-std::log(error / tolerance) / order); -}; +} /// 
Finds the maximum absolute error, i.e. Max(Abs(stateApprox - stateAccurate)) /// over all processors. @@ -262,21 +262,25 @@ AdamsBashforthSolver::AdamsBashforthSolver(Options* options) : Solver(options) { void AdamsBashforthSolver::setMaxTimestep(BoutReal dt) { AUTO_TRACE(); - if (dt > timestep) + if (dt > timestep) { return; // Already less than this + } - if (adaptive) // Should we throw if we're not adaptive as we've tried to set a timestep - // limit but couldn't? + // Should we throw if we're not adaptive as we've tried to set a + // timestep limit but couldn't? + if (adaptive) { timestep = dt; // Won't be used this time, but next + } } int AdamsBashforthSolver::init(int nout, BoutReal tstep) { TRACE("Initialising AdamsBashforth solver"); - /// Call the generic initialisation first - if (Solver::init(nout, tstep)) + // Call the generic initialisation first + if (Solver::init(nout, tstep) != 0) { return 1; + } output << "\n\tAdams-Bashforth (explicit) multistep solver\n"; @@ -289,7 +293,7 @@ int AdamsBashforthSolver::init(int nout, BoutReal tstep) { // Get total problem size int ntmp; - if (MPI_Allreduce(&nlocal, &ntmp, 1, MPI_INT, MPI_SUM, BoutComm::get())) { + if (MPI_Allreduce(&nlocal, &ntmp, 1, MPI_INT, MPI_SUM, BoutComm::get()) != 0) { throw BoutException("MPI_Allreduce failed!"); } neq = ntmp; @@ -507,15 +511,18 @@ int AdamsBashforthSolver::run() { } // Ditch last history point if we have enough - if (times.size() == maximum_order) + if (times.size() == static_cast(maximum_order)) { times.pop_back(); - if (history.size() == maximum_order) + } + if (history.size() == static_cast(maximum_order)) { history.pop_back(); + } if (current_order < maximum_order) { // Don't increase the order if we wanted to use the lower order. - if (not use_lower) + if (not use_lower) { current_order++; + } } // Taken an internal step, update times @@ -529,7 +536,7 @@ int AdamsBashforthSolver::run() { // Call the per internal timestep monitors call_timestep_monitors(simtime, dt); - }; + } // Put result into variables load_vars(std::begin(state)); @@ -544,8 +551,9 @@ int AdamsBashforthSolver::run() { iteration++; // Call the output step monitor function - if (call_monitors(simtime, s, nsteps)) + if (call_monitors(simtime, s, nsteps) != 0) { break; // Stop simulation + } } #if CHECK > 4 @@ -617,11 +625,10 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d BOUT_OMP(parallel for); for (int i = 0; i < nlocal; i++) { result2[i] = current[i] + half_update[i]; - }; + } load_vars(std::begin(result2)); // This is typically the most expensive part of this routine. 
- // run_rhs(timeIn + firstPart * dt); // Restore fields to the original state @@ -649,10 +656,9 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d BOUT_OMP(parallel for); for (int i = 0; i < nlocal; i++) { result[i] = current[i] + half_update[i]; - }; + } } } return err; } - From 1b36fee1ce3a56d8bcd736ec2b6611e22258a799 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Thu, 28 May 2020 16:36:12 +0100 Subject: [PATCH 391/428] Fix const of primitive types in function declaration --- src/solver/impls/adams_bashforth/adams_bashforth.hxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.hxx b/src/solver/impls/adams_bashforth/adams_bashforth.hxx index a0918a2e4e..755c155865 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.hxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.hxx @@ -58,7 +58,7 @@ public: private: // Take a single timestep of specified order. If adaptive also calculates // and returns an error estimate. - BoutReal take_step(const BoutReal timeIn, const BoutReal dt, const int order, + BoutReal take_step(BoutReal timeIn, BoutReal dt, int order, Array& current, Array& result); // Holds the current/next state From 10d5b540cc8648c965aeb6a26102b3e4c5b5db06 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 29 May 2020 09:58:43 +0100 Subject: [PATCH 392/428] Return early if not adaptive in take_step --- .../impls/adams_bashforth/adams_bashforth.cxx | 130 +++++++++--------- 1 file changed, 63 insertions(+), 67 deletions(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index 1c7c83bba2..b976099711 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -444,7 +444,7 @@ int AdamsBashforthSolver::run() { if (err < rtol) { // Successful step // Now we can consider what result we would get at - // lower/higher order Our timestep limit gets smaller as + // lower/higher order. Our timestep limit gets smaller as // the order increases for fixed error, hence we really // want to use the lowest order that satisfies the // tolerance. Or in other words we want to use the order @@ -572,9 +572,6 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d Array& result) { AUTO_TRACE(); - // The initial error is 0.0 - BoutReal err = 0.0; - Array full_update = AB_integrate(nlocal, timeIn + dt, times, history, order); // Calculate the new state given the history and current state. @@ -592,73 +589,72 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d } } - if (adaptive) { + if (not adaptive) { + return 0.0; + } - // Use this variable to say how big the first small timestep should be as a fraction - // of the large timestep, dt. Here fixed to 0.5 to take two equally sized half steps - // but left here to enable developer experimentation. 
- constexpr BoutReal firstPart = 0.5; - - // Take a small time step - note we don't need to call the rhs again just yet - Array half_update = - AB_integrate(nlocal, timeIn + (dt * firstPart), times, history, order); - - // ------------------------------------------- - // Now do the second small timestep -- note we need to call rhs again - // ------------------------------------------- - - // Add storage to history and the current time to times - history.emplace_front(nlocal); - times.emplace_front(timeIn + dt * firstPart); - - // Put intermediate result into variables, call rhs and save the derivatives - // Try to cheat for now with this HACK. If the order /= - // current_order then call must be part of the adapative_order code - // so don't recalculate just reuse stored derivatives. - if (order == current_order) { - Array result2(nlocal); - - // Now we have to calculate the state after the first small step as we will need to - // use this to calculate the derivatives at this point. - // std::transform(std::begin(current), std::end(current), std::begin(half_update), - // std::begin(result2), std::plus{}); - BOUT_OMP(parallel for); - for (int i = 0; i < nlocal; i++) { - result2[i] = current[i] + half_update[i]; - } + // Use this variable to say how big the first small timestep should be as a fraction + // of the large timestep, dt. Here fixed to 0.5 to take two equally sized half steps + // but left here to enable developer experimentation. + constexpr BoutReal firstPart = 0.5; + + // Take a small time step - note we don't need to call the rhs again just yet + Array half_update = + AB_integrate(nlocal, timeIn + (dt * firstPart), times, history, order); + + // ------------------------------------------- + // Now do the second small timestep -- note we need to call rhs again + // ------------------------------------------- + + // Add storage to history and the current time to times + history.emplace_front(nlocal); + times.emplace_front(timeIn + dt * firstPart); + + // Put intermediate result into variables, call rhs and save the derivatives + // Try to cheat for now with this HACK. If the order /= + // current_order then call must be part of the adapative_order code + // so don't recalculate just reuse stored derivatives. + if (order == current_order) { + Array result2(nlocal); + + // Now we have to calculate the state after the first small step as we will need to + // use this to calculate the derivatives at this point. + // std::transform(std::begin(current), std::end(current), std::begin(half_update), + // std::begin(result2), std::plus{}); + BOUT_OMP(parallel for); + for (int i = 0; i < nlocal; i++) { + result2[i] = current[i] + half_update[i]; + } - load_vars(std::begin(result2)); - // This is typically the most expensive part of this routine. - run_rhs(timeIn + firstPart * dt); + load_vars(std::begin(result2)); + // This is typically the most expensive part of this routine. + run_rhs(timeIn + firstPart * dt); - // Restore fields to the original state - load_vars(std::begin(current)); - } - save_derivs(std::begin(history[0])); - - // Finish the time step - AB_integrate_update(half_update, timeIn + dt, times, history, order); - - // Drop the temporary history information - history.pop_front(); - times.pop_front(); - - // Here we calculate the error by comparing the updates rather than output states - // this is to avoid issues where we have large fields but small derivatives (i.e. to - // avoid possible numerical issues at looking at the difference between two large - // numbers). 
- err = get_error(full_update, half_update); - - // Note here we don't add a small change onto result, we recalculate using the - // "full" two half step half_update. Rather than using result2 we just replace - // result here as we want to use this smaller step result - if (followHighOrder) { - BOUT_OMP(parallel for); - for (int i = 0; i < nlocal; i++) { - result[i] = current[i] + half_update[i]; - } + // Restore fields to the original state + load_vars(std::begin(current)); + } + save_derivs(std::begin(history[0])); + + // Finish the time step + AB_integrate_update(half_update, timeIn + dt, times, history, order); + + // Drop the temporary history information + history.pop_front(); + times.pop_front(); + + // Note here we don't add a small change onto result, we recalculate using the + // "full" two half step half_update. Rather than using result2 we just replace + // result here as we want to use this smaller step result + if (followHighOrder) { + BOUT_OMP(parallel for); + for (int i = 0; i < nlocal; i++) { + result[i] = current[i] + half_update[i]; } } - return err; + // Here we calculate the error by comparing the updates rather than output states + // this is to avoid issues where we have large fields but small derivatives (i.e. to + // avoid possible numerical issues at looking at the difference between two large + // numbers). + return get_error(full_update, half_update); } From 6d5f3200a516872bef919bcdac03a29a774930e0 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 29 May 2020 15:27:15 +0100 Subject: [PATCH 393/428] Remove trailing commas from OpenMP pragmas --- src/solver/impls/adams_bashforth/adams_bashforth.cxx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/solver/impls/adams_bashforth/adams_bashforth.cxx b/src/solver/impls/adams_bashforth/adams_bashforth.cxx index b976099711..046f6d30dc 100644 --- a/src/solver/impls/adams_bashforth/adams_bashforth.cxx +++ b/src/solver/impls/adams_bashforth/adams_bashforth.cxx @@ -197,7 +197,7 @@ void AB_integrate_update(Array& update, BoutReal timestep, for (std::size_t j = 0; j < static_cast(order); ++j) { const BoutReal factor = AB_coefficients[j]; - BOUT_OMP(parallel for); + BOUT_OMP(parallel for) for (std::size_t i = 0; i < static_cast(update.size()); ++i) { update[i] += history[j][i] * factor; } @@ -583,7 +583,7 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d // std::transform(std::begin(current), std::end(current), std::begin(full_update), // std::begin(result), std::plus{}); if (not(adaptive and followHighOrder)) { - BOUT_OMP(parallel for); + BOUT_OMP(parallel for) for (int i = 0; i < nlocal; i++) { result[i] = current[i] + full_update[i]; } @@ -621,7 +621,7 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d // use this to calculate the derivatives at this point. // std::transform(std::begin(current), std::end(current), std::begin(half_update), // std::begin(result2), std::plus{}); - BOUT_OMP(parallel for); + BOUT_OMP(parallel for) for (int i = 0; i < nlocal; i++) { result2[i] = current[i] + half_update[i]; } @@ -646,7 +646,7 @@ BoutReal AdamsBashforthSolver::take_step(const BoutReal timeIn, const BoutReal d // "full" two half step half_update. 
Rather than using result2 we just replace // result here as we want to use this smaller step result if (followHighOrder) { - BOUT_OMP(parallel for); + BOUT_OMP(parallel for) for (int i = 0; i < nlocal; i++) { result[i] = current[i] + half_update[i]; } From 314e648618746c0f076c8e3cf2756e7b5f47293b Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 13 Jan 2020 15:10:28 +0000 Subject: [PATCH 394/428] Add Mesh::getRegion for use in generic code Need to add the type aliases to Region so that the gmock matchers work (`EXPECT_THAT(region, ElementsAreArray(expected))`) --- include/bout/mesh.hxx | 16 ++++++++++++++++ include/bout/region.hxx | 8 ++++++++ tests/unit/mesh/test_mesh.cxx | 18 ++++++++++++++++++ 3 files changed, 42 insertions(+) diff --git a/include/bout/mesh.hxx b/include/bout/mesh.hxx index e9d387c454..27640d8cd1 100644 --- a/include/bout/mesh.hxx +++ b/include/bout/mesh.hxx @@ -849,6 +849,9 @@ class Mesh { /// Get the named region from the region_map for the data iterator /// /// Throws if region_name not found + template + const Region& getRegion(const std::string ®ion_name) const; + const Region<> &getRegion(const std::string ®ion_name) const{ return getRegion3D(region_name); } @@ -943,4 +946,17 @@ private: Array indexLookup3Dto2D; }; +template <> +inline const Region& Mesh::getRegion(const std::string& region_name) const { + return getRegion3D(region_name); +} +template <> +inline const Region& Mesh::getRegion(const std::string& region_name) const { + return getRegion2D(region_name); +} +template <> +inline const Region& Mesh::getRegion(const std::string& region_name) const { + return getRegionPerp(region_name); +} + #endif // __MESH_H__ diff --git a/include/bout/region.hxx b/include/bout/region.hxx index ec88541dad..5bb4c55ba9 100644 --- a/include/bout/region.hxx +++ b/include/bout/region.hxx @@ -482,6 +482,14 @@ public: /// Collection of contiguous regions using ContiguousBlocks = std::vector; + // Type aliases for STL-container compatibility + using value_type = T; + using reference = value_type&; + using const_reference = const value_type&; + using size_type = typename RegionIndices::size_type; + using iterator = typename RegionIndices::iterator; + using const_iterator = typename RegionIndices::const_iterator; + // NOTE:: // Probably want to require a mesh in constructor, both to know nx/ny/nz // but also to ensure consistency etc. 
diff --git a/tests/unit/mesh/test_mesh.cxx b/tests/unit/mesh/test_mesh.cxx index 7a6d1bd236..23b5940208 100644 --- a/tests/unit/mesh/test_mesh.cxx +++ b/tests/unit/mesh/test_mesh.cxx @@ -1,4 +1,5 @@ #include "gtest/gtest.h" +#include "gmock/gmock.h" #include "bout/mesh.hxx" #include "bout/region.hxx" @@ -54,6 +55,23 @@ TEST_F(MeshTest, GetRegionPerpFromMesh) { EXPECT_THROW(localmesh.getRegionPerp("SOME_MADE_UP_REGION_NAME"), BoutException); } +TEST_F(MeshTest, GetRegionTemplatedFromMesh) { + using namespace ::testing; + localmesh.createDefaultRegions(); + + const auto& region3d = localmesh.getRegion3D("RGN_ALL"); + const auto& regionT_3d = localmesh.getRegion("RGN_ALL"); + EXPECT_THAT(regionT_3d, ElementsAreArray(region3d)); + + const auto& region2d = localmesh.getRegion2D("RGN_ALL"); + const auto& regionT_2d = localmesh.getRegion("RGN_ALL"); + EXPECT_THAT(regionT_2d, ElementsAreArray(region2d)); + + const auto& regionPerp = localmesh.getRegionPerp("RGN_ALL"); + const auto& regionT_Perp = localmesh.getRegion("RGN_ALL"); + EXPECT_THAT(regionT_Perp, ElementsAreArray(regionPerp)); +} + TEST_F(MeshTest, AddRegionToMesh) { Region junk(0, 0, 0, 0, 0, 0, 1, 1); EXPECT_NO_THROW(localmesh.addRegion("RGN_JUNK", junk)); From 38b3e5f40085e10452aa162702713f4da4c5ef56 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 21 Sep 2020 13:21:45 +0100 Subject: [PATCH 395/428] Fix bug in DataFormat::writeFieldAttributes with FieldPerp Writing `yindex_global` was not the inverse of reading it --- src/fileio/dataformat.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fileio/dataformat.cxx b/src/fileio/dataformat.cxx index 0213b8c63a..db6b26b753 100644 --- a/src/fileio/dataformat.cxx +++ b/src/fileio/dataformat.cxx @@ -51,7 +51,7 @@ void DataFormat::writeFieldAttributes(const std::string& name, const FieldPerp& int yindex = f.getIndex(); if (yindex >= 0 and yindex < fieldmesh.LocalNy) { // write global y-index as attribute - setAttribute(name, "yindex_global", fieldmesh.getGlobalYIndex(f.getIndex())); + setAttribute(name, "yindex_global", fieldmesh.getGlobalYIndexNoBoundaries(f.getIndex())); } else { // y-index is not valid, set global y-index to -1 to indicate 'not-valid' setAttribute(name, "yindex_global", -1); From cc5a3f4c69bb79f52d1eda7a3cbeacea5f26e9e5 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 21 Sep 2020 13:37:16 +0100 Subject: [PATCH 396/428] Enable some HDF5-specific tests in default test set --- tests/integrated/test-io_hdf5/runtest | 3 ++- tests/integrated/test-restart-io_hdf5/runtest | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integrated/test-io_hdf5/runtest b/tests/integrated/test-io_hdf5/runtest index 21a9fdb1f0..0541372d11 100755 --- a/tests/integrated/test-io_hdf5/runtest +++ b/tests/integrated/test-io_hdf5/runtest @@ -4,7 +4,8 @@ # Run the test, compare results against the benchmark # -#Requires: hdf5 +# Requires: hdf5 +# Cores: 4 from boututils.run_wrapper import build_and_log, shell, launch_safe from boutdata.collect import collect diff --git a/tests/integrated/test-restart-io_hdf5/runtest b/tests/integrated/test-restart-io_hdf5/runtest index 18810a6f7f..d8970fd591 100755 --- a/tests/integrated/test-restart-io_hdf5/runtest +++ b/tests/integrated/test-restart-io_hdf5/runtest @@ -3,6 +3,7 @@ # Test file I/O by loading from restart files and writing to dump files # # requires: hdf5 +# cores: 4 from boutdata import restart from boutdata.collect import collect From d8ec5c7ed9fbc9bd3426c3ab49c59b32ee33deb6 Mon Sep 17 00:00:00 2001 
From: Peter Hill Date: Mon, 21 Sep 2020 14:14:53 +0100 Subject: [PATCH 397/428] Some tidying up to test-restarting - Apply black formatting - Suppress warning from `rm` - Don't quit on first failure - Fix typo ("Field3D" -> "Field2D") - Use `numpy.allclose` --- tests/integrated/test-restarting/runtest | 51 ++++++++++++++---------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/tests/integrated/test-restarting/runtest b/tests/integrated/test-restarting/runtest index fdc75b2b96..4fd1577680 100755 --- a/tests/integrated/test-restarting/runtest +++ b/tests/integrated/test-restarting/runtest @@ -12,61 +12,68 @@ build_and_log("restart test") s, out = launch_safe("./test_restarting nout=10", nproc=1, pipe=True) # Read reference data -f3d_0 = collect("f3d", path="data", info=False); -f2d_0 = collect("f2d", path="data", info=False); +f3d_0 = collect("f3d", path="data", info=False) +f2d_0 = collect("f2d", path="data", info=False) ########################################### # Run twice, restarting and appending print("-> Testing restart append") -shell("rm data/BOUT.dmp.0.nc") +shell("rm -f data/BOUT.dmp.0.nc") s, out = launch_safe("./test_restarting nout=5", nproc=1, pipe=True) s, out = launch_safe("./test_restarting nout=5 restart append", nproc=1, pipe=True) -f3d_1 = collect("f3d", path="data", info=False); -f2d_1 = collect("f2d", path="data", info=False); +f3d_1 = collect("f3d", path="data", info=False) +f2d_1 = collect("f2d", path="data", info=False) + +success = True +tolerance = 1e-10 if f3d_1.shape != f3d_0.shape: print("Fail: Field3D field has wrong shape") - exit(1) + success = False if f2d_1.shape != f2d_0.shape: print("Fail: Field2D field has wrong shape") - exit(1) + success = False -if np.max(np.abs(f3d_1 - f3d_0)) > 1e-10: +if not np.allclose(f3d_1, f3d_0, atol=tolerance): print("Fail: Field3D values differ") - exit(1) + success = False -if np.max(np.abs(f2d_1 - f2d_0)) > 1e-10: - print("Fail: Field3D values differ") - exit(1) +if not np.allclose(f2d_1, f2d_0, atol=tolerance): + print("Fail: Field2D values differ") + success = False ########################################### # Test restart print("-> Testing restart") -shell("rm data/BOUT.dmp.0.nc") +shell("rm -f data/BOUT.dmp.0.nc") s, out = launch_safe("./test_restarting nout=5", nproc=1, pipe=True) s, out = launch_safe("./test_restarting nout=5 restart", nproc=1, pipe=True) -f3d_1 = collect("f3d", path="data", info=False); -f2d_1 = collect("f2d", path="data", info=False); +f3d_1 = collect("f3d", path="data", info=False) +f2d_1 = collect("f2d", path="data", info=False) if f3d_1.shape[0] != 6: print("Fail: Field3D has wrong shape") - exit(1) + success = False if f2d_1.shape[0] != 6: print("Fail: Field2D has wrong shape") - exit(1) + success = False -if np.max(np.abs(f3d_1 - f3d_0[5:,:,:,:])) > 1e-10: - print("Fail: Field3D values differ") - exit(1) -if np.max(np.abs(f2d_1 - f2d_0[5:,:,:])) > 1e-10: +if not np.allclose(f3d_1, f3d_0[5:, :, :, :], atol=tolerance): print("Fail: Field3D values differ") + success = False +if not np.allclose(f2d_1, f2d_0[5:, :, :], atol=tolerance): + print("Fail: Field2D values differ") + success = False + +if not success: + print("=> Some tests failed") exit(1) -print("Success") +print("=> Success") exit(0) From e402f56bf3fe51e18117f1e502b0b8530e3422c3 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 21 Sep 2020 16:13:04 +0100 Subject: [PATCH 398/428] Use global mesh/dump in test-io_hdf5 --- tests/integrated/test-io_hdf5/test_io_hdf5.cxx | 3 +++ 1 file changed, 3 insertions(+) diff --git 
a/tests/integrated/test-io_hdf5/test_io_hdf5.cxx b/tests/integrated/test-io_hdf5/test_io_hdf5.cxx index 53188846fd..3ce5f9f861 100644 --- a/tests/integrated/test-io_hdf5/test_io_hdf5.cxx +++ b/tests/integrated/test-io_hdf5/test_io_hdf5.cxx @@ -9,6 +9,9 @@ #include +using bout::globals::dump; +using bout::globals::mesh; + int main(int argc, char **argv) { // Initialise BOUT++, setting up mesh From 32eda6e36d23fd99a7a49c6d8e55a24f6feb8e86 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 21 Sep 2020 16:31:47 +0100 Subject: [PATCH 399/428] Require netCDF for some tests Some tests use grid files which puts a hard dependency on netCDF, but others could be adapted to use either netCDF or HDF5 if we had a nice way to determine which is available at test-time (and maybe run the test for both if needed) --- tests/integrated/test-cyclic/CMakeLists.txt | 1 + .../test-drift-instability-staggered/CMakeLists.txt | 1 + tests/integrated/test-drift-instability/CMakeLists.txt | 1 + tests/integrated/test-drift-instability/runtest | 3 ++- tests/integrated/test-fieldgroupComm/CMakeLists.txt | 1 + tests/integrated/test-fieldgroupComm/runtest | 6 ++++-- .../test-griddata-yboundary-guards/CMakeLists.txt | 1 + tests/integrated/test-gyro/CMakeLists.txt | 1 + .../integrated/test-interchange-instability/CMakeLists.txt | 1 + tests/integrated/test-laplace/CMakeLists.txt | 1 + tests/integrated/test-smooth/CMakeLists.txt | 1 + 11 files changed, 15 insertions(+), 3 deletions(-) diff --git a/tests/integrated/test-cyclic/CMakeLists.txt b/tests/integrated/test-cyclic/CMakeLists.txt index 38c1825b45..2c0da15421 100644 --- a/tests/integrated/test-cyclic/CMakeLists.txt +++ b/tests/integrated/test-cyclic/CMakeLists.txt @@ -3,4 +3,5 @@ bout_add_integrated_test(test-cyclic USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES test_io.grd.nc + REQUIRES BOUT_HAS_NETCDF ) diff --git a/tests/integrated/test-drift-instability-staggered/CMakeLists.txt b/tests/integrated/test-drift-instability-staggered/CMakeLists.txt index 340a5429e2..4900f37c06 100644 --- a/tests/integrated/test-drift-instability-staggered/CMakeLists.txt +++ b/tests/integrated/test-drift-instability-staggered/CMakeLists.txt @@ -3,4 +3,5 @@ bout_add_integrated_test(test-drift-instability-staggered USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES runtest.py uedge.grd_std.cdl + REQUIRES BOUT_HAS_NETCDF ) diff --git a/tests/integrated/test-drift-instability/CMakeLists.txt b/tests/integrated/test-drift-instability/CMakeLists.txt index 31037226a6..a6a5c57a0d 100644 --- a/tests/integrated/test-drift-instability/CMakeLists.txt +++ b/tests/integrated/test-drift-instability/CMakeLists.txt @@ -3,4 +3,5 @@ bout_add_integrated_test(test-drift-instability USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES runtest.py uedge.grd_std.cdl + REQUIRES BOUT_HAS_NETCDF ) diff --git a/tests/integrated/test-drift-instability/runtest b/tests/integrated/test-drift-instability/runtest index 80f2b98b4e..bcac5e3fe2 100755 --- a/tests/integrated/test-drift-instability/runtest +++ b/tests/integrated/test-drift-instability/runtest @@ -1,5 +1,6 @@ #!/bin/sh -#requires: all_tests +# Requires: all_tests +# Requires: netcdf ./runtest.py diff --git a/tests/integrated/test-fieldgroupComm/CMakeLists.txt b/tests/integrated/test-fieldgroupComm/CMakeLists.txt index 24b0924d30..d95436b01a 100644 --- a/tests/integrated/test-fieldgroupComm/CMakeLists.txt +++ b/tests/integrated/test-fieldgroupComm/CMakeLists.txt @@ -3,4 +3,5 @@ bout_add_integrated_test(test-fieldgroupComm USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES cyclone_68x32.nc + 
REQUIRES BOUT_HAS_NETCDF ) diff --git a/tests/integrated/test-fieldgroupComm/runtest b/tests/integrated/test-fieldgroupComm/runtest index 4a70f0c2d4..976bc3bc12 100755 --- a/tests/integrated/test-fieldgroupComm/runtest +++ b/tests/integrated/test-fieldgroupComm/runtest @@ -1,10 +1,12 @@ #!/usr/bin/env python3 -# +# # Run the test, compare results # -#requires: all_tests +# Requires: all_tests +# Requires: netcdf +# Cores: 4 # Variables to compare from __future__ import print_function diff --git a/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt b/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt index 47d6f5dea4..365db5bd5d 100644 --- a/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt +++ b/tests/integrated/test-griddata-yboundary-guards/CMakeLists.txt @@ -8,4 +8,5 @@ bout_add_integrated_test(test-griddata-yboundary-guards data-singlenull-0/BOUT.inp data-singlenull-1/BOUT.inp data-singlenull-2/BOUT.inp + REQUIRES BOUT_HAS_NETCDF ) diff --git a/tests/integrated/test-gyro/CMakeLists.txt b/tests/integrated/test-gyro/CMakeLists.txt index 94f74c5776..bfa45a773c 100644 --- a/tests/integrated/test-gyro/CMakeLists.txt +++ b/tests/integrated/test-gyro/CMakeLists.txt @@ -3,4 +3,5 @@ bout_add_integrated_test(test-gyro USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES cyclone_68x32.nc data/benchmark.0.nc + REQUIRES BOUT_HAS_NETCDF ) diff --git a/tests/integrated/test-interchange-instability/CMakeLists.txt b/tests/integrated/test-interchange-instability/CMakeLists.txt index 971a70805a..f2d4847a40 100644 --- a/tests/integrated/test-interchange-instability/CMakeLists.txt +++ b/tests/integrated/test-interchange-instability/CMakeLists.txt @@ -2,4 +2,5 @@ bout_add_integrated_test(test-interchange-instability SOURCES 2fluid.cxx USE_RUNTEST EXTRA_FILES slab.6b5.r1.cdl slab.6b5.r10.cdl data_1/BOUT.inp data_10/BOUT.inp + REQUIRES BOUT_HAS_NETCDF ) diff --git a/tests/integrated/test-laplace/CMakeLists.txt b/tests/integrated/test-laplace/CMakeLists.txt index 488b0b7314..ec1c065df0 100644 --- a/tests/integrated/test-laplace/CMakeLists.txt +++ b/tests/integrated/test-laplace/CMakeLists.txt @@ -3,4 +3,5 @@ bout_add_integrated_test(test-laplace EXTRA_FILES test_laplace.grd.nc data/benchmark.0.nc USE_RUNTEST USE_DATA_BOUT_INP + REQUIRES BOUT_HAS_NETCDF ) diff --git a/tests/integrated/test-smooth/CMakeLists.txt b/tests/integrated/test-smooth/CMakeLists.txt index b7063beb38..4d087108ca 100644 --- a/tests/integrated/test-smooth/CMakeLists.txt +++ b/tests/integrated/test-smooth/CMakeLists.txt @@ -3,4 +3,5 @@ bout_add_integrated_test(test-smooth USE_RUNTEST USE_DATA_BOUT_INP EXTRA_FILES test_smooth.nc data/benchmark.0.nc + REQUIRES BOUT_HAS_NETCDF ) From dd776bdc3067ea78ceb7d6b0d8d9cd9af2871d97 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 21 Sep 2020 16:39:16 +0100 Subject: [PATCH 400/428] Remove unnecessary hardcoding of dump_format in test-invpar --- tests/integrated/test-invpar/data/BOUT.inp | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/integrated/test-invpar/data/BOUT.inp b/tests/integrated/test-invpar/data/BOUT.inp index 72b5170394..93af8673a6 100644 --- a/tests/integrated/test-invpar/data/BOUT.inp +++ b/tests/integrated/test-invpar/data/BOUT.inp @@ -5,8 +5,6 @@ NOUT = 0 # No timesteps MZ = 5 # Z size -dump_format = "nc" # NetCDF format. 
Alternative is "pdb" - TwistShift = false Ballooning = false From 062500b159a615a60210705efb009fe5ac5ff985 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 22 Sep 2020 11:39:54 +0100 Subject: [PATCH 401/428] Add Mesh::getLocal{X,Y,Z}Index{NoBoundaries} methods These are inverses of the `getGlobal` versions, replacing `{X,Y}LOCAL`, and are necessary for consistent, unique global indices into the boundaries for double-null tokamak grids --- include/bout/mesh.hxx | 34 ++++++++++++++++++++++--- src/mesh/impls/bout/boutmesh.cxx | 43 ++++++++++++++++++++++---------- src/mesh/impls/bout/boutmesh.hxx | 9 ++++--- tests/unit/test_extras.hxx | 8 ++++-- 4 files changed, 72 insertions(+), 22 deletions(-) diff --git a/include/bout/mesh.hxx b/include/bout/mesh.hxx index e9d387c454..da3a91bf5f 100644 --- a/include/bout/mesh.hxx +++ b/include/bout/mesh.hxx @@ -492,14 +492,16 @@ class Mesh { /// Returns the global Y index given a local index /// The local index must include the boundary, the global index does not. [[deprecated("Use getGlobalYIndex or getGlobalYIndexNoBoundaries instead")]] - virtual int YGLOBAL(int yloc) const { return getGlobalYIndexNoBoundaries(yloc); } + int YGLOBAL(int yloc) const { return getGlobalYIndexNoBoundaries(yloc); } /// Returns the local X index given a global index /// If the global index includes the boundary cells, then so does the local. - virtual int XLOCAL(int xglo) const = 0; + [[deprecated("Use getLocalXIndex or getLocalXIndexNoBoundaries instead")]] + int XLOCAL(int xglo) const { return getLocalXIndex(xglo); }; /// Returns the local Y index given a global index - /// The global index does not include the boundary cells, the local does. - virtual int YLOCAL(int yglo) const = 0; + /// If the global index includes the boundary cells, then so does the local. + [[deprecated("Use getLocalYIndex or getLocalYIndexNoBoundaries instead")]] + int YLOCAL(int yglo) const { return getLocalYIndexNoBoundaries(yglo); }; /// Returns a global X index given a local index. /// Global index includes boundary cells, local index includes boundary or guard cells. @@ -509,6 +511,14 @@ class Mesh { /// Global index excludes boundary cells, local index includes boundary or guard cells. virtual int getGlobalXIndexNoBoundaries(int xlocal) const = 0; + /// Returns a local X index given a global index. + /// Global index includes boundary cells, local index includes boundary or guard cells. + virtual int getLocalXIndex(int xlocal) const = 0; + + /// Returns a local X index given a global index. + /// Global index excludes boundary cells, local index includes boundary or guard cells. + virtual int getLocalXIndexNoBoundaries(int xlocal) const = 0; + /// Returns a global Y index given a local index. /// Global index includes boundary cells, local index includes boundary or guard cells. virtual int getGlobalYIndex(int ylocal) const = 0; @@ -517,6 +527,14 @@ class Mesh { /// Global index excludes boundary cells, local index includes boundary or guard cells. virtual int getGlobalYIndexNoBoundaries(int ylocal) const = 0; + /// Returns a local Y index given a global index. + /// Global index includes boundary cells, local index includes boundary or guard cells. + virtual int getLocalYIndex(int ylocal) const = 0; + + /// Returns a local Y index given a global index. + /// Global index excludes boundary cells, local index includes boundary or guard cells. + virtual int getLocalYIndexNoBoundaries(int ylocal) const = 0; + /// Returns a global Z index given a local index. 
/// Global index includes boundary cells, local index includes boundary or guard cells. virtual int getGlobalZIndex(int zlocal) const = 0; @@ -525,6 +543,14 @@ class Mesh { /// Global index excludes boundary cells, local index includes boundary or guard cells. virtual int getGlobalZIndexNoBoundaries(int zlocal) const = 0; + /// Returns a local Z index given a global index. + /// Global index includes boundary cells, local index includes boundary or guard cells. + virtual int getLocalZIndex(int zlocal) const = 0; + + /// Returns a local Z index given a global index. + /// Global index excludes boundary cells, local index includes boundary or guard cells. + virtual int getLocalZIndexNoBoundaries(int zlocal) const = 0; + /// Size of the mesh on this processor including guard/boundary cells int LocalNx, LocalNy, LocalNz; diff --git a/src/mesh/impls/bout/boutmesh.cxx b/src/mesh/impls/bout/boutmesh.cxx index 7f7d87cee1..9e13c49180 100644 --- a/src/mesh/impls/bout/boutmesh.cxx +++ b/src/mesh/impls/bout/boutmesh.cxx @@ -1521,8 +1521,11 @@ int BoutMesh::getGlobalXIndexNoBoundaries(int xlocal) const { return xlocal + PE_XIND * MXSUB - MXG; } -/// Returns a local X index given a global index -int BoutMesh::XLOCAL(int xglo) const { return xglo - PE_XIND * MXSUB; } +int BoutMesh::getLocalXIndex(int xlocal) const { return xlocal - PE_XIND * MXSUB; } + +int BoutMesh::getLocalXIndexNoBoundaries(int xlocal) const { + return xlocal - PE_XIND * MXSUB + MXG; +} /// Returns the global Y index given a local index int BoutMesh::YGLOBAL(BoutReal yloc, BoutReal &yglo) const { @@ -1547,12 +1550,22 @@ int BoutMesh::getGlobalYIndexNoBoundaries(int ylocal) const { return ylocal + PE_YIND * MYSUB - MYG; } +int BoutMesh::getLocalYIndex(int yglobal) const { + int ylocal = yglobal - PE_YIND * MYSUB; + if (jyseps1_2 > jyseps2_1 and PE_YIND * MYSUB + 2 * MYG + 1 > ny_inner) { + // Double null, and we are past the upper target + ylocal -= 2 * MYG; + } + return ylocal; +} + +int BoutMesh::getLocalYIndexNoBoundaries(int yglobal) const { + return yglobal - PE_YIND * MYSUB + MYG; +} + /// Global Y index given local index and processor int BoutMesh::YGLOBAL(int yloc, int yproc) const { return yloc + yproc * MYSUB - MYG; } -/// Returns a local Y index given a global index -int BoutMesh::YLOCAL(int yglo) const { return yglo - PE_YIND * MYSUB + MYG; } - int BoutMesh::YLOCAL(int yglo, int yproc) const { return yglo - yproc * MYSUB + MYG; } /// Returns a global Z index given a local index. 
@@ -1564,6 +1577,10 @@ int BoutMesh::getGlobalZIndex(int zlocal) const { return zlocal; } /// Note: at the moment z-direction is always periodic, so has zero boundary cells int BoutMesh::getGlobalZIndexNoBoundaries(int zlocal) const { return zlocal; } +int BoutMesh::getLocalZIndex(int zglobal) const { return zglobal; } + +int BoutMesh::getLocalZIndexNoBoundaries(int zglobal) const { return zglobal; } + /// Return the Y processor number given a global Y index int BoutMesh::YPROC(int yind) { if ((yind < 0) || (yind > ny)) @@ -1658,8 +1675,8 @@ void BoutMesh::set_connection(int ypos1, int ypos2, int xge, int xlt, bool ts) { // Convert X coordinates into local indices - xge = XLOCAL(xge); - xlt = XLOCAL(xlt); + xge = getLocalXIndex(xge); + xlt = getLocalXIndex(xlt); if ((xge >= LocalNx) || (xlt <= 0)) { return; // Not in this x domain @@ -1750,8 +1767,8 @@ void BoutMesh::add_target(int ypos, int xge, int xlt) { ypedown, xge, xlt); // Convert X coordinates into local indices - xge = XLOCAL(xge); - xlt = XLOCAL(xlt); + xge = getLocalXIndex(xge); + xlt = getLocalXIndex(xlt); if ((xge >= LocalNx) || (xlt <= 0)) { return; // Not in this x domain } @@ -2496,14 +2513,14 @@ const Field3D BoutMesh::smoothSeparatrix(const Field3D &f) { Field3D result{emptyFrom(f)}; if ((ixseps_inner > 0) && (ixseps_inner < nx - 1)) { if (XPROC(ixseps_inner) == PE_XIND) { - int x = XLOCAL(ixseps_inner); + int x = getLocalXIndex(ixseps_inner); for (int y = 0; y < LocalNy; y++) for (int z = 0; z < LocalNz; z++) { result(x, y, z) = 0.5 * (f(x, y, z) + f(x - 1, y, z)); } } if (XPROC(ixseps_inner - 1) == PE_XIND) { - int x = XLOCAL(ixseps_inner - 1); + int x = getLocalXIndex(ixseps_inner - 1); for (int y = 0; y < LocalNy; y++) for (int z = 0; z < LocalNz; z++) { result(x, y, z) = 0.5 * (f(x, y, z) + f(x + 1, y, z)); @@ -2512,14 +2529,14 @@ const Field3D BoutMesh::smoothSeparatrix(const Field3D &f) { } if ((ixseps_outer > 0) && (ixseps_outer < nx - 1) && (ixseps_outer != ixseps_inner)) { if (XPROC(ixseps_outer) == PE_XIND) { - int x = XLOCAL(ixseps_outer); + int x = getLocalXIndex(ixseps_outer); for (int y = 0; y < LocalNy; y++) for (int z = 0; z < LocalNz; z++) { result(x, y, z) = 0.5 * (f(x, y, z) + f(x - 1, y, z)); } } if (XPROC(ixseps_outer - 1) == PE_XIND) { - int x = XLOCAL(ixseps_outer - 1); + int x = getLocalXIndex(ixseps_outer - 1); for (int y = 0; y < LocalNy; y++) for (int z = 0; z < LocalNz; z++) { result(x, y, z) = 0.5 * (f(x, y, z) + f(x + 1, y, z)); diff --git a/src/mesh/impls/bout/boutmesh.hxx b/src/mesh/impls/bout/boutmesh.hxx index c16d5d3f0f..82f82d6539 100644 --- a/src/mesh/impls/bout/boutmesh.hxx +++ b/src/mesh/impls/bout/boutmesh.hxx @@ -177,13 +177,16 @@ class BoutMesh : public Mesh { int getGlobalXIndex(int xlocal) const override; int getGlobalXIndexNoBoundaries(int xlocal) const override; + int getLocalXIndex(int xlocal) const override; + int getLocalXIndexNoBoundaries(int xlocal) const override; int getGlobalYIndex(int ylocal) const override; int getGlobalYIndexNoBoundaries(int ylocal) const override; + int getLocalYIndex(int ylocal) const override; + int getLocalYIndexNoBoundaries(int ylocal) const override; int getGlobalZIndex(int zlocal) const override; int getGlobalZIndexNoBoundaries(int zlocal) const override; - - int XLOCAL(int xglo) const override; - int YLOCAL(int yglo) const override; + int getLocalZIndex(int zlocal) const override; + int getLocalZIndexNoBoundaries(int zlocal) const override; private: std::string gridname; diff --git a/tests/unit/test_extras.hxx b/tests/unit/test_extras.hxx 
index 322cbd1f99..5bb451f5bd 100644 --- a/tests/unit/test_extras.hxx +++ b/tests/unit/test_extras.hxx @@ -291,8 +291,12 @@ public: int getGlobalYIndexNoBoundaries(int) const override { return 0; } int getGlobalZIndex(int) const override { return 0; } int getGlobalZIndexNoBoundaries(int) const override { return 0; } - int XLOCAL(int UNUSED(xglo)) const override { return 0; } - int YLOCAL(int UNUSED(yglo)) const override { return 0; } + int getLocalXIndex(int) const override { return 0; } + int getLocalXIndexNoBoundaries(int) const override { return 0; } + int getLocalYIndex(int) const override { return 0; } + int getLocalYIndexNoBoundaries(int) const override { return 0; } + int getLocalZIndex(int) const override { return 0; } + int getLocalZIndexNoBoundaries(int) const override { return 0; } void initDerivs(Options * opt){ StaggerGrids=true; From 6dd22c5ae931adda1be632a6b2311de30c5696a6 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 22 Sep 2020 11:42:06 +0100 Subject: [PATCH 402/428] Use more consistent global index when writing/reading `FieldPerp`s --- src/fileio/dataformat.cxx | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/fileio/dataformat.cxx b/src/fileio/dataformat.cxx index db6b26b753..4bfe8b8b1b 100644 --- a/src/fileio/dataformat.cxx +++ b/src/fileio/dataformat.cxx @@ -51,7 +51,7 @@ void DataFormat::writeFieldAttributes(const std::string& name, const FieldPerp& int yindex = f.getIndex(); if (yindex >= 0 and yindex < fieldmesh.LocalNy) { // write global y-index as attribute - setAttribute(name, "yindex_global", fieldmesh.getGlobalYIndexNoBoundaries(f.getIndex())); + setAttribute(name, "yindex_global", fieldmesh.getGlobalYIndex(f.getIndex())); } else { // y-index is not valid, set global y-index to -1 to indicate 'not-valid' setAttribute(name, "yindex_global", -1); @@ -82,9 +82,8 @@ void DataFormat::readFieldAttributes(const std::string& name, FieldPerp& f) { // Note: don't use DataFormat::mesh variable, because it may be null if the DataFormat // is part of a GridFromFile, which is created before the Mesh. 
if (getAttribute(name, "yindex_global", yindex_global)) { - // Apply correction because yindex_global includes boundaries - f.setIndex(f.getMesh()->YLOCAL(yindex_global) - f.getMesh()->ystart); + f.setIndex(f.getMesh()->getLocalYIndex(yindex_global)); } else { - f.setIndex(f.getMesh()->YLOCAL(0)); + f.setIndex(f.getMesh()->getLocalYIndex(0)); } } From 7ab1f484bbe44ab411de9d927620f438c22d99a9 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 22 Sep 2020 15:21:13 +0100 Subject: [PATCH 403/428] Use no boundary form of global index conversion in FieldPerp I/O --- src/fileio/dataformat.cxx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/fileio/dataformat.cxx b/src/fileio/dataformat.cxx index 4bfe8b8b1b..590f34fbca 100644 --- a/src/fileio/dataformat.cxx +++ b/src/fileio/dataformat.cxx @@ -51,7 +51,7 @@ void DataFormat::writeFieldAttributes(const std::string& name, const FieldPerp& int yindex = f.getIndex(); if (yindex >= 0 and yindex < fieldmesh.LocalNy) { // write global y-index as attribute - setAttribute(name, "yindex_global", fieldmesh.getGlobalYIndex(f.getIndex())); + setAttribute(name, "yindex_global", fieldmesh.getGlobalYIndexNoBoundaries(yindex)); } else { // y-index is not valid, set global y-index to -1 to indicate 'not-valid' setAttribute(name, "yindex_global", -1); @@ -82,8 +82,8 @@ void DataFormat::readFieldAttributes(const std::string& name, FieldPerp& f) { // Note: don't use DataFormat::mesh variable, because it may be null if the DataFormat // is part of a GridFromFile, which is created before the Mesh. if (getAttribute(name, "yindex_global", yindex_global)) { - f.setIndex(f.getMesh()->getLocalYIndex(yindex_global)); + f.setIndex(f.getMesh()->getLocalYIndexNoBoundaries(yindex_global)); } else { - f.setIndex(f.getMesh()->getLocalYIndex(0)); + f.setIndex(f.getMesh()->getLocalYIndexNoBoundaries(0)); } } From b568ba80370b9739166bee5c8965a6e57694ee39 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 23 Sep 2020 16:23:13 +0100 Subject: [PATCH 404/428] Rename *local -> *global argument in mesh index routines Co-authored-by: johnomotani --- include/bout/mesh.hxx | 12 ++++++------ src/mesh/impls/bout/boutmesh.cxx | 6 +++--- src/mesh/impls/bout/boutmesh.hxx | 12 ++++++------ 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/include/bout/mesh.hxx b/include/bout/mesh.hxx index da3a91bf5f..e280950a2e 100644 --- a/include/bout/mesh.hxx +++ b/include/bout/mesh.hxx @@ -513,11 +513,11 @@ class Mesh { /// Returns a local X index given a global index. /// Global index includes boundary cells, local index includes boundary or guard cells. - virtual int getLocalXIndex(int xlocal) const = 0; + virtual int getLocalXIndex(int xglobal) const = 0; /// Returns a local X index given a global index. /// Global index excludes boundary cells, local index includes boundary or guard cells. - virtual int getLocalXIndexNoBoundaries(int xlocal) const = 0; + virtual int getLocalXIndexNoBoundaries(int xglobal) const = 0; /// Returns a global Y index given a local index. /// Global index includes boundary cells, local index includes boundary or guard cells. @@ -529,11 +529,11 @@ class Mesh { /// Returns a local Y index given a global index. /// Global index includes boundary cells, local index includes boundary or guard cells. - virtual int getLocalYIndex(int ylocal) const = 0; + virtual int getLocalYIndex(int yglobal) const = 0; /// Returns a local Y index given a global index. /// Global index excludes boundary cells, local index includes boundary or guard cells. 
- virtual int getLocalYIndexNoBoundaries(int ylocal) const = 0; + virtual int getLocalYIndexNoBoundaries(int yglobal) const = 0; /// Returns a global Z index given a local index. /// Global index includes boundary cells, local index includes boundary or guard cells. @@ -545,11 +545,11 @@ class Mesh { /// Returns a local Z index given a global index. /// Global index includes boundary cells, local index includes boundary or guard cells. - virtual int getLocalZIndex(int zlocal) const = 0; + virtual int getLocalZIndex(int zglobal) const = 0; /// Returns a local Z index given a global index. /// Global index excludes boundary cells, local index includes boundary or guard cells. - virtual int getLocalZIndexNoBoundaries(int zlocal) const = 0; + virtual int getLocalZIndexNoBoundaries(int zglobal) const = 0; /// Size of the mesh on this processor including guard/boundary cells int LocalNx, LocalNy, LocalNz; diff --git a/src/mesh/impls/bout/boutmesh.cxx b/src/mesh/impls/bout/boutmesh.cxx index 9e13c49180..63fbd0180a 100644 --- a/src/mesh/impls/bout/boutmesh.cxx +++ b/src/mesh/impls/bout/boutmesh.cxx @@ -1521,10 +1521,10 @@ int BoutMesh::getGlobalXIndexNoBoundaries(int xlocal) const { return xlocal + PE_XIND * MXSUB - MXG; } -int BoutMesh::getLocalXIndex(int xlocal) const { return xlocal - PE_XIND * MXSUB; } +int BoutMesh::getLocalXIndex(int xglobal) const { return xglobal - PE_XIND * MXSUB; } -int BoutMesh::getLocalXIndexNoBoundaries(int xlocal) const { - return xlocal - PE_XIND * MXSUB + MXG; +int BoutMesh::getLocalXIndexNoBoundaries(int xglobal) const { + return xglobal - PE_XIND * MXSUB + MXG; } /// Returns the global Y index given a local index diff --git a/src/mesh/impls/bout/boutmesh.hxx b/src/mesh/impls/bout/boutmesh.hxx index 82f82d6539..d18ab9e2f9 100644 --- a/src/mesh/impls/bout/boutmesh.hxx +++ b/src/mesh/impls/bout/boutmesh.hxx @@ -177,16 +177,16 @@ class BoutMesh : public Mesh { int getGlobalXIndex(int xlocal) const override; int getGlobalXIndexNoBoundaries(int xlocal) const override; - int getLocalXIndex(int xlocal) const override; - int getLocalXIndexNoBoundaries(int xlocal) const override; + int getLocalXIndex(int xglobal) const override; + int getLocalXIndexNoBoundaries(int xglobal) const override; int getGlobalYIndex(int ylocal) const override; int getGlobalYIndexNoBoundaries(int ylocal) const override; - int getLocalYIndex(int ylocal) const override; - int getLocalYIndexNoBoundaries(int ylocal) const override; + int getLocalYIndex(int yglobal) const override; + int getLocalYIndexNoBoundaries(int yglobal) const override; int getGlobalZIndex(int zlocal) const override; int getGlobalZIndexNoBoundaries(int zlocal) const override; - int getLocalZIndex(int zlocal) const override; - int getLocalZIndexNoBoundaries(int zlocal) const override; + int getLocalZIndex(int zglobal) const override; + int getLocalZIndexNoBoundaries(int zglobal) const override; private: std::string gridname; From 1eadf81f1ebc1f8f97ee9fd4b694c1b96175fb2f Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 23 Sep 2020 16:19:58 +0100 Subject: [PATCH 405/428] Better fix for reading/writing FieldPerp index Use boundary-aware form of global index conversion, but check it's an interior point when writing it. 
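A tiny worked example of the y-index conversions these last few commits rely on (illustration only, not part of the patch): the no-boundaries forms added in PATCH 401 are plain shifts, ylocal + PE_YIND*MYSUB - MYG and yglobal - PE_YIND*MYSUB + MYG, as in boutmesh.cxx above, so they round-trip exactly. The MYG/MYSUB/PE_YIND numbers below are assumptions chosen only for the sketch, not taken from any test in this series.

    // Minimal sketch, assuming MYG = 2 guard cells, MYSUB = 16 grid points
    // per processor and PE_YIND = 0 (illustrative values only).
    #include <cassert>

    int main() {
      const int MYG = 2;
      const int MYSUB = 16;
      const int PE_YIND = 0;

      const int ylocal = MYG;                             // first interior (grid) cell
      const int yglobal = ylocal + PE_YIND * MYSUB - MYG; // getGlobalYIndexNoBoundaries
      assert(yglobal == 0);
      assert(yglobal - PE_YIND * MYSUB + MYG == ylocal);  // getLocalYIndexNoBoundaries round trip
      return 0;
    }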
--- src/fileio/dataformat.cxx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/fileio/dataformat.cxx b/src/fileio/dataformat.cxx index 590f34fbca..dcf681d01d 100644 --- a/src/fileio/dataformat.cxx +++ b/src/fileio/dataformat.cxx @@ -49,9 +49,9 @@ void DataFormat::writeFieldAttributes(const std::string& name, const FieldPerp& auto& fieldmesh = *f.getMesh(); int yindex = f.getIndex(); - if (yindex >= 0 and yindex < fieldmesh.LocalNy) { + if (yindex >= fieldmesh.ystart and yindex <= fieldmesh.yend) { // write global y-index as attribute - setAttribute(name, "yindex_global", fieldmesh.getGlobalYIndexNoBoundaries(yindex)); + setAttribute(name, "yindex_global", fieldmesh.getGlobalYIndex(yindex)); } else { // y-index is not valid, set global y-index to -1 to indicate 'not-valid' setAttribute(name, "yindex_global", -1); @@ -82,8 +82,9 @@ void DataFormat::readFieldAttributes(const std::string& name, FieldPerp& f) { // Note: don't use DataFormat::mesh variable, because it may be null if the DataFormat // is part of a GridFromFile, which is created before the Mesh. if (getAttribute(name, "yindex_global", yindex_global)) { - f.setIndex(f.getMesh()->getLocalYIndexNoBoundaries(yindex_global)); + f.setIndex(f.getMesh()->getLocalYIndex(yindex_global)); } else { + // No boundary form here, so default value is on a grid cell f.setIndex(f.getMesh()->getLocalYIndexNoBoundaries(0)); } } From 2f6496171b286cf11e13d3d2b8f08534983c49b4 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 25 Sep 2020 10:50:26 +0100 Subject: [PATCH 406/428] Use last boundary point for global y-index in restart IO test --- tests/integrated/test-restart-io/runtest | 2 +- tests/integrated/test-restart-io_hdf5/runtest | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integrated/test-restart-io/runtest b/tests/integrated/test-restart-io/runtest index b01d8af6c6..8e2de37e18 100755 --- a/tests/integrated/test-restart-io/runtest +++ b/tests/integrated/test-restart-io/runtest @@ -30,7 +30,7 @@ testvars = {} testvars['f3d'] = BoutArray(numpy.exp(numpy.sin(x + y + z)), attributes = {'bout_type':'Field3D'}) testvars['f2d'] = BoutArray(numpy.exp(numpy.sin(x + y + 1.))[:, :, 0], attributes = {'bout_type':'Field2D'}) testvars['fperp_lower'] = BoutArray(numpy.exp(numpy.sin(x + z + 2.))[:, 0, :], attributes = {'bout_type':'FieldPerp', 'yindex_global':0}) -testvars['fperp_upper'] = BoutArray(numpy.exp(numpy.sin(x + z + 3.))[:, 0, :], attributes = {'bout_type':'FieldPerp', 'yindex_global':16}) +testvars['fperp_upper'] = BoutArray(numpy.exp(numpy.sin(x + z + 3.))[:, 0, :], attributes = {'bout_type':'FieldPerp', 'yindex_global':ny-1}) # make restart file restartdir = os.path.join('data', 'restart') diff --git a/tests/integrated/test-restart-io_hdf5/runtest b/tests/integrated/test-restart-io_hdf5/runtest index d8970fd591..c323492fce 100755 --- a/tests/integrated/test-restart-io_hdf5/runtest +++ b/tests/integrated/test-restart-io_hdf5/runtest @@ -30,7 +30,7 @@ testvars = {} testvars['f3d'] = BoutArray(numpy.exp(numpy.sin(x + y + z)), attributes = {'bout_type':'Field3D'}) testvars['f2d'] = BoutArray(numpy.exp(numpy.sin(x + y + 1.))[:, :, 0], attributes = {'bout_type':'Field2D'}) testvars['fperp_lower'] = BoutArray(numpy.exp(numpy.sin(x + z + 2.))[:, 0, :], attributes = {'bout_type':'FieldPerp', 'yindex_global':0}) -testvars['fperp_upper'] = BoutArray(numpy.exp(numpy.sin(x + z + 3.))[:, 0, :], attributes = {'bout_type':'FieldPerp', 'yindex_global':16}) +testvars['fperp_upper'] = 
BoutArray(numpy.exp(numpy.sin(x + z + 3.))[:, 0, :], attributes = {'bout_type':'FieldPerp', 'yindex_global':ny-1}) # make restart file restartdir = os.path.join('data', 'restart') From 6da048e96a7b14f98f7e5f7d9adadfd67bf5104c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 25 Sep 2020 10:51:42 +0100 Subject: [PATCH 407/428] Don't read/write FieldPerps in guard cells To be more precise, mark the global yindex for FieldPerps in guard cells as invalid when writing them, and set the (local) yindex to be invalid if its in a guard cell on this processor when reading them. It is still possible to write FieldPerps in guard cells, it's just that `collect` won't read them, and the user will need to use a lower-level API to do so. --- src/fileio/dataformat.cxx | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/src/fileio/dataformat.cxx b/src/fileio/dataformat.cxx index dcf681d01d..ce58f8cbfb 100644 --- a/src/fileio/dataformat.cxx +++ b/src/fileio/dataformat.cxx @@ -48,14 +48,18 @@ void DataFormat::writeFieldAttributes(const std::string& name, const FieldPerp& writeFieldAttributes(name, static_cast(f), shiftOutput); auto& fieldmesh = *f.getMesh(); - int yindex = f.getIndex(); - if (yindex >= fieldmesh.ystart and yindex <= fieldmesh.yend) { - // write global y-index as attribute - setAttribute(name, "yindex_global", fieldmesh.getGlobalYIndex(yindex)); - } else { - // y-index is not valid, set global y-index to -1 to indicate 'not-valid' - setAttribute(name, "yindex_global", -1); - } + const int yindex = f.getIndex(); + const int start = fieldmesh.hasBndryLowerY() ? 0 : fieldmesh.ystart; + const int end = fieldmesh.hasBndryUpperY() ? fieldmesh.LocalNy : fieldmesh.yend + 1; + + // Only use the global y index if it's either an interior (grid) + // point, or a boundary point. Otherwise, use -1 to indicate a guard + // cell or an invalid value. The actual FieldPerp value is still + // written to file + const int global_yindex = + (yindex >= start and yindex < end) ? fieldmesh.getGlobalYIndex(yindex) : -1; + + setAttribute(name, "yindex_global", global_yindex); } void DataFormat::readFieldAttributes(const std::string& name, Field& f) { @@ -82,9 +86,22 @@ void DataFormat::readFieldAttributes(const std::string& name, FieldPerp& f) { // Note: don't use DataFormat::mesh variable, because it may be null if the DataFormat // is part of a GridFromFile, which is created before the Mesh. if (getAttribute(name, "yindex_global", yindex_global)) { - f.setIndex(f.getMesh()->getLocalYIndex(yindex_global)); + + auto& fieldmesh = *f.getMesh(); + const int start = fieldmesh.hasBndryLowerY() ? 0 : fieldmesh.ystart; + const int end = fieldmesh.hasBndryUpperY() ? fieldmesh.LocalNy : fieldmesh.yend + 1; + + // Only use the global y index if it's either an interior (grid) + // point, or a boundary point. Otherwise, use -1 to indicate a + // guard cell or an invalid value. This may mean that `f` does not + // get allocated + const int yindex_local = fieldmesh.getLocalYIndex(yindex_global); + const int yindex = (yindex_local >= start and yindex_local < end) ? yindex_local : -1; + f.setIndex(yindex); } else { - // No boundary form here, so default value is on a grid cell + // "yindex_global" wasn't present, so this might be an older + // file. 
We use the no-boundary form here, such that we get a + // default value on a grid cell f.setIndex(f.getMesh()->getLocalYIndexNoBoundaries(0)); } } From 50f039f63e7cc6c3c60e60974db7f1ba2572344b Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 25 Sep 2020 10:55:27 +0100 Subject: [PATCH 408/428] Make sure test-restart-io sets exit code correctly on failure Also: - Don't plot anything on failure by default - Clean up quietly - More consistent pass/fail message --- tests/integrated/test-restart-io/runtest | 13 +++++++------ tests/integrated/test-restart-io_hdf5/runtest | 17 ++++++++++------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/tests/integrated/test-restart-io/runtest b/tests/integrated/test-restart-io/runtest index 8e2de37e18..bf1f9ac605 100755 --- a/tests/integrated/test-restart-io/runtest +++ b/tests/integrated/test-restart-io/runtest @@ -84,7 +84,7 @@ success = True # y-processor-indices, and collect() cannot handle this. for nproc in [1, 2, 4]: # delete any existing output - shell("rm data/BOUT.dmp.*.nc data/BOUT.restart.*.nc") + shell("rm -f data/BOUT.dmp.*.nc data/BOUT.restart.*.nc") # create restart files for the run restart.redistribute(nproc, path=restartdir, output='data') @@ -105,8 +105,10 @@ for nproc in [1, 2, 4]: if not numpy.all(testvar == result): success = False print(name+' is different') - from boututils.showdata import showdata - showdata([result, testvar]) + # Don't plot anything by default + if False: + from boututils.showdata import showdata + showdata([result, testvar]) if name == 'fperp_lower' or name == 'fperp_upper': yindex_result = result.attributes['yindex_global'] yindex_test = testvar.attributes['yindex_global'] @@ -145,10 +147,9 @@ for nproc in [1, 2, 4]: ) if success: - print('pass') - + print('=> All restart I/O tests passed') # clean up binary files - shell("rm data/BOUT.dmp.*.nc data/BOUT.restart.*.nc data/restart/BOUT.restart.0.nc") + shell("rm -f data/BOUT.dmp.*.nc data/BOUT.restart.*.nc data/restart/BOUT.restart.0.nc") exit(0) print("=> Some failed tests") diff --git a/tests/integrated/test-restart-io_hdf5/runtest b/tests/integrated/test-restart-io_hdf5/runtest index c323492fce..8d6f00b7a2 100755 --- a/tests/integrated/test-restart-io_hdf5/runtest +++ b/tests/integrated/test-restart-io_hdf5/runtest @@ -71,7 +71,7 @@ success = True # y-processor-indices, and collect() cannot handle this. 
for nproc in [1, 2, 4]: # delete any existing output - shell("rm data/BOUT.dmp.*.h5 data/BOUT.restart.*.h5") + shell("rm -f data/BOUT.dmp.*.h5 data/BOUT.restart.*.h5") # create restart files for the run restart.redistribute(nproc, path=restartdir, output='data') @@ -92,8 +92,10 @@ for nproc in [1, 2, 4]: if not numpy.all(testvar == result): success = False print(name+' is different') - from boututils.showdata import showdata - showdata([result, testvar]) + # Don't plot anything by default + if False: + from boututils.showdata import showdata + showdata([result, testvar]) if name == 'fperp_lower' or name == 'fperp_upper': yindex_result = result.attributes['yindex_global'] yindex_test = testvar.attributes['yindex_global'] @@ -116,9 +118,10 @@ for nproc in [1, 2, 4]: print('Fail: yindex_global of '+name+' evolving version is '+str(yindex_result)+' should be '+str(yindex_test)) if success: - print('pass') - + print('=> All restart I/O tests passed') # clean up binary files - shell("rm data/BOUT.dmp.*.h5 data/BOUT.restart.*.h5 data/restart/BOUT.restart.0.h5") + shell("rm -f data/BOUT.dmp.*.h5 data/BOUT.restart.*.h5 data/restart/BOUT.restart.0.h5") + exit(0) -exit(0) +print("=> Some failed tests") +exit(1) From 5689b51865a2eec45a1a245d9e632c6bf400511c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 4 Aug 2021 15:49:46 +0100 Subject: [PATCH 409/428] Bump boutdata/boututils for v4.4 --- externalpackages/boutdata | 2 +- externalpackages/boututils | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/externalpackages/boutdata b/externalpackages/boutdata index 211434161d..3a66e6cb16 160000 --- a/externalpackages/boutdata +++ b/externalpackages/boutdata @@ -1 +1 @@ -Subproject commit 211434161df05a85af4d152df44ed9a8225f170a +Subproject commit 3a66e6cb162afb357d96c37a95eb3adf5f77a717 diff --git a/externalpackages/boututils b/externalpackages/boututils index 08b572d20a..a79a00a54f 160000 --- a/externalpackages/boututils +++ b/externalpackages/boututils @@ -1 +1 @@ -Subproject commit 08b572d20a6c693b051f6504c599c539f5a68e82 +Subproject commit a79a00a54f69663117a93dd42ffa6004783432c8 From a60fdbab94205786d00b2438de551cdf46ad031f Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 11 Jun 2021 16:04:49 +0100 Subject: [PATCH 410/428] Don't run pytest on GHA: all tested modules are now separate repos --- .travis_script.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis_script.sh b/.travis_script.sh index 2958d6fb8b..acddb8720e 100755 --- a/.travis_script.sh +++ b/.travis_script.sh @@ -126,7 +126,6 @@ fi if [[ ${INTEGRATED} == 1 ]] then time make check-integrated-tests - time py.test-3 tools/pylib/ fi if [[ ${MMS} == 1 ]] From 8ae765c60b96a673d37eb474f11400ac4e3d3dfd Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 11 Jun 2021 16:14:22 +0100 Subject: [PATCH 411/428] Make bout-squashoutput a symlink to boutdata version --- bin/bout-squashoutput | 60 +------------------------------------------ 1 file changed, 1 insertion(+), 59 deletions(-) mode change 100755 => 120000 bin/bout-squashoutput diff --git a/bin/bout-squashoutput b/bin/bout-squashoutput deleted file mode 100755 index 804e68ab7f..0000000000 --- a/bin/bout-squashoutput +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python3 -# PYTHON_ARGCOMPLETE_OK - -# Call the squashoutput function using arguments from -# command line when this file is called as an executable - -import argparse -from sys import exit -try: - import argcomplete -except ImportError: - argcomplete = None -import boutdata.squashoutput as squash - -# Parse 
command line arguments -parser = argparse.ArgumentParser( - squash.__doc__ + "\n\n" + squash.squashoutput.__doc__) - - -def str_to_bool(string): - return string.lower() == "true" or string.lower() == "t" - - -def int_or_none(string): - try: - return int(string) - except ValueError: - if string.lower() == 'none' or string.lower() == 'n': - return None - else: - raise - -parser.add_argument("datadir", nargs='?', default=".") -parser.add_argument("--outputname", default="BOUT.dmp.nc") -parser.add_argument("--tind", type=int_or_none, nargs='*', default=[None]) -parser.add_argument("--xind", type=int_or_none, nargs='*', default=[None]) -parser.add_argument("--yind", type=int_or_none, nargs='*', default=[None]) -parser.add_argument("--zind", type=int_or_none, nargs='*', default=[None]) -parser.add_argument("-s", "--singleprecision", - action="store_true", default=False) -parser.add_argument("-c", "--compress", action="store_true", default=False) -parser.add_argument("-l", "--complevel", type=int_or_none, default=None) -parser.add_argument("-i", "--least-significant-digit", - type=int_or_none, default=None) -parser.add_argument("-q", "--quiet", action="store_true", default=False) -parser.add_argument("-a", "--append", action="store_true", default=False) -parser.add_argument("-d", "--delete", action="store_true", default=False) - -if argcomplete: - argcomplete.autocomplete(parser) - -args = parser.parse_args() - -for ind in "txyz": - args.__dict__[ind + "ind"] = slice(*args.__dict__[ind + "ind"]) -# Call the function, using command line arguments -squash.squashoutput(**args.__dict__) - -exit(0) diff --git a/bin/bout-squashoutput b/bin/bout-squashoutput new file mode 120000 index 0000000000..56c622b49f --- /dev/null +++ b/bin/bout-squashoutput @@ -0,0 +1 @@ +../externalpackages/boutdata/boutdata/scripts/bout_squashoutput.py \ No newline at end of file From 8121266d15f16d80f848821314b0768eb3d4d959 Mon Sep 17 00:00:00 2001 From: johnomotani Date: Wed, 4 Aug 2021 18:47:55 +0100 Subject: [PATCH 412/428] Fix typo in PETSc options docs --- manual/sphinx/user_docs/petsc.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manual/sphinx/user_docs/petsc.rst b/manual/sphinx/user_docs/petsc.rst index 1cb05758d3..e7d05ffe8d 100644 --- a/manual/sphinx/user_docs/petsc.rst +++ b/manual/sphinx/user_docs/petsc.rst @@ -35,5 +35,5 @@ to set for the LaplaceXY solver, in the input file you would put:: -boutpetsclib_laplacexyksp_type gmres - The PETSc arguments ``-options_view`` and ``options_left`` might be helpful for + The PETSc arguments ``-options_view`` and ``-options_left`` might be helpful for this - they will show what options have been set, so will show the prefixes used. 
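Before the next patch, a short illustration of how the prefixed PETSc option names quoted in the documentation hunk above are composed. This is a sketch only, not code from any patch; the split of `-boutpetsclib_laplacexyksp_type` into a prefix `boutpetsclib_laplacexy` and the PETSc key `ksp_type` is an assumption made for the example, as is the section content.

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
      // Assumed prefix and option section, mirroring the docs example above
      const std::string prefix = "boutpetsclib_laplacexy";
      const std::map<std::string, std::string> section = {{"ksp_type", "gmres"}};

      for (const auto& kv : section) {
        // Prints "-boutpetsclib_laplacexyksp_type gmres"
        std::cout << "-" << prefix << kv.first << " " << kv.second << "\n";
      }
      return 0;
    }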
From 9bf10e82d6693cd48852cd70448df118f57bab4a Mon Sep 17 00:00:00 2001 From: johnomotani Date: Wed, 4 Aug 2021 18:48:15 +0100 Subject: [PATCH 413/428] Add `const` suggested by clang-tidy --- src/sys/petsclib.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sys/petsclib.cxx b/src/sys/petsclib.cxx index 0971ee49b6..c5adb0fb1a 100644 --- a/src/sys/petsclib.cxx +++ b/src/sys/petsclib.cxx @@ -79,7 +79,7 @@ void PetscLib::cleanup() { void PetscLib::setPetscOptions(Options& options, const std::string& prefix) { // Pass all options in the section to PETSc - for (auto& i : options.getChildren()) { + for (const auto& i : options.getChildren()) { if (not i.second.isValue()) { throw BoutException("Found subsection %s in %s when reading PETSc options - only " "values are allowed in the PETSc options, not subsections", From a5d2eb98a00cf369a40ff47aab32bea59c548c4c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 23 Nov 2020 14:26:41 +0000 Subject: [PATCH 414/428] Add toString, operator<<, operator==, typeName for FieldPerp Necessary to add FieldPerp to variant in Options --- include/bout/sys/type_name.hxx | 4 ++++ include/fieldperp.hxx | 17 +++++++++++++ src/field/fieldperp.cxx | 12 ++++++++++ src/sys/type_name.cxx | 5 ++++ tests/unit/field/test_fieldperp.cxx | 37 +++++++++++++++++++++++++++++ 5 files changed, 75 insertions(+) diff --git a/include/bout/sys/type_name.hxx b/include/bout/sys/type_name.hxx index f35645a2fe..046ffa467f 100644 --- a/include/bout/sys/type_name.hxx +++ b/include/bout/sys/type_name.hxx @@ -10,6 +10,7 @@ class Field2D; class Field3D; +class FieldPerp; namespace bout { namespace utils { @@ -37,6 +38,9 @@ namespace utils { template <> std::string typeName(); + + template <> + std::string typeName(); } } diff --git a/include/fieldperp.hxx b/include/fieldperp.hxx index c97437937b..81eabd0c0a 100644 --- a/include/fieldperp.hxx +++ b/include/fieldperp.hxx @@ -36,6 +36,9 @@ class FieldPerp; #include "unused.hxx" +#include +#include + class Field2D; // #include "field2d.hxx" class Field3D; // #include "field3d.hxx" @@ -334,4 +337,18 @@ void invalidateGuards(FieldPerp &var); inline void invalidateGuards(FieldPerp &UNUSED(var)) {} #endif +/// toString template specialisation +/// Defined in utils.hxx +template <> +inline std::string toString<>(const FieldPerp& UNUSED(val)) { + return ""; +} + +/// Test if two fields are the same, by calculating +/// the minimum absolute difference between them +bool operator==(const FieldPerp &a, const FieldPerp &b); + +/// Output a string describing a FieldPerp to a stream +std::ostream& operator<<(std::ostream &out, const FieldPerp &value); + #endif diff --git a/src/field/fieldperp.cxx b/src/field/fieldperp.cxx index b4e2374a0d..fd7082bd16 100644 --- a/src/field/fieldperp.cxx +++ b/src/field/fieldperp.cxx @@ -170,3 +170,15 @@ void invalidateGuards(FieldPerp &var) { BOUT_FOR(i, var.getRegion("RGN_GUARDS")) { var[i] = BoutNaN; } } #endif + +bool operator==(const FieldPerp &a, const FieldPerp &b) { + if (!a.isAllocated() || !b.isAllocated()) { + return false; + } + return min(abs(a - b)) < 1e-10; +} + +std::ostream& operator<<(std::ostream &out, const FieldPerp &value) { + out << toString(value); + return out; +} diff --git a/src/sys/type_name.cxx b/src/sys/type_name.cxx index bbcff04cff..61cf6bd331 100644 --- a/src/sys/type_name.cxx +++ b/src/sys/type_name.cxx @@ -2,6 +2,7 @@ #include "field2d.hxx" #include "field3d.hxx" +#include "fieldperp.hxx" namespace bout { namespace utils { @@ -36,5 +37,9 @@ std::string typeName() { 
return "Field3D"; } +template <> +std::string typeName() { + return "FieldPerp"; +} } } diff --git a/tests/unit/field/test_fieldperp.cxx b/tests/unit/field/test_fieldperp.cxx index e8943c368e..a5efa235c5 100644 --- a/tests/unit/field/test_fieldperp.cxx +++ b/tests/unit/field/test_fieldperp.cxx @@ -14,6 +14,8 @@ #include #include +#include +#include #include /// Global mesh @@ -1762,4 +1764,39 @@ TEST_F(FieldPerpTest, ZeroFrom) { EXPECT_TRUE(field2.isAllocated()); EXPECT_TRUE(IsFieldEqual(field2, 0.)); } + +TEST_F(FieldPerpTest, ToString) { + // Just check we can call toString + const std::string expected = ""; + const FieldPerp field{}; + EXPECT_EQ(toString(field), expected); +} + +TEST_F(FieldPerpTest, OperatorStream) { + // Just check we can call operator<< + const FieldPerp field{}; + std::stringstream stream; + stream << field; + EXPECT_EQ(stream.str(), toString(field)); +} + +TEST_F(FieldPerpTest, Equality) { + FieldPerp field1 = 1.; + FieldPerp field2 = 1.; + EXPECT_TRUE(field1 == field2); +} + +TEST_F(FieldPerpTest, Inequality) { + FieldPerp field1 = 1.; + FieldPerp field2{}; + EXPECT_FALSE(field1 == field2); + + FieldPerp field3{}; + EXPECT_FALSE(field2 == field3); + + FieldPerp field4 = 1.00001; + EXPECT_FALSE(field1 == field4); +} + + #pragma GCC diagnostic pop From 09d95bd2ad9058fc351fcd2ff7d26c7f42e3f9d8 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 23 Nov 2020 15:10:23 +0000 Subject: [PATCH 415/428] Add FieldPerp constructor from Array Needed to add FieldPerp to Options --- include/fieldperp.hxx | 8 ++++++++ src/field/fieldperp.cxx | 10 ++++++++++ tests/unit/field/test_fieldperp.cxx | 23 +++++++++++++++++++++++ 3 files changed, 41 insertions(+) diff --git a/include/fieldperp.hxx b/include/fieldperp.hxx index 81eabd0c0a..9302f4ac86 100644 --- a/include/fieldperp.hxx +++ b/include/fieldperp.hxx @@ -78,6 +78,14 @@ class FieldPerp : public Field { */ FieldPerp(BoutReal val, Mesh *localmesh = nullptr); + /*! + * Constructor from Array and Mesh + */ + FieldPerp(Array data, Mesh* fieldmesh, CELL_LOC location_in = CELL_CENTRE, + int yindex_in = -1, + DirectionTypes directions_in = {YDirectionType::Standard, + ZDirectionType::Standard}); + ~FieldPerp() override = default; /*! 
diff --git a/src/field/fieldperp.cxx b/src/field/fieldperp.cxx index fd7082bd16..ecff18ffa4 100644 --- a/src/field/fieldperp.cxx +++ b/src/field/fieldperp.cxx @@ -48,6 +48,16 @@ FieldPerp::FieldPerp(BoutReal val, Mesh *localmesh) : FieldPerp(localmesh) { *this = val; } +FieldPerp::FieldPerp(Array data_in, Mesh* localmesh, CELL_LOC location_in, + int yindex_in, DirectionTypes directions) + : Field(localmesh, location_in, directions), yindex(yindex_in), + nx(fieldmesh->LocalNx), nz(fieldmesh->LocalNz), data(std::move(data_in)) { + + ASSERT1(data.size() == nx * nz); + + setLocation(location_in); +} + FieldPerp& FieldPerp::allocate() { if (data.empty()) { if (!fieldmesh) { diff --git a/tests/unit/field/test_fieldperp.cxx b/tests/unit/field/test_fieldperp.cxx index a5efa235c5..4bb07ec082 100644 --- a/tests/unit/field/test_fieldperp.cxx +++ b/tests/unit/field/test_fieldperp.cxx @@ -3,6 +3,7 @@ #pragma GCC diagnostic ignored "-Wdeprecated-declarations" #include "gtest/gtest.h" +#include "bout/array.hxx" #include "bout/constants.hxx" #include "bout/mesh.hxx" #include "boutexception.hxx" @@ -13,6 +14,7 @@ #include "utils.hxx" #include +#include #include #include #include @@ -127,6 +129,27 @@ TEST_F(FieldPerpTest, CreateOnGivenMesh) { EXPECT_EQ(field.getNz(), test_nz); } +TEST_F(FieldPerpTest, CreateFromArray) { + WithQuietOutput quiet{output_info}; + + int test_nx = FieldPerpTest::nx + 1; + int test_ny = FieldPerpTest::ny + 1; + int test_nz = FieldPerpTest::nz + 1; + + FakeMesh fieldmesh{test_nx, test_ny, test_nz}; + fieldmesh.setCoordinates(nullptr); + fieldmesh.createDefaultRegions(); + + auto expected = makeField([](IndPerp& i) { return i.ind; }, &fieldmesh); + + Array array_data(test_nx * test_nz); + std::iota(array_data.begin(), array_data.end(), 0); + + FieldPerp field{array_data, &fieldmesh}; + + EXPECT_TRUE(IsFieldEqual(field, expected)); +} + TEST_F(FieldPerpTest, CopyCheckFieldmesh) { WithQuietOutput quiet{output_info}; From 14fe22ae125267953f6d7d5cca0f4f92ef9609b6 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 24 Nov 2020 10:30:17 +0000 Subject: [PATCH 416/428] Add FieldPerp::getGlobalindex/setIndexFromGlobal Move implementation from DataFormat methods to FieldPerp --- include/fieldperp.hxx | 32 +++++++++++++++++++++----------- src/field/fieldperp.cxx | 25 +++++++++++++++++++++++++ src/fileio/dataformat.cxx | 29 ++++------------------------- tests/unit/test_extras.hxx | 8 ++++---- 4 files changed, 54 insertions(+), 40 deletions(-) diff --git a/include/fieldperp.hxx b/include/fieldperp.hxx index 9302f4ac86..b4229dc07c 100644 --- a/include/fieldperp.hxx +++ b/include/fieldperp.hxx @@ -116,23 +116,33 @@ class FieldPerp : public Field { inline const BoutReal& operator[](const Ind3D &d) const { ASSERT3(d.y() == yindex); return operator()(d.x(), d.z()); - } + } - /*! - * Returns the y index at which this field is defined - */ - int getIndex() const {return yindex;} - - /*! - * Sets the y index at which this field is defined - * - * This is used in arithmetic operations - */ + /// Return the y index at which this field is defined. This value is + /// local to each processor + int getIndex() const { return yindex; } + + /// Return the globally defined y index if it's either an interior + /// (grid) point, or a boundary point. 
Otherwise, return -1 to + /// indicate a guard cell or an invalid value + int getGlobalIndex() const; + + /// Set the (local) y index at which this field is defined + /// + /// This is used in arithmetic operations FieldPerp& setIndex(int y) { yindex = y; return *this; } + /// Set the (local) y index at which this field is defined from a + /// globally defined y index + /// + /// Only use the global y index if it's either an interior (grid) + /// point, or a boundary point. Otherwise, sets yindex to -1 to + /// indicate a guard cell or an invalid value + FieldPerp& setIndexFromGlobal(int y_global); + // these methods return FieldPerp to allow method chaining FieldPerp& setLocation(CELL_LOC new_location) { Field::setLocation(new_location); diff --git a/src/field/fieldperp.cxx b/src/field/fieldperp.cxx index ecff18ffa4..3fc8d0697d 100644 --- a/src/field/fieldperp.cxx +++ b/src/field/fieldperp.cxx @@ -114,6 +114,31 @@ const Region &FieldPerp::getRegion(const std::string ®ion_name) cons return fieldmesh->getRegionPerp(region_name); }; +int FieldPerp::getGlobalIndex() const { + auto& fieldmesh = *getMesh(); + const int start = fieldmesh.hasBndryLowerY() ? 0 : fieldmesh.ystart; + const int end = fieldmesh.hasBndryUpperY() ? fieldmesh.LocalNy : fieldmesh.yend + 1; + + // Only use the global y index if it's either an interior (grid) + // point, or a boundary point. Otherwise, use -1 to indicate a guard + // cell or an invalid value. The actual FieldPerp value is still + // written to file + return (yindex >= start and yindex < end) ? fieldmesh.getGlobalYIndex(yindex) : -1; +} + +FieldPerp& FieldPerp::setIndexFromGlobal(int y_global) { + auto& fieldmesh = *getMesh(); + const int start = fieldmesh.hasBndryLowerY() ? 0 : fieldmesh.ystart; + const int end = fieldmesh.hasBndryUpperY() ? fieldmesh.LocalNy : fieldmesh.yend + 1; + + // Only use the global y index if it's either an interior (grid) + // point, or a boundary point. Otherwise, use -1 to indicate a + // guard cell or an invalid value + const int yindex_local = fieldmesh.getLocalYIndex(y_global); + yindex = (yindex_local >= start and yindex_local < end) ? yindex_local : -1; + return *this; +} + //////////////// NON-MEMBER FUNCTIONS ////////////////// FieldPerp toFieldAligned(const FieldPerp& f, const std::string& region) { diff --git a/src/fileio/dataformat.cxx b/src/fileio/dataformat.cxx index ce58f8cbfb..3c2f48581d 100644 --- a/src/fileio/dataformat.cxx +++ b/src/fileio/dataformat.cxx @@ -47,19 +47,7 @@ void DataFormat::writeFieldAttributes(const std::string& name, const Field& f, b void DataFormat::writeFieldAttributes(const std::string& name, const FieldPerp& f, bool shiftOutput) { writeFieldAttributes(name, static_cast(f), shiftOutput); - auto& fieldmesh = *f.getMesh(); - const int yindex = f.getIndex(); - const int start = fieldmesh.hasBndryLowerY() ? 0 : fieldmesh.ystart; - const int end = fieldmesh.hasBndryUpperY() ? fieldmesh.LocalNy : fieldmesh.yend + 1; - - // Only use the global y index if it's either an interior (grid) - // point, or a boundary point. Otherwise, use -1 to indicate a guard - // cell or an invalid value. The actual FieldPerp value is still - // written to file - const int global_yindex = - (yindex >= start and yindex < end) ? 
fieldmesh.getGlobalYIndex(yindex) : -1; - - setAttribute(name, "yindex_global", global_yindex); + setAttribute(name, "yindex_global", f.getGlobalIndex()); } void DataFormat::readFieldAttributes(const std::string& name, Field& f) { @@ -86,18 +74,9 @@ void DataFormat::readFieldAttributes(const std::string& name, FieldPerp& f) { // Note: don't use DataFormat::mesh variable, because it may be null if the DataFormat // is part of a GridFromFile, which is created before the Mesh. if (getAttribute(name, "yindex_global", yindex_global)) { - - auto& fieldmesh = *f.getMesh(); - const int start = fieldmesh.hasBndryLowerY() ? 0 : fieldmesh.ystart; - const int end = fieldmesh.hasBndryUpperY() ? fieldmesh.LocalNy : fieldmesh.yend + 1; - - // Only use the global y index if it's either an interior (grid) - // point, or a boundary point. Otherwise, use -1 to indicate a - // guard cell or an invalid value. This may mean that `f` does not - // get allocated - const int yindex_local = fieldmesh.getLocalYIndex(yindex_global); - const int yindex = (yindex_local >= start and yindex_local < end) ? yindex_local : -1; - f.setIndex(yindex); + // If yindex_global is a guard cell or otherwise invalid value, f + // may not get allocated + f.setIndexFromGlobal(yindex_global); } else { // "yindex_global" wasn't present, so this might be an older // file. We use the no-boundary form here, such that we get a diff --git a/tests/unit/test_extras.hxx b/tests/unit/test_extras.hxx index 5bb451f5bd..a3893dc2e3 100644 --- a/tests/unit/test_extras.hxx +++ b/tests/unit/test_extras.hxx @@ -287,14 +287,14 @@ public: BoutReal GlobalY(BoutReal jy) const override { return jy; } int getGlobalXIndex(int) const override { return 0; } int getGlobalXIndexNoBoundaries(int) const override { return 0; } - int getGlobalYIndex(int) const override { return 0; } - int getGlobalYIndexNoBoundaries(int) const override { return 0; } + int getGlobalYIndex(int y) const override { return y; } + int getGlobalYIndexNoBoundaries(int y) const override { return y; } int getGlobalZIndex(int) const override { return 0; } int getGlobalZIndexNoBoundaries(int) const override { return 0; } int getLocalXIndex(int) const override { return 0; } int getLocalXIndexNoBoundaries(int) const override { return 0; } - int getLocalYIndex(int) const override { return 0; } - int getLocalYIndexNoBoundaries(int) const override { return 0; } + int getLocalYIndex(int y) const override { return y; } + int getLocalYIndexNoBoundaries(int y) const override { return y; } int getLocalZIndex(int) const override { return 0; } int getLocalZIndexNoBoundaries(int) const override { return 0; } From a902332a3bc5086ac7561ccd07f17f51127689c0 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 23 Nov 2020 15:11:34 +0000 Subject: [PATCH 417/428] Fix range-based for loops in tests copying element --- tests/unit/field/test_field3d.cxx | 14 +++++++------- tests/unit/field/test_fieldperp.cxx | 8 ++++---- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/unit/field/test_field3d.cxx b/tests/unit/field/test_field3d.cxx index 8d4925e197..830b02cc21 100644 --- a/tests/unit/field/test_field3d.cxx +++ b/tests/unit/field/test_field3d.cxx @@ -546,7 +546,7 @@ TEST_F(Field3DTest, IterateOverRGN_NOX) { const int num_sentinels = region_indices.size(); // Assign sentinel value to watch out for to our chosen points - for (const auto index : test_indices) { + for (const auto& index : test_indices) { field(index[0], index[1], index[2]) = sentinel; } @@ -595,7 +595,7 @@ TEST_F(Field3DTest, 
IterateOverRGN_NOY) { const int num_sentinels = region_indices.size(); // Assign sentinel value to watch out for to our chosen points - for (const auto index : test_indices) { + for (const auto& index : test_indices) { field(index[0], index[1], index[2]) = sentinel; } @@ -649,7 +649,7 @@ TEST_F(Field3DTest, IterateOverRGN_NOZ) { const int num_sentinels = region_indices.size(); // Assign sentinel value to watch out for to our chosen points - for (const auto index : test_indices) { + for (const auto& index : test_indices) { field(index[0], index[1], index[2]) = sentinel; } @@ -698,7 +698,7 @@ TEST_F(Field3DTest, IterateOverRGN_XGUARDS) { const int num_sentinels = region_indices.size(); // Assign sentinel value to watch out for to our chosen points - for (const auto index : test_indices) { + for (const auto& index : test_indices) { field(index[0], index[1], index[2]) = sentinel; } @@ -746,7 +746,7 @@ TEST_F(Field3DTest, IterateOverRGN_YGUARDS) { const int num_sentinels = region_indices.size(); // Assign sentinel value to watch out for to our chosen points - for (const auto index : test_indices) { + for (const auto& index : test_indices) { field(index[0], index[1], index[2]) = sentinel; } @@ -792,7 +792,7 @@ TEST_F(Field3DTest, IterateOverRGN_ZGUARDS) { const int num_sentinels = region_indices.size(); // Assign sentinel value to watch out for to our chosen points - for (const auto index : test_indices) { + for (const auto& index : test_indices) { field(index[0], index[1], index[2]) = sentinel; } @@ -844,7 +844,7 @@ TEST_F(Field3DTest, IterateOverRGN_NOCORNERS) { const int num_sentinels = region_indices.size(); // Assign sentinel value to watch out for to our chosen points - for (const auto index : test_indices) { + for (const auto& index : test_indices) { field(index[0], index[1], index[2]) = sentinel; } diff --git a/tests/unit/field/test_fieldperp.cxx b/tests/unit/field/test_fieldperp.cxx index 4bb07ec082..9b09566422 100644 --- a/tests/unit/field/test_fieldperp.cxx +++ b/tests/unit/field/test_fieldperp.cxx @@ -467,7 +467,7 @@ TEST_F(FieldPerpTest, IterateOverRGN_XGUARDS) { const int num_sentinels = region_indices.size(); // Assign sentinel value to watch out for to our chosen points - for (const auto index : test_indices) { + for (const auto& index : test_indices) { field(index[0], index[1]) = sentinel; } @@ -509,7 +509,7 @@ TEST_F(FieldPerpTest, IterateOverRGN_YGUARDS) { const int num_sentinels = region_indices.size(); // Assign sentinel value to watch out for to our chosen points - for (const auto index : test_indices) { + for (const auto& index : test_indices) { field(index[0], index[1]) = sentinel; } @@ -551,7 +551,7 @@ TEST_F(FieldPerpTest, IterateOverRGN_ZGUARDS) { const int num_sentinels = region_indices.size(); // Assign sentinel value to watch out for to our chosen points - for (const auto index : test_indices) { + for (const auto& index : test_indices) { field(index[0], index[1]) = sentinel; } @@ -597,7 +597,7 @@ TEST_F(FieldPerpTest, IterateOverRGN_NOCORNERS) { const int num_sentinels = region_indices.size(); // Assign sentinel value to watch out for to our chosen points - for (const auto index : test_indices) { + for (const auto& index : test_indices) { field(index[0], index[1]) = sentinel; } From e461adf55fa2b843ac22030a30dee5174b3e081e Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 24 Nov 2020 11:32:46 +0000 Subject: [PATCH 418/428] Add Options::hasAttribute to check for existence of attribute --- include/options.hxx | 5 +++++ tests/unit/sys/test_options.cxx | 9 +++++++++ 
2 files changed, 14 insertions(+) diff --git a/include/options.hxx b/include/options.hxx index 54de2d8e72..3f44ba5813 100644 --- a/include/options.hxx +++ b/include/options.hxx @@ -243,6 +243,11 @@ public: /// - doc [string] Documentation, describing what the variable does /// std::map attributes; + + /// Return true if this value has attribute \p key + bool hasAttribute(const std::string& key) const { + return attributes.find(key) != attributes.end(); + } /// Get a sub-section or value /// diff --git a/tests/unit/sys/test_options.cxx b/tests/unit/sys/test_options.cxx index 2f1550cb17..d0241ae5e5 100644 --- a/tests/unit/sys/test_options.cxx +++ b/tests/unit/sys/test_options.cxx @@ -812,6 +812,15 @@ TEST_F(OptionsTest, AssignSubSectionParent) { EXPECT_EQ(&option2["key2"]["key1"].parent(), &option2["key2"]); } +TEST_F(OptionsTest, HasAttribute) { + Options option; + + EXPECT_FALSE(option.hasAttribute("not here")); + + option.attributes["here"] = true; + EXPECT_TRUE(option.hasAttribute("here")); +} + TEST_F(OptionsTest, AttributeMissingBool) { Options option; From 1472570644e488f98d7cca2802292e67bf9fe0e3 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Mon, 23 Nov 2020 15:12:16 +0000 Subject: [PATCH 419/428] Add FieldPerp to Options::ValueType variant Allows storing `FieldPerp` in `Options` --- include/options.hxx | 5 +- src/sys/options.cxx | 89 +++++++++++++++++++++++++++++++++ tests/unit/sys/test_options.cxx | 17 +++++-- 3 files changed, 107 insertions(+), 4 deletions(-) diff --git a/include/options.hxx b/include/options.hxx index 3f44ba5813..6dc9d37d39 100644 --- a/include/options.hxx +++ b/include/options.hxx @@ -48,6 +48,7 @@ class Options; #include "bout/deprecated.hxx" #include "field2d.hxx" #include "field3d.hxx" +#include "fieldperp.hxx" #include #include @@ -177,7 +178,7 @@ public: /// The type used to store values using ValueType = - bout::utils::variant, Matrix, Tensor>; /// The type used to store attributes @@ -696,6 +697,7 @@ template<> inline void Options::assign<>(const char *val, const std::string sour // Note: Field assignments don't check for previous assignment (always force) template<> void Options::assign<>(Field2D val, const std::string source); template<> void Options::assign<>(Field3D val, const std::string source); +template<> void Options::assign<>(FieldPerp val, const std::string source); template<> void Options::assign<>(Array val, const std::string source); template<> void Options::assign<>(Matrix val, const std::string source); template<> void Options::assign<>(Tensor val, const std::string source); @@ -710,6 +712,7 @@ template <> BoutReal Options::as(const BoutReal& similar_to) const; template <> bool Options::as(const bool& similar_to) const; template <> Field2D Options::as(const Field2D& similar_to) const; template <> Field3D Options::as(const Field3D& similar_to) const; +template <> FieldPerp Options::as(const FieldPerp& similar_to) const; /// Define for reading options which passes the variable name #define OPTION(options, var, def) \ diff --git a/src/sys/options.cxx b/src/sys/options.cxx index f93183c372..14ac099044 100644 --- a/src/sys/options.cxx +++ b/src/sys/options.cxx @@ -161,6 +161,13 @@ void Options::assign<>(Field3D val, std::string source) { is_value = true; } template <> +void Options::assign<>(FieldPerp val, std::string source) { + value = std::move(val); + attributes["source"] = std::move(source); + value_used = false; + is_value = true; +} +template <> void Options::assign<>(Array val, std::string source) { value = std::move(val); 
attributes["source"] = std::move(source); @@ -453,6 +460,88 @@ template <> Field2D Options::as(const Field2D& similar_to) const { full_name.c_str()); } +template <> +FieldPerp Options::as(const FieldPerp& similar_to) const { + if (!is_value) { + throw BoutException("Option %s has no value", full_name.c_str()); + } + + // Mark value as used + value_used = true; + + if (bout::utils::holds_alternative(value)) { + FieldPerp stored_value = bout::utils::get(value); + + // Check that meta-data is consistent + ASSERT1(areFieldsCompatible(stored_value, similar_to)); + + return stored_value; + } + + try { + BoutReal scalar_value = + bout::utils::variantStaticCastOrThrow(value); + + // Get metadata from similar_to, fill field with scalar_value + return filledFrom(similar_to, scalar_value); + } catch (const std::bad_cast&) { + + const CELL_LOC location = hasAttribute("cell_location") + ? CELL_LOCFromString(attributes.at("cell_location")) + : similar_to.getLocation(); + + // Convert from a string using FieldFactory + if (bout::utils::holds_alternative(value)) { + return FieldFactory::get()->createPerp(bout::utils::get(value), this, + similar_to.getMesh(), location); + } else if (bout::utils::holds_alternative>(value)) { + auto localmesh = similar_to.getMesh(); + if (!localmesh) { + throw BoutException("mesh must be supplied when converting Tensor to Field3D"); + } + + // Get a reference, to try and avoid copying + const auto& matrix = bout::utils::get>(value); + + // Check if the dimension sizes are the same as a FieldPerp + if (matrix.shape() == std::make_tuple(localmesh->LocalNx, localmesh->LocalNz)) { + const auto y_direction = + hasAttribute("direction_y") + ? YDirectionTypeFromString(attributes.at("direction_y")) + : similar_to.getDirectionY(); + const auto z_direction = + hasAttribute("direction_z") + ? ZDirectionTypeFromString(attributes.at("direction_z")) + : similar_to.getDirectionZ(); + + auto result = FieldPerp(matrix.getData(), localmesh, location, -1, + {y_direction, z_direction}); + + // Set the index after creating the field so as to not + // duplicate the code in `FieldPerp::setIndexFromGlobal` + if (hasAttribute("yindex_global")) { + result.setIndexFromGlobal(attributes.at("yindex_global")); + } else if (similar_to.getIndex() == -1) { + // If `yindex_global` attribute wasn't present (might be an + // older file), and `similar_to` doesn't have its index set + // (might not have been passed, so be default constructed), + // use the no-boundary form so that we get a default value + // on a grid cell + result.setIndex(localmesh->getLocalYIndexNoBoundaries(0)); + } else { + result.setIndex(similar_to.getIndex()); + } + return result; + } + // If dimension sizes not the same, may be able + // to select a region from it using Mesh e.g. if this + // is from the input grid file. + } + } + throw BoutException(_("Value for option %s cannot be converted to a Field3D"), + full_name.c_str()); +} + // Note: This is defined here rather than in the header // to avoid using as before specialising it. 
bool Options::operator==(const char* other) const { diff --git a/tests/unit/sys/test_options.cxx b/tests/unit/sys/test_options.cxx index d0241ae5e5..2bada633b7 100644 --- a/tests/unit/sys/test_options.cxx +++ b/tests/unit/sys/test_options.cxx @@ -1023,7 +1023,7 @@ TEST_F(OptionsTest, TypeAttributeInt) { Options option; option = "42"; - // Casting to bool should modify the "type" attribute + // Casting to int should modify the "type" attribute int value = option.withDefault(-1); EXPECT_EQ(value, 42); @@ -1034,7 +1034,7 @@ TEST_F(OptionsTest, TypeAttributeField2D) { Options option; option = "42"; - // Casting to bool should modify the "type" attribute + // Casting to Field2D should modify the "type" attribute Field2D value = option.withDefault(Field2D(-1, bout::globals::mesh)); EXPECT_EQ(value(0,0), 42); @@ -1045,13 +1045,24 @@ TEST_F(OptionsTest, TypeAttributeField3D) { Options option; option = "42"; - // Casting to bool should modify the "type" attribute + // Casting to Field3D should modify the "type" attribute Field3D value = option.withDefault(Field3D(-1, bout::globals::mesh)); EXPECT_EQ(value(0,0,0), 42); EXPECT_EQ(option.attributes["type"].as(), "Field3D"); } +TEST_F(OptionsTest, TypeAttributeFieldPerp) { + Options option; + option = "36"; + + // Casting to FieldPerp should modify the "type" attribute + FieldPerp value = option.withDefault(FieldPerp(-1, bout::globals::mesh)); + + EXPECT_EQ(value(0,0,0), 36); + EXPECT_EQ(option.attributes["type"].as(), "FieldPerp"); +} + TEST_F(OptionsTest, DocString) { Options option; From 92adb9645db2e64d0b05a5450fcadb97b8e99d87 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 24 Nov 2020 11:35:56 +0000 Subject: [PATCH 420/428] Add ability to read/write FieldPerp to OptionsNetCDF --- src/sys/options/options_netcdf.cxx | 33 +++++++++++++++++++ tests/integrated/test-options-netcdf/runtest | 6 +++- .../test-options-netcdf.cxx | 9 +++-- tests/unit/sys/test_options_netcdf.cxx | 20 ++++++++++- 4 files changed, 63 insertions(+), 5 deletions(-) diff --git a/src/sys/options/options_netcdf.cxx b/src/sys/options/options_netcdf.cxx index cff0d76780..a1aec026ac 100644 --- a/src/sys/options/options_netcdf.cxx +++ b/src/sys/options/options_netcdf.cxx @@ -184,6 +184,11 @@ NcType NcTypeVisitor::operator()(const Field3D& UNUSED(t)) { return operator()(0.0); } +template <> +NcType NcTypeVisitor::operator()(const FieldPerp& UNUSED(t)) { + return operator()(0.0); +} + /// Visit a variant type, returning dimensions struct NcDimVisitor { NcDimVisitor(NcGroup& group) : group(group) {} @@ -247,6 +252,17 @@ std::vector NcDimVisitor::operator()(const Field3D& value) { return {xdim, ydim, zdim}; } +template <> +std::vector NcDimVisitor::operator()(const FieldPerp& value) { + auto xdim = findDimension(group, "x", value.getNx()); + ASSERT0(!xdim.isNull()); + + auto zdim = findDimension(group, "z", value.getNz()); + ASSERT0(!zdim.isNull()); + + return {xdim, zdim}; +} + /// Visit a variant type, and put the data into a NcVar struct NcPutVarVisitor { NcPutVarVisitor(NcVar& var) : var(var) {} @@ -290,6 +306,18 @@ void NcPutVarVisitor::operator()(const Field3D& value) { var.putAtt("cell_location", toString(value.getLocation())); } +template <> +void NcPutVarVisitor::operator()(const FieldPerp& value) { + // Pointer to data. 
Assumed to be contiguous array + var.putVar(&value(0, 0)); + + // Set cell location attribute + var.putAtt("cell_location", toString(value.getLocation())); + var.putAtt("direction_y", toString(value.getDirectionY())); + var.putAtt("direction_z", toString(value.getDirectionZ())); + var.putAtt("yindex_global", ncInt, value.getGlobalIndex()); +} + /// Visit a variant type, and put the data into a NcVar struct NcPutVarCountVisitor { NcPutVarCountVisitor(NcVar& var, const std::vector& start, @@ -321,6 +349,11 @@ void NcPutVarCountVisitor::operator()(const Field3D& value) { // Pointer to data. Assumed to be contiguous array var.putVar(start, count, &value(0, 0, 0)); } +template <> +void NcPutVarCountVisitor::operator()(const FieldPerp& value) { + // Pointer to data. Assumed to be contiguous array + var.putVar(start, count, &value(0, 0)); +} /// Visit a variant type, and put the data into an attributute struct NcPutAttVisitor { diff --git a/tests/integrated/test-options-netcdf/runtest b/tests/integrated/test-options-netcdf/runtest index c1f7095d39..8f9d00d8b8 100755 --- a/tests/integrated/test-options-netcdf/runtest +++ b/tests/integrated/test-options-netcdf/runtest @@ -41,7 +41,7 @@ with DataFile("test-out.nc") as f: assert result["string"] == "hello" print("Checking saved settings.ini") - + # Check the settings.ini file, coming from BOUT.inp # which is converted to NetCDF, read in, then written again settings = BoutOptionsFile("settings.ini") @@ -54,15 +54,19 @@ print("Checking saved fields.nc") with DataFile("fields.nc") as f: assert f["f2d"].shape == (5,6) # Field2D assert f["f3d"].shape == (5,6,2) # Field3D + assert f["fperp"].shape == (5, 2) # FieldPerp assert np.allclose(f["f2d"], 1.0) assert np.allclose(f["f3d"], 2.0) + assert np.allclose(f["fperp"], 3.0) print("Checking saved fields2.nc") with DataFile("fields2.nc") as f: assert f["f2d"].shape == (5,6) # Field2D assert f["f3d"].shape == (5,6,2) # Field3D + assert f["fperp"].shape == (5, 2) # FieldPerp assert np.allclose(f["f2d"], 1.0) assert np.allclose(f["f3d"], 2.0) + assert np.allclose(f["fperp"], 3.0) print(" => Passed") diff --git a/tests/integrated/test-options-netcdf/test-options-netcdf.cxx b/tests/integrated/test-options-netcdf/test-options-netcdf.cxx index 3c019a33dd..7bc49cfc6d 100644 --- a/tests/integrated/test-options-netcdf/test-options-netcdf.cxx +++ b/tests/integrated/test-options-netcdf/test-options-netcdf.cxx @@ -40,19 +40,22 @@ int main(int argc, char** argv) { Options fields; fields["f2d"] = Field2D(1.0); fields["f3d"] = Field3D(2.0); + fields["fperp"] = FieldPerp(3.0); OptionsNetCDF("fields.nc").write(fields); /////////////////////////// // Read fields Options fields_in = OptionsNetCDF("fields.nc").read(); - - auto f2d = fields_in["f2d"].as(mesh); - auto f3d = fields_in["f3d"].as(mesh); + + auto f2d = fields_in["f2d"].as(bout::globals::mesh); + auto f3d = fields_in["f3d"].as(bout::globals::mesh); + auto fperp = fields_in["fperp"].as(bout::globals::mesh); Options fields2; fields2["f2d"] = f2d; fields2["f3d"] = f3d; + fields2["fperp"] = fperp; // Write out again OptionsNetCDF("fields2.nc").write(fields2); diff --git a/tests/unit/sys/test_options_netcdf.cxx b/tests/unit/sys/test_options_netcdf.cxx index 2bc84841ba..c630c53191 100644 --- a/tests/unit/sys/test_options_netcdf.cxx +++ b/tests/unit/sys/test_options_netcdf.cxx @@ -217,5 +217,23 @@ TEST_F(OptionsNetCDFTest, Field3DWriteCellYLow) { EXPECT_EQ(data["f3d"].attributes["cell_location"].as(), toString(CELL_YLOW)); } +TEST_F(OptionsNetCDFTest, FieldPerpWriteCellCentre) 
{ + { + Options options; + FieldPerp fperp(3.0); + fperp.setIndex(2); + options["fperp"] = fperp; + + // Write file + OptionsNetCDF(filename).write(options); + } + + // Read file + Options data = OptionsNetCDF(filename).read(); + + EXPECT_EQ(data["fperp"].attributes["cell_location"].as(), + toString(CELL_CENTRE)); + EXPECT_EQ(data["fperp"].attributes["yindex_global"].as(), 2); +} -#endif // NCDF4 +#endif // BOUT_HAS_NETCDF From 52a0cb61292534571e4530df4cfa41ac077387a5 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 24 Nov 2020 16:28:08 +0000 Subject: [PATCH 421/428] Fix error message in Options::as --- src/sys/options.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sys/options.cxx b/src/sys/options.cxx index 14ac099044..ecc967590d 100644 --- a/src/sys/options.cxx +++ b/src/sys/options.cxx @@ -497,7 +497,7 @@ FieldPerp Options::as(const FieldPerp& similar_to) const { } else if (bout::utils::holds_alternative>(value)) { auto localmesh = similar_to.getMesh(); if (!localmesh) { - throw BoutException("mesh must be supplied when converting Tensor to Field3D"); + throw BoutException("mesh must be supplied when converting Matrix to FieldPerp"); } // Get a reference, to try and avoid copying From c431fa1726c974f19f9d18a06d6e083b7044ff50 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 24 Nov 2020 17:05:50 +0000 Subject: [PATCH 422/428] Fix FieldPerp equality unit tests (didn't set y-index) Also check that y-indices match in operator== --- include/fieldperp.hxx | 9 +++++---- src/field/fieldperp.cxx | 6 +++--- tests/unit/field/test_fieldperp.cxx | 9 +++++++++ 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/include/fieldperp.hxx b/include/fieldperp.hxx index b4229dc07c..ca4cb565ef 100644 --- a/include/fieldperp.hxx +++ b/include/fieldperp.hxx @@ -362,11 +362,12 @@ inline std::string toString<>(const FieldPerp& UNUSED(val)) { return ""; } -/// Test if two fields are the same, by calculating -/// the minimum absolute difference between them -bool operator==(const FieldPerp &a, const FieldPerp &b); +/// Test if two fields are the same, by checking that they are defined +/// at the same y-index, and if the minimum absolute difference +/// between them is less than 1e-10 +bool operator==(const FieldPerp& a, const FieldPerp& b); /// Output a string describing a FieldPerp to a stream -std::ostream& operator<<(std::ostream &out, const FieldPerp &value); +std::ostream& operator<<(std::ostream& out, const FieldPerp& value); #endif diff --git a/src/field/fieldperp.cxx b/src/field/fieldperp.cxx index 3fc8d0697d..141e46aa61 100644 --- a/src/field/fieldperp.cxx +++ b/src/field/fieldperp.cxx @@ -206,14 +206,14 @@ void invalidateGuards(FieldPerp &var) { } #endif -bool operator==(const FieldPerp &a, const FieldPerp &b) { +bool operator==(const FieldPerp& a, const FieldPerp& b) { if (!a.isAllocated() || !b.isAllocated()) { return false; } - return min(abs(a - b)) < 1e-10; + return (a.getIndex() == b.getIndex()) and (min(abs(a - b)) < 1e-10); } -std::ostream& operator<<(std::ostream &out, const FieldPerp &value) { +std::ostream& operator<<(std::ostream& out, const FieldPerp& value) { out << toString(value); return out; } diff --git a/tests/unit/field/test_fieldperp.cxx b/tests/unit/field/test_fieldperp.cxx index 9b09566422..31857077b0 100644 --- a/tests/unit/field/test_fieldperp.cxx +++ b/tests/unit/field/test_fieldperp.cxx @@ -1805,12 +1805,16 @@ TEST_F(FieldPerpTest, OperatorStream) { TEST_F(FieldPerpTest, Equality) { FieldPerp field1 = 1.; + field1.setIndex(1); 
FieldPerp field2 = 1.; + field2.setIndex(1); EXPECT_TRUE(field1 == field2); } TEST_F(FieldPerpTest, Inequality) { FieldPerp field1 = 1.; + field1.setIndex(2); + FieldPerp field2{}; EXPECT_FALSE(field1 == field2); @@ -1818,7 +1822,12 @@ TEST_F(FieldPerpTest, Inequality) { EXPECT_FALSE(field2 == field3); FieldPerp field4 = 1.00001; + field4.setIndex(2); EXPECT_FALSE(field1 == field4); + + FieldPerp field5 = 1.; + field5.setIndex(3); + EXPECT_FALSE(field1 == field5); } From 9e987e6b656ced8a6c207579e489a3d51eca918e Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Tue, 24 Nov 2020 17:11:03 +0000 Subject: [PATCH 423/428] Normalise location in Field constructor --- src/field/field.cxx | 66 +++++++++++++++++++++++++++-------------- src/field/field3d.cxx | 2 -- src/field/fieldperp.cxx | 3 +- 3 files changed, 45 insertions(+), 26 deletions(-) diff --git a/src/field/field.cxx b/src/field/field.cxx index adc7c11985..86848bfddc 100644 --- a/src/field/field.cxx +++ b/src/field/field.cxx @@ -32,42 +32,64 @@ #include #include -Field::Field(Mesh *localmesh, CELL_LOC location_in, - DirectionTypes directions_in) - : fieldmesh(localmesh==nullptr ? bout::globals::mesh : localmesh), - location(location_in), directions(directions_in) { +namespace bout { +/// Make sure \p location is a sensible value for \p mesh +/// +/// Throws if checks are enabled and trying to use a staggered +/// location on a non-staggered mesh +CELL_LOC normaliseLocation(CELL_LOC location, Mesh* mesh) { + AUTO_TRACE(); - // Need to check for nullptr again, because the fieldmesh might still be - // nullptr if the global mesh hasn't been initialized yet - if (fieldmesh != nullptr) { - // sets fieldCoordinates by getting Coordinates for our location from - // fieldmesh - getCoordinates(); + // CELL_DEFAULT always means CELL_CENTRE + if (location == CELL_DEFAULT) { + return CELL_CENTRE; } -} -void Field::setLocation(CELL_LOC new_location) { - AUTO_TRACE(); - if (getMesh()->StaggerGrids) { - if (new_location == CELL_VSHIFT) { + // No mesh means we can't check if we're using staggered grids, so + // we'll have to trust the user in this case. This can happen if + // we're making a field before the global mesh has been initialised + // -- probably not good, but possible. + if (mesh == nullptr) { + return location; + } + + if (mesh->StaggerGrids) { + if (location == CELL_VSHIFT) { throw BoutException( "Field: CELL_VSHIFT cell location only makes sense for vectors"); } - if (new_location == CELL_DEFAULT) { - new_location = CELL_CENTRE; - } - - location = new_location; + return location; } else { #if CHECK > 0 - if (new_location != CELL_CENTRE && new_location != CELL_DEFAULT) { + if (location != CELL_CENTRE) { throw BoutException("Field: Trying to set off-centre location on " "non-staggered grid\n" " Did you mean to enable staggered grids?"); } #endif - location = CELL_CENTRE; + return CELL_CENTRE; } +} +} // namespace bout + +Field::Field(Mesh* localmesh, CELL_LOC location_in, DirectionTypes directions_in) + : fieldmesh(localmesh == nullptr ? 
bout::globals::mesh : localmesh), + location(bout::normaliseLocation(location_in, fieldmesh)), + directions(directions_in) { + + // Need to check for nullptr again, because the fieldmesh might still be + // nullptr if the global mesh hasn't been initialized yet + if (fieldmesh != nullptr) { + // sets fieldCoordinates by getting Coordinates for our location from + // fieldmesh + getCoordinates(); + } +} + +void Field::setLocation(CELL_LOC new_location) { + AUTO_TRACE(); + + location = bout::normaliseLocation(new_location, getMesh()); fieldCoordinates = nullptr; // Sets correct fieldCoordinates pointer and ensures Coordinates object is diff --git a/src/field/field3d.cxx b/src/field/field3d.cxx index ea9527fe05..6e7afed21c 100644 --- a/src/field/field3d.cxx +++ b/src/field/field3d.cxx @@ -102,8 +102,6 @@ Field3D::Field3D(Array data_in, Mesh* localmesh, CELL_LOC datalocation nz = fieldmesh->LocalNz; ASSERT1(data.size() == nx * ny * nz); - - setLocation(datalocation); } Field3D::~Field3D() { delete deriv; } diff --git a/src/field/fieldperp.cxx b/src/field/fieldperp.cxx index 141e46aa61..6c8452d555 100644 --- a/src/field/fieldperp.cxx +++ b/src/field/fieldperp.cxx @@ -52,10 +52,9 @@ FieldPerp::FieldPerp(Array data_in, Mesh* localmesh, CELL_LOC location int yindex_in, DirectionTypes directions) : Field(localmesh, location_in, directions), yindex(yindex_in), nx(fieldmesh->LocalNx), nz(fieldmesh->LocalNz), data(std::move(data_in)) { + TRACE("FieldPerp: Copy constructor from Array and Mesh"); ASSERT1(data.size() == nx * nz); - - setLocation(location_in); } FieldPerp& FieldPerp::allocate() { From 8f6536c099bf8c00cad8823a695ad0927c40f404 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Wed, 4 Aug 2021 17:10:14 +0100 Subject: [PATCH 424/428] Initialise MPI for tests and fix FakeMesh `FieldPerp::getGlobalIndex` calls `Mesh::hasBndrylowery` which calls `MPI_Allreduce` and `Mesh::getXcomm`. 
These fixes were already in `next` when the `OptionsNetCDF` test with `FieldPerp` was added there --- tests/unit/bout_test_main.cxx | 7 +++++++ tests/unit/test_extras.hxx | 4 ++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/unit/bout_test_main.cxx b/tests/unit/bout_test_main.cxx index e72bc5a785..6983fb04d1 100644 --- a/tests/unit/bout_test_main.cxx +++ b/tests/unit/bout_test_main.cxx @@ -10,6 +10,9 @@ GTEST_API_ int main(int argc, char** argv) { // setting fft_measure to false bout::fft::fft_init(false); + // MPI initialisationn + BoutComm::setArgs(argc, argv); + printf("Running main() from bout_test_main.cxx\n"); testing::InitGoogleTest(&argc, argv); int result = RUN_ALL_TESTS(); @@ -19,5 +22,9 @@ GTEST_API_ int main(int argc, char** argv) { Array::cleanup(); Array::cleanup(); Array::cleanup(); + + // MPI communicator, including MPI_Finalize() + BoutComm::cleanup(); + return result; } diff --git a/tests/unit/test_extras.hxx b/tests/unit/test_extras.hxx index a3893dc2e3..d1a893c1f9 100644 --- a/tests/unit/test_extras.hxx +++ b/tests/unit/test_extras.hxx @@ -222,8 +222,8 @@ public: int UNUSED(tag)) override { return nullptr; } - MPI_Comm getXcomm(int UNUSED(jy)) const override { return MPI_COMM_NULL; } - MPI_Comm getYcomm(int UNUSED(jx)) const override { return MPI_COMM_NULL; } + MPI_Comm getXcomm(int UNUSED(jy)) const override { return BoutComm::get(); } + MPI_Comm getYcomm(int UNUSED(jx)) const override { return BoutComm::get(); } bool periodicY(int UNUSED(jx)) const override { return true; } bool periodicY(int UNUSED(jx), BoutReal& UNUSED(ts)) const override { return true; } std::pair hasBranchCutLower(int UNUSED(jx)) const override { From 2ee02ffbc19d327648935b11b8d0364f06687da0 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 6 Aug 2021 15:29:30 +0100 Subject: [PATCH 425/428] CMake: Bump version --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 29e8deee56..7d841597c8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,7 +8,7 @@ endif() # CMake currently doesn't support proper semver # Set the version here, strip the prerelease identifier to use in `project` -set(BOUT_FULL_VERSION 4.4.0-alpha) +set(BOUT_FULL_VERSION 4.4.0) string(REGEX REPLACE "^([0-9]+\.[0-9]+\.[0-9]+)-.*" "\\1" BOUT_CMAKE_ACCEPTABLE_VERSION ${BOUT_FULL_VERSION}) project(BOUT++ From 6b17600be856259d220f9a2b504b2f645e1c3e1c Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 6 Aug 2021 15:31:07 +0100 Subject: [PATCH 426/428] Bump DOI and release date for v4.4.0 --- CITATION.cff | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CITATION.cff b/CITATION.cff index b6ba415c61..46ec1ad324 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -165,10 +165,10 @@ authors: given-names: Toby version: 4.4.0 -date-released: 2020-10-19 +date-released: 2020-08-06 repository-code: https://github.com/boutproject/BOUT-dev url: http://boutproject.github.io/ -doi: 10.5281/zenodo.5142504 +doi: 10.5281/zenodo.5167527 license: 'LGPL-3.0-or-later' references: - type: article From 5c5a8d3ff5f172e0f06e0036df5599eef06361f8 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 6 Aug 2021 15:33:07 +0100 Subject: [PATCH 427/428] Update changelog --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 99ec0f0997..21c912df52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,11 @@ **Merged pull requests:** +- Add ability to use FieldPerp in Options (v.4.4) 
[\#2395](https://github.com/boutproject/BOUT-dev/pull/2395) ([ZedThree](https://github.com/users/ZedThree)) +- Fix some HDF5 related issues and add Mesh::getLocal{X,Y,Z}Index (v4.4) [\#2394](https://github.com/boutproject/BOUT-dev/pull/2394) ([ZedThree](https://github.com/users/ZedThree)) +- Add Mesh::getRegion for use in generic code (v4.4) [\#2393](https://github.com/boutproject/BOUT-dev/pull/2393) ([ZedThree](https://github.com/users/ZedThree)) +- Adding an adaptive, arbitrary order, Adams-Bashforth solver (v4.4) [\#2392](https://github.com/boutproject/BOUT-dev/pull/2392) ([ZedThree](https://github.com/users/ZedThree)) +- Allow PETSc options to be passed from BOUT.inp (v4.4) [\#2391](https://github.com/boutproject/BOUT-dev/pull/2391) ([ZedThree](https://github.com/users/ZedThree)) - Add new contributors [\#2386](https://github.com/boutproject/BOUT-dev/pull/2386) ([dschwoerer](https://github.com/users/dschwoerer)) - Update locale [\#2385](https://github.com/boutproject/BOUT-dev/pull/2385) ([dschwoerer](https://github.com/users/dschwoerer)) - Fix RTD [\#2384](https://github.com/boutproject/BOUT-dev/pull/2384) ([dschwoerer](https://github.com/users/dschwoerer)) From 45d3877b3a12e5c4f5fab39798c7f7533c7668c2 Mon Sep 17 00:00:00 2001 From: Peter Hill Date: Fri, 6 Aug 2021 15:48:34 +0100 Subject: [PATCH 428/428] Update change summary --- change_summary.md | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/change_summary.md b/change_summary.md index 7bdf9e3007..49c439761b 100644 --- a/change_summary.md +++ b/change_summary.md @@ -3,6 +3,28 @@ This is a slightly more readable, and therefore incomplete, summary of the changes from the full [changelog](CHANGELOG.md) +4.4.0 is a feature release. The main new features are: +- The CMake support has been expanded, and will replace the autotools build + system in the next major release. See [the CMake installation + instructions](https://bout-dev.readthedocs.io/en/latest/user_docs/installing.html#cmake) + for details on building with CMake. +- Output files can now handle `FieldPerp`, `std::vector`, and + `std::string`, while input options can now handle `FieldPerp`. +- Output and evolving variables can now have an optional description +- The `boutdata` and `boututils` python libraries have been moved to standalone + packages. BOUT++ still comes bundled with them as submodules, but you can now + install them separately. +- Staggered grids now work with `InvertPar` and split flux derivatives. +- User code can set default values that override the library's default values. +- Timing information can be output in a table at the end of the run with + `time_report:show` +- Some basic provenance information tracking has been enabled: each run now + generates a random unique ID. Hypnotoad grid files that contain a UUID will + also be tracked. +- A new time solver has been added, an adaptive, arbitrary order Adams-Bashforth + solver. +- FFTs can use the `FFTW_EXHAUSTIVE` input option. + 4.3.3 is a bugfix release: - Better documentation and tests - Fix `shiftOutput` for aligned fields @@ -153,7 +175,7 @@ Other changes are mostly housekeeping changes for the BOUT++ project. namespace. This should help ensure we play nice with other libraries, as well as logically group related things across parts of the codebase - + [mpark]: https://github.com/mpark/variant [xlc]: https://bout-dev.readthedocs.io/en/latest/user_docs/advanced_install.html#issues [scorep]: https://www.vi-hps.org/projects/score-p/
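To make the FieldPerp support summarised above concrete, the following sketch is modelled on the `tests/integrated/test-options-netcdf` example added earlier in this series. The `options_netcdf.hxx` include path and any namespace qualifier needed for `OptionsNetCDF` are assumptions to check against your source tree, and the program assumes it is run like any physics model, from a directory containing a valid `BOUT.inp` so that `BoutInitialise` can set up the global mesh.

```cpp
// Sketch modelled on tests/integrated/test-options-netcdf from this series.
// Include path and namespace of OptionsNetCDF are assumptions.
#include "bout.hxx"
#include "fieldperp.hxx"
#include "options.hxx"
#include "options_netcdf.hxx"

int main(int argc, char** argv) {
  BoutInitialise(argc, argv); // assumes a valid BOUT.inp in the run directory

  // FieldPerp values can now be stored in an Options tree alongside
  // Field2D and Field3D values
  Options fields;
  fields["f2d"] = Field2D(1.0);
  fields["f3d"] = Field3D(2.0);
  fields["fperp"] = FieldPerp(3.0);

  // ...and written to, then read back from, a NetCDF file
  OptionsNetCDF("fields.nc").write(fields);
  Options fields_in = OptionsNetCDF("fields.nc").read();

  // Converting back needs a mesh (or a similar field) so the result gets the
  // right shape; the cell_location and yindex_global attributes written with
  // the variable are used to restore the field's metadata
  FieldPerp fperp = fields_in["fperp"].as<FieldPerp>(bout::globals::mesh);

  Options out;
  out["fperp"] = fperp;
  OptionsNetCDF("fields2.nc").write(out);

  BoutFinalise();
  return 0;
}
```

Reading the field back takes a mesh argument so the result picks up the right shape along with the `cell_location`, `direction_y`/`direction_z` and `yindex_global` attributes written alongside the data, as shown in the `OptionsNetCDF` and `Options::as<FieldPerp>` patches above.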