Changeset 17fcbe7 for src


Timestamp:
Nov 16, 2012, 2:13:47 PM
Author:
Frederik Heber <heber@…>
Branches:
Action_Thermostats, Add_AtomRandomPerturbation, Add_FitFragmentPartialChargesAction, Add_RotateAroundBondAction, Add_SelectAtomByNameAction, Added_ParseSaveFragmentResults, AddingActions_SaveParseParticleParameters, Adding_Graph_to_ChangeBondActions, Adding_MD_integration_tests, Adding_ParticleName_to_Atom, Adding_StructOpt_integration_tests, AtomFragments, Automaking_mpqc_open, AutomationFragmentation_failures, Candidate_v1.5.4, Candidate_v1.6.0, Candidate_v1.6.1, ChangeBugEmailaddress, ChangingTestPorts, ChemicalSpaceEvaluator, CombiningParticlePotentialParsing, Combining_Subpackages, Debian_Package_split, Debian_package_split_molecuildergui_only, Disabling_MemDebug, Docu_Python_wait, EmpiricalPotential_contain_HomologyGraph, EmpiricalPotential_contain_HomologyGraph_documentation, Enable_parallel_make_install, Enhance_userguide, Enhanced_StructuralOptimization, Enhanced_StructuralOptimization_continued, Example_ManyWaysToTranslateAtom, Exclude_Hydrogens_annealWithBondGraph, FitPartialCharges_GlobalError, Fix_BoundInBox_CenterInBox_MoleculeActions, Fix_ChargeSampling_PBC, Fix_ChronosMutex, Fix_FitPartialCharges, Fix_FitPotential_needs_atomicnumbers, Fix_ForceAnnealing, Fix_IndependentFragmentGrids, Fix_ParseParticles, Fix_ParseParticles_split_forward_backward_Actions, Fix_PopActions, Fix_QtFragmentList_sorted_selection, Fix_Restrictedkeyset_FragmentMolecule, Fix_StatusMsg, Fix_StepWorldTime_single_argument, Fix_Verbose_Codepatterns, Fix_fitting_potentials, Fixes, ForceAnnealing_goodresults, ForceAnnealing_oldresults, ForceAnnealing_tocheck, ForceAnnealing_with_BondGraph, ForceAnnealing_with_BondGraph_continued, ForceAnnealing_with_BondGraph_continued_betteresults, ForceAnnealing_with_BondGraph_contraction-expansion, FragmentAction_writes_AtomFragments, FragmentMolecule_checks_bonddegrees, GeometryObjects, Gui_Fixes, Gui_displays_atomic_force_velocity, ImplicitCharges, IndependentFragmentGrids, IndependentFragmentGrids_IndividualZeroInstances, IndependentFragmentGrids_IntegrationTest, IndependentFragmentGrids_Sole_NN_Calculation, JobMarket_RobustOnKillsSegFaults, JobMarket_StableWorkerPool, JobMarket_unresolvable_hostname_fix, MoreRobust_FragmentAutomation, ODR_violation_mpqc_open, PartialCharges_OrthogonalSummation, PdbParser_setsAtomName, PythonUI_with_named_parameters, QtGui_reactivate_TimeChanged_changes, Recreated_GuiChecks, Rewrite_FitPartialCharges, RotateToPrincipalAxisSystem_UndoRedo, SaturateAtoms_findBestMatching, SaturateAtoms_singleDegree, StoppableMakroAction, Subpackage_CodePatterns, Subpackage_JobMarket, Subpackage_LinearAlgebra, Subpackage_levmar, Subpackage_mpqc_open, Subpackage_vmg, Switchable_LogView, ThirdParty_MPQC_rebuilt_buildsystem, TrajectoryDependenant_MaxOrder, TremoloParser_IncreasedPrecision, TremoloParser_MultipleTimesteps, TremoloParser_setsAtomName, Ubuntu_1604_changes, stable
Children:
a1436b
Parents:
08cc62
git-author:
Frederik Heber <heber@…> (08/08/12 11:05:24)
git-committer:
Frederik Heber <heber@…> (11/16/12 14:13:47)
Message:

FIX: Correcting normalization in InterfaceVMGJob among others.

  • with lots of help from Julian Iseringhausen.
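
In short, the changeset drops the ad-hoc normalization factor from the right-hand side of the Poisson equation, subtracts the sampled electron density cell by cell on the particle grid, and turns the former raw grid sums into volume-weighted integrals (sum times h^3). The sketch below illustrates only the resulting charge-neutrality check; it is not code from this changeset, and the flat std::vector grid and mesh_width parameter are hypothetical stand-ins for VMG's Grid and Comm classes.

// Standalone sketch of the charge-neutrality check: after assembling the
// right-hand side rho = 4*pi*(nuclei - electrons), its volume-weighted sum
// divided by 4*pi approximates the net charge and should be roughly zero.
// NOT code from this changeset: the flat std::vector grid and the
// mesh_width argument stand in for VMG's Grid class.
#include <cassert>
#include <cmath>
#include <cstdio>
#include <vector>

static double charge_integral(const std::vector<double>& rhs, double mesh_width)
{
  const double pi = 4.0 * std::atan(1.0);
  // h^3, the volume of a single cell on a uniform mesh
  const double element_volume = mesh_width * mesh_width * mesh_width;
  double sum = 0.0;
  for (std::size_t i = 0; i < rhs.size(); ++i)
    sum += rhs[i];
  // undo the 4*pi Poisson prefactor to recover the net charge
  return element_volume * sum / (4.0 * pi);
}

int main()
{
  // toy two-cell "grid": charges +q and -q smeared into one cell each
  const double h = 0.5, q = 1.0, pi = 4.0 * std::atan(1.0);
  std::vector<double> rhs;
  rhs.push_back( 4.0 * pi * q / (h * h * h));
  rhs.push_back(-4.0 * pi * q / (h * h * h));
  std::printf("net charge: %e\n", charge_integral(rhs, h));  // ~0
  assert(std::fabs(charge_integral(rhs, h)) < 1e-12);
  return 0;
}

This mirrors the new "Grid charge integral" log line added to ImportRightHandSide in the diff below.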
File:
1 edited

Legend:

  ' ' Unmodified
  '+' Added
  '-' Removed
  • src/Jobs/InterfaceVMGJob.cpp

--- src/Jobs/InterfaceVMGJob.cpp (r08cc62)
+++ src/Jobs/InterfaceVMGJob.cpp (r17fcbe7)

 #include "base/vector.hpp"
 #include "base/math.hpp"
-#ifdef HAVE_MPI
-#include "comm/comm_mpi.hpp"
-#else
-#include "comm/comm_serial.hpp"
-#endif
+#include "comm/comm.hpp"
 #include "grid/grid.hpp"
 #include "grid/multigrid.hpp"
+#include "mg.hpp"

 #include "InterfaceVMGJob.hpp"
…

   Grid& grid = multigrid(multigrid.MaxLevel());
-  grid.ClearBoundary();
+  //grid.ClearBoundary(); // we don't have a boundary under periodic boundary conditions

   /// 1. assign nuclei as smeared-out charges to the grid
…
    * Charge assignment on the grid
    */
-#ifdef HAVE_MPI
-  CommMPI& comm = *dynamic_cast<CommMPI*>(VMG::MG::GetComm());
-#else
-  CommSerial& comm = *dynamic_cast<CommSerial*>(VMG::MG::GetComm());
-#endif
+  Comm& comm = *MG::GetComm();
   Grid& particle_grid = comm.GetParticleGrid();

…
       VMG::MG::GetFactory().GetObjectStorageVal<int>("PARTICLE_NEAR_FIELD_CELLS")));

+  // create smeared-out particle charges on particle_grid via splines
   LOG(1, "INFO: Creating particle grid for " << particles.size() << " particles.");
   for (std::list<Particle::Particle>::iterator iter = particles.begin();
…
   }

-  double particle_charges = 0.0;
-  for (std::list<Particle::Particle>::iterator iter=particles.begin(); iter!=particles.end(); ++iter)
-    particle_charges += iter->Charge();
-  particle_charges = MG::GetComm()->GlobalSumRoot(particle_charges);
-  LOG(1, "INFO: Total charge is " << particle_charges << ".");
-
   // Communicate charges over halo
   comm.CommFromGhosts(particle_grid);

-  // Assign charge values to the right hand side
-  double normalization = 1.;
-  if (!particles.empty()) {
-    normalization =
-        particle_charges/(
-            pow(VMG::MG::GetFactory().GetObjectStorageVal<int>("PARTICLE_NEAR_FIELD_CELLS"), 3) *
-            (double)particles.size()
-            );
-  }
-  LOG(1, "INFO: Normalization is " << normalization << ".");
+//  // for normalization sum up charges
+//  double particle_charges = 0.0;
+//  for (std::list<Particle::Particle>::iterator iter=particles.begin(); iter!=particles.end(); ++iter)
+//    particle_charges += iter->Charge();
+//  particle_charges = MG::GetComm()->GlobalSumRoot(particle_charges);
+//  LOG(1, "INFO: Total charge is " << particle_charges << ".");
+
+  // add sampled electron charge density onto grid
+  std::vector<double>::const_iterator sample_iter = sampled_input.begin();
+  for (Grid::iterator iter = particle_grid.Iterators().Local().Begin();
+      iter != particle_grid.Iterators().Local().End();
+      ++iter)
+    particle_grid(*iter) -= (*sample_iter++);
+/*
+  for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
+    for (i.Y()=grid.Local().Begin().Y(); i.Y()<grid.Local().End().Y(); ++i.Y())
+      for (i.Z()=grid.Local().Begin().Z(); i.Z()<grid.Local().End().Z(); ++i.Z())
+        particle_grid(i) -= (*sample_iter++); //temp_grid(i);
+*/
+  assert( sample_iter == sampled_input.end() );
+
+  // add particle_grid onto grid
   for (int i=0; i<grid.Local().Size().X(); ++i)
     for (int j=0; j<grid.Local().Size().Y(); ++j)
…
   grid(grid.Local().Begin().X() + i,
        grid.Local().Begin().Y() + j,
-       grid.Local().Begin().Z() + k) = normalization * 4.0 * VMG::Math::pi *
+       grid.Local().Begin().Z() + k) = 4.0 * VMG::Math::pi *
     particle_grid.GetVal(particle_grid.Local().Begin().X() + i,
              particle_grid.Local().Begin().Y() + j,
…
   const Index begin_local = grid.Global().LocalBegin() - grid.Local().HaloSize1();

+  // print debugging info on grid size
   LOG(1, "INFO: Mesh has extent " << grid.Extent().MeshWidth() << ".");
   const int gridpoints = pow(2, level);
   LOG(1, "INFO: gridpoints on finest level are " << gridpoints << ".");
-  assert( (grid.Extent().MeshWidth().X() * gridpoints) == 1 );
-  assert( (grid.Extent().MeshWidth().Y() * gridpoints) == 1 );
-  assert( (grid.Extent().MeshWidth().Z() * gridpoints) == 1 );
   LOG(1, "INFO: "
       << "X in [" << grid.Local().Begin().X() << "," << grid.Local().End().X() << "],"
…
       << "Z in [" << grid.Local().Begin().Z() << "," << grid.Local().End().Z() << "].");

-  const double element_volume =
-      grid.Extent().MeshWidth().X() * grid.Extent().MeshWidth().Y() * grid.Extent().MeshWidth().Z();
-  std::vector<double>::const_iterator sample_iter = sampled_input.begin();
-  for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
-    for (i.Y()=grid.Local().Begin().Y(); i.Y()<grid.Local().End().Y(); ++i.Y())
-      for (i.Z()=grid.Local().Begin().Z(); i.Z()<grid.Local().End().Z(); ++i.Z())
-        grid(i) -= (*sample_iter++) * element_volume; //temp_grid(i);
-  assert( sample_iter == sampled_input.end() );
-
+  // calculate sum over grid times h^3 as check, should be roughly zero
+  const double element_volume = grid.Extent().MeshWidth().Product();
   double charge_sum = 0.0;
   for (Grid::iterator grid_iter = grid.Iterators().Local().Begin();
…
       ++grid_iter)
     charge_sum += grid.GetVal(*grid_iter);
-  charge_sum = MG::GetComm()->GlobalSum(charge_sum);
-  MG::GetComm()->PrintStringOnce("Grid charge sum: %e", charge_sum);
+  charge_sum = element_volume * comm.GlobalSum(charge_sum);
+  comm.PrintStringOnce("Grid charge integral: %e", charge_sum/(4.0 * VMG::Math::pi));

   // print input grid to vtk
-  VMG::MG::GetComm()->PrintGrid(grid, "grid after ImportRightHandSide");
+  comm.PrintGrid(grid, "grid after ImportRightHandSide");

 //  delete temp_grid;
…
 {
   // grid now contains the sought-for potential
-  VMG::Comm* comm = VMG::MG::GetComm();
+  VMG::Comm& comm = *VMG::MG::GetComm();

   const Index begin_local = grid.Global().LocalBegin() - grid.Local().HaloSize1();
…

   // print output grid to vtk
-  comm->PrintGrid(grid, "grid after ExportSolution");
-
+  comm.PrintGrid(grid, "grid after ExportSolution");
+
+  // obtain sampled potential from grid
   returndata.sampled_potential.sampled_grid.clear();
   for (i.X()=grid.Local().Begin().X(); i.X()<grid.Local().End().X(); ++i.X())
…
       }

+  // calculate integral over potential as long-range energy contribution
+  const double element_volume =
+      grid.Extent().MeshWidth().X() * grid.Extent().MeshWidth().Y() * grid.Extent().MeshWidth().Z();
   Grid::iterator grid_iter;
   double potential_sum = 0.0;
   for (grid_iter=grid.Iterators().Local().Begin(); grid_iter!=grid.Iterators().Local().End(); ++grid_iter)
     potential_sum += grid.GetVal(*grid_iter);
-  potential_sum = comm->GlobalSum(potential_sum);
-  comm->PrintStringOnce("Grid potential sum: %e", potential_sum);
+  potential_sum = element_volume * comm.GlobalSum(potential_sum);
+  comm.PrintStringOnce("Grid potential sum: %e", potential_sum);

   //Particle::CommMPI& comm = *dynamic_cast<Particle::CommMPI*>(MG::GetComm());
-  returndata.e_long = VMG::MG::GetComm()->GlobalSumRoot(returndata.e_long);
-
+  returndata.e_long = potential_sum;
 }
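
The end of ExportSolution now derives e_long directly from the solved potential: a global sum over all grid points weighted by the cell volume, i.e. a Riemann sum approximating the integral of the potential over the box. A minimal sketch of that reduction follows, where local_potential and global_sum are hypothetical stand-ins for VMG's per-rank Grid values and the Comm::GlobalSum reduction:

// Riemann-sum sketch of the long-range energy contribution.
// local_potential and global_sum are hypothetical stand-ins for VMG's
// per-rank Grid values and Comm::GlobalSum reduction, respectively.
#include <numeric>
#include <vector>

double long_range_energy(const std::vector<double>& local_potential,
                         double element_volume,
                         double (*global_sum)(double))
{
  // sum the potential on this rank's local grid points ...
  const double local = std::accumulate(local_potential.begin(),
                                       local_potential.end(), 0.0);
  // ... reduce across ranks and weight by h^3 to approximate the integral
  return element_volume * global_sum(local);
}

Note that the old code reduced returndata.e_long via GlobalSumRoot without incorporating the grid potential at all; the changeset instead assigns the volume-weighted potential sum computed just above it.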