- Timestamp:
- Jun 17, 2010, 11:55:29 AM (15 years ago)
- Branches:
- Action_Thermostats, Add_AtomRandomPerturbation, Add_FitFragmentPartialChargesAction, Add_RotateAroundBondAction, Add_SelectAtomByNameAction, Added_ParseSaveFragmentResults, AddingActions_SaveParseParticleParameters, Adding_Graph_to_ChangeBondActions, Adding_MD_integration_tests, Adding_ParticleName_to_Atom, Adding_StructOpt_integration_tests, AtomFragments, Automaking_mpqc_open, AutomationFragmentation_failures, Candidate_v1.5.4, Candidate_v1.6.0, Candidate_v1.6.1, ChangeBugEmailaddress, ChangingTestPorts, ChemicalSpaceEvaluator, CombiningParticlePotentialParsing, Combining_Subpackages, Debian_Package_split, Debian_package_split_molecuildergui_only, Disabling_MemDebug, Docu_Python_wait, EmpiricalPotential_contain_HomologyGraph, EmpiricalPotential_contain_HomologyGraph_documentation, Enable_parallel_make_install, Enhance_userguide, Enhanced_StructuralOptimization, Enhanced_StructuralOptimization_continued, Example_ManyWaysToTranslateAtom, Exclude_Hydrogens_annealWithBondGraph, FitPartialCharges_GlobalError, Fix_BoundInBox_CenterInBox_MoleculeActions, Fix_ChargeSampling_PBC, Fix_ChronosMutex, Fix_FitPartialCharges, Fix_FitPotential_needs_atomicnumbers, Fix_ForceAnnealing, Fix_IndependentFragmentGrids, Fix_ParseParticles, Fix_ParseParticles_split_forward_backward_Actions, Fix_PopActions, Fix_QtFragmentList_sorted_selection, Fix_Restrictedkeyset_FragmentMolecule, Fix_StatusMsg, Fix_StepWorldTime_single_argument, Fix_Verbose_Codepatterns, Fix_fitting_potentials, Fixes, ForceAnnealing_goodresults, ForceAnnealing_oldresults, ForceAnnealing_tocheck, ForceAnnealing_with_BondGraph, ForceAnnealing_with_BondGraph_continued, ForceAnnealing_with_BondGraph_continued_betteresults, ForceAnnealing_with_BondGraph_contraction-expansion, FragmentAction_writes_AtomFragments, FragmentMolecule_checks_bonddegrees, GeometryObjects, Gui_Fixes, Gui_displays_atomic_force_velocity, ImplicitCharges, IndependentFragmentGrids, IndependentFragmentGrids_IndividualZeroInstances, IndependentFragmentGrids_IntegrationTest, IndependentFragmentGrids_Sole_NN_Calculation, JobMarket_RobustOnKillsSegFaults, JobMarket_StableWorkerPool, JobMarket_unresolvable_hostname_fix, MoreRobust_FragmentAutomation, ODR_violation_mpqc_open, PartialCharges_OrthogonalSummation, PdbParser_setsAtomName, PythonUI_with_named_parameters, QtGui_reactivate_TimeChanged_changes, Recreated_GuiChecks, Rewrite_FitPartialCharges, RotateToPrincipalAxisSystem_UndoRedo, SaturateAtoms_findBestMatching, SaturateAtoms_singleDegree, StoppableMakroAction, Subpackage_CodePatterns, Subpackage_JobMarket, Subpackage_LinearAlgebra, Subpackage_levmar, Subpackage_mpqc_open, Subpackage_vmg, Switchable_LogView, ThirdParty_MPQC_rebuilt_buildsystem, TrajectoryDependenant_MaxOrder, TremoloParser_IncreasedPrecision, TremoloParser_MultipleTimesteps, TremoloParser_setsAtomName, Ubuntu_1604_changes, stable
- Children:
- fc6053
- Parents:
- e6317b
- File:
- 1 edited
Legend:
- Unmodified (no prefix)
- Added (marked with +)
- Removed (marked with -)
src/Helpers/MemDebug.cpp
re6317b → r492279

…
 using namespace std;
+
+// we need our own low level mutexex, since we cannot assure the time of construction and destruction
+// otherwise
+#if defined(unix) || defined(__unix)
+
+#include <pthread.h>
+#include <cassert>
+#define mutex_t pthread_mutex_t
+#define mutex_init PTHREAD_MUTEX_INITIALIZER
+#define mutex_lock(mtx) \
+  do{\
+    int res = pthread_mutex_lock(&(mtx));\
+    assert(!res && "Could not lock mutex!");\
+  }while(0)
+
+#define mutex_unlock(mtx) \
+  do{\
+    int res = pthread_mutex_unlock(&(mtx));\
+    assert(!res && "Could not unlock mutex!");\
+  }while(0)
+
+#else
+# error "No thread structure defined for this plattform..."
+#endif

 #ifndef NDBEGUG
…
 };

-boost::mutex memorylock;
+
+mutex_t memorylock = mutex_init;

 // start and end of the doubly-linked list
…
 }

+// Adds an entry to the linked list
+void addEntry(entry_t *entry){
+  // check if the entry is already in the list
+  if(!entry->isIgnored)
+    return;
+
+  mutex_lock(Memory::memorylock);
+
+  entry->next=0; // the created block is last in the list
+  entry->prev=Memory::end; // the created block is last in the list
+  if(!Memory::begin){
+    // the list was empty... start a new one
+    Memory::begin=entry;
+  }
+  else {
+    // other blocks present... we can add to the last one
+    Memory::end->next=entry;
+  }
+  Memory::end=entry;
+
+  // update some global info
+  Memory::state += entry->info.nbytes;
+  if(Memory::state>Memory::max){
+    Memory::max = Memory::state;
+  }
+  ++Memory::allocs;
+  // done with the list... it is safe to unlock now
+  mutex_unlock(Memory::memorylock);
+  entry->isIgnored = false;
+}
+
 // Deletes an entry from the linked list
 void deleteEntry(entry_t *entry){
…
     return;

+  mutex_lock(memorylock);
   if(entry->prev){
     entry->prev->next = entry->next;
…
     end = entry->prev;
   }
+  Memory::state -= entry->info.nbytes;
+  mutex_unlock(memorylock);
   entry->isIgnored = true;
-  Memory::state -= entry->info.nbytes;
+
 }

…
 void *operator new(size_t nbytes,const char* file, int line, const char* func) throw(std::bad_alloc) {

-  // we need to lock, so that no one changes the linked list while we are here
-  boost::mutex::scoped_lock guard(Memory::memorylock);
-
   // to avoid allocations of 0 bytes if someone screws up
   // allocation with 0 byte size are undefined behavior, so we are
…
     throw std::bad_alloc();
   }
-
-  // we got the space, so update the global info
-  Memory::state += nbytes;
-  if(Memory::state>Memory::max){
-    Memory::max = Memory::state;
-  }
-  Memory::allocs++;

   // build the entry in front of the space
…
   entry->info.location = (char*)res + entrySpace;

-  // add the entry at the end of the list
-  entry->next=0; // the created block is last in the list
-  entry->prev=Memory::end; // the created block is last in the list
-  if(!Memory::begin){
-    // the list was empty... start a new one
-    Memory::begin=entry;
-  }
-  else {
-    // other blocks present... we can add to the last one
-    Memory::end->next=entry;
-  }
-  Memory::end=entry;
+  // mark the block as not in the list (will be changed by addEntry)
+  entry->isIgnored = true;
+  Memory::addEntry(entry);

   // get the checksum...
   entry->checksum = Memory::calcChecksum(&entry->info);
-  // this will be set to true, when the block is removed from
-  // the list for any reason
-  entry->isIgnored = false;

   // ok, space is prepared... the user can have it.
…

 void *operator new(size_t nbytes,const char* file, int line) throw(std::bad_alloc) {
-
-  // we need to lock, so that no one changes the linked list while we are here
-  boost::mutex::scoped_lock guard(Memory::memorylock);

   // to avoid allocations of 0 bytes if someone screws up
…
     throw std::bad_alloc();
   }
-
-  // we got the space, so update the global info
-  Memory::state += nbytes;
-  if(Memory::state>Memory::max){
-    Memory::max = Memory::state;
-  }
-  Memory::allocs++;

   // build the entry in front of the space
…
   entry->info.location = (char*)res + entrySpace;

-  // add the entry at the end of the list
-  entry->next=0; // the created block is last in the list
-  entry->prev=Memory::end; // the created block is last in the list
-  if(!Memory::begin){
-    // the list was empty... start a new one
-    Memory::begin=entry;
-  }
-  else {
-    // other blocks present... we can add to the last one
-    Memory::end->next=entry;
-  }
-  Memory::end=entry;
+  // mark the block as not in the list (will be changed by addEntry)
+  entry->isIgnored = true;
+  Memory::addEntry(entry);

   // get the checksum...
…
     return;
   }
-
-  // we need to lock, so the linked list does not changed while we are in here
-  boost::mutex::scoped_lock guard(Memory::memorylock);

   // get the size for the entry, including alignment
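The comment added at the top of the file ("we cannot assure the time of construction and destruction otherwise") points at the static initialization order problem: the instrumented operator new may already run while globals are still being constructed or after they have been destroyed, so a boost::mutex object cannot be relied on to exist yet, whereas a pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER needs no constructor at all. Below is a minimal, self-contained sketch of that pattern; the names list_lock, tracked_bytes and track_allocation are illustrative and not part of the changeset.

// Sketch under the assumption above: a statically initialized pthread mutex
// is usable before (and after) any C++ constructor has run.
#include <pthread.h>
#include <cassert>
#include <cstdio>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; // no constructor needed
static long tracked_bytes = 0;                                // shared bookkeeping

static void track_allocation(long nbytes) {
  int res = pthread_mutex_lock(&list_lock);
  assert(!res && "Could not lock mutex!");
  tracked_bytes += nbytes;   // critical section: update the shared counter
  res = pthread_mutex_unlock(&list_lock);
  assert(!res && "Could not unlock mutex!");
}

int main() {
  track_allocation(128);
  std::printf("tracked bytes: %ld\n", tracked_bytes);
  return 0;
}

Note that the changeset also narrows the locking scope: instead of holding a scoped_lock for the entire body of operator new, the lock is now taken only inside addEntry/deleteEntry around the list and counter updates.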
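For context on how the instrumented overloads shown in the diff are reached from user code: a memory-debug header typically routes every new expression through the (file, line, func) overload via a macro. The wiring below is a hypothetical illustration and is not taken from this repository's headers; only the overload signature mirrors the one in the changeset (error handling is omitted).

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <new>

// Simplified stand-in for the instrumented allocator from the diff; the real
// version builds a tracking entry and hands it to Memory::addEntry().
void *operator new(std::size_t nbytes, const char *file, int line, const char *func) {
  std::printf("new of %zu bytes at %s:%d in %s\n", nbytes, file, line, func);
  return std::malloc(nbytes);
}

// Matching placement delete, used only if a constructor throws.
void operator delete(void *ptr, const char *, int, const char *) noexcept {
  std::free(ptr);
}

int main() {
  // A debug header would usually hide this call behind something like
  //   #define new new(__FILE__, __LINE__, __func__)   (hypothetical wiring)
  int *p = new (__FILE__, __LINE__, __func__) int(42);
  std::printf("value: %d\n", *p);
  std::free(p); // int is trivially destructible; release the malloc'd block directly
  return 0;
}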