source: src/Fragmentation/Automation/FragmentScheduler.cpp@ 668b55

Action_Thermostats Add_AtomRandomPerturbation Add_FitFragmentPartialChargesAction Add_RotateAroundBondAction Add_SelectAtomByNameAction Added_ParseSaveFragmentResults AddingActions_SaveParseParticleParameters Adding_Graph_to_ChangeBondActions Adding_MD_integration_tests Adding_ParticleName_to_Atom Adding_StructOpt_integration_tests AtomFragments Automaking_mpqc_open AutomationFragmentation_failures Candidate_v1.5.4 Candidate_v1.6.0 Candidate_v1.6.1 ChangeBugEmailaddress ChangingTestPorts ChemicalSpaceEvaluator CombiningParticlePotentialParsing Combining_Subpackages Debian_Package_split Debian_package_split_molecuildergui_only Disabling_MemDebug Docu_Python_wait EmpiricalPotential_contain_HomologyGraph EmpiricalPotential_contain_HomologyGraph_documentation Enable_parallel_make_install Enhance_userguide Enhanced_StructuralOptimization Enhanced_StructuralOptimization_continued Example_ManyWaysToTranslateAtom Exclude_Hydrogens_annealWithBondGraph FitPartialCharges_GlobalError Fix_BoundInBox_CenterInBox_MoleculeActions Fix_ChargeSampling_PBC Fix_ChronosMutex Fix_FitPartialCharges Fix_FitPotential_needs_atomicnumbers Fix_ForceAnnealing Fix_IndependentFragmentGrids Fix_ParseParticles Fix_ParseParticles_split_forward_backward_Actions Fix_PopActions Fix_QtFragmentList_sorted_selection Fix_Restrictedkeyset_FragmentMolecule Fix_StatusMsg Fix_StepWorldTime_single_argument Fix_Verbose_Codepatterns Fix_fitting_potentials Fixes ForceAnnealing_goodresults ForceAnnealing_oldresults ForceAnnealing_tocheck ForceAnnealing_with_BondGraph ForceAnnealing_with_BondGraph_continued ForceAnnealing_with_BondGraph_continued_betteresults ForceAnnealing_with_BondGraph_contraction-expansion FragmentAction_writes_AtomFragments FragmentMolecule_checks_bonddegrees GeometryObjects Gui_Fixes Gui_displays_atomic_force_velocity ImplicitCharges IndependentFragmentGrids IndependentFragmentGrids_IndividualZeroInstances IndependentFragmentGrids_IntegrationTest IndependentFragmentGrids_Sole_NN_Calculation 
JobMarket_RobustOnKillsSegFaults JobMarket_StableWorkerPool JobMarket_unresolvable_hostname_fix MoreRobust_FragmentAutomation ODR_violation_mpqc_open PartialCharges_OrthogonalSummation PdbParser_setsAtomName PythonUI_with_named_parameters QtGui_reactivate_TimeChanged_changes Recreated_GuiChecks Rewrite_FitPartialCharges RotateToPrincipalAxisSystem_UndoRedo SaturateAtoms_findBestMatching SaturateAtoms_singleDegree StoppableMakroAction Subpackage_CodePatterns Subpackage_JobMarket Subpackage_LinearAlgebra Subpackage_levmar Subpackage_mpqc_open Subpackage_vmg Switchable_LogView ThirdParty_MPQC_rebuilt_buildsystem TrajectoryDependenant_MaxOrder TremoloParser_IncreasedPrecision TremoloParser_MultipleTimesteps TremoloParser_setsAtomName Ubuntu_1604_changes stable
Last change on this file since 668b55 was 668b55, checked in by Frederik Heber <heber@…>, 13 years ago

FragmentScheduler::shutdown() now returns bool and does not remove workers anymore.

  • Property mode set to 100644
File size: 20.7 KB
RevLine 
[72eaf7f]1/*
[cd4a6e]2 * Project: MoleCuilder
3 * Description: creates and alters molecular systems
4 * Copyright (C) 2011 University of Bonn. All rights reserved.
5 * Please see the LICENSE file or "Copyright notice" in builder.cpp for details.
6 */
7
8/*
9 * \file FragmentScheduler.cpp
10 *
11 * This file strongly follows the Serialization example from the boost::asio
12 * library (see server.cpp)
[72eaf7f]13 *
[cd4a6e]14 * Created on: Oct 19, 2011
[72eaf7f]15 * Author: heber
16 */
17
[f93842]18// include config.h
19#ifdef HAVE_CONFIG_H
20#include <config.h>
21#endif
22
[c6bcd0]23// boost asio needs specific operator new
[72eaf7f]24#include <boost/asio.hpp>
[c6bcd0]25
26#include "CodePatterns/MemDebug.hpp"
27
[c4f43e]28#include <algorithm>
[72eaf7f]29#include <boost/bind.hpp>
[9a6b895]30#include <boost/lambda/lambda.hpp>
[72eaf7f]31#include <boost/lexical_cast.hpp>
32#include <iostream>
33#include <vector>
[af3aed]34#include "Connection.hpp" // Must come before boost/serialization headers.
[72eaf7f]35#include <boost/serialization/vector.hpp>
[af3aed]36#include "CodePatterns/Info.hpp"
[b0b64c]37#include "CodePatterns/Log.hpp"
[2344a3]38#include "CodePatterns/Observer/Notification.hpp"
39#include "ControllerChoices.hpp"
[9a6b895]40#include "Operations/Servers/SendJobToWorkerOperation.hpp"
[50d095]41#include "Operations/Workers/EnrollInPoolOperation.hpp"
[ff60cfa]42#include "Jobs/MPQCCommandJob.hpp"
[d920b9]43#include "Jobs/SystemCommandJob.hpp"
[ef2767]44#include "JobId.hpp"
[72eaf7f]45
[cd4a6e]46#include "FragmentScheduler.hpp"
[72eaf7f]47
[ff60cfa]48/** Helper function to enforce binding of FragmentWorker to possible derived
49 * FragmentJob classes.
50 */
51void dummyInit() {
52 SystemCommandJob("/bin/false", "something", JobId::IllegalJob);
53 MPQCCommandJob("nofile", JobId::IllegalJob);
54}
[c7deca]55
/** Constructor of class FragmentScheduler.
 *
 * We setup both acceptors to accept connections from workers and Controller.
 *
 * We also sign on as Observer to two channels -- WorkerPool::WorkerIdle and
 * FragmentQueue::JobAdded -- so that recieveNotification() can match idle
 * workers with pending jobs as either becomes available.
 *
 * \param io_service io_service of the asynchronous communications
 * \param workerport port to listen for worker connections
 * \param controllerport port to listen for controller connections.
 */
FragmentScheduler::FragmentScheduler(boost::asio::io_service& _io_service, unsigned short workerport, unsigned short controllerport) :
  Observer("FragmentScheduler"),
  io_service(_io_service),
  // worker listener gets a callback that hands a job to a specific worker
  WorkerListener(_io_service, workerport, JobsQueue, pool,
    boost::bind(&FragmentScheduler::sendJobToWorker, boost::ref(*this), _1, _2)),
  // controller listener gets callbacks for removing all workers and shutdown
  ControllerListener(_io_service, controllerport, JobsQueue,
    boost::bind(&FragmentScheduler::removeAllWorkers, boost::ref(*this)),
    boost::bind(&FragmentScheduler::shutdown, boost::ref(*this))),
  connection(_io_service)
{
  Info info(__FUNCTION__);

  // sign on to idle workers and present jobs
  pool.signOn(this, WorkerPool::WorkerIdle);
  JobsQueue.signOn(this, FragmentQueue::JobAdded);

  // listen for controller
  ControllerListener.initiateSocket();

  // listen for workers
  WorkerListener.initiateSocket();
}
86
/** Destructor of class FragmentScheduler.
 *
 * Signs off from the two observer channels subscribed to in the constructor.
 */
FragmentScheduler::~FragmentScheduler()
{
  // sign off
  pool.signOff(this, WorkerPool::WorkerIdle);
  JobsQueue.signOff(this, FragmentQueue::JobAdded);
}
93
[db03d9]94/** Handle a new worker connection.
95 *
[41c1b7]96 * We store the given address in the pool.
[db03d9]97 *
98 * \param e error code if something went wrong
99 * \param conn reference with the connection
100 */
[8036b7]101void FragmentScheduler::WorkerListener_t::handle_Accept(const boost::system::error_code& e, connection_ptr conn)
[ed2c5b]102{
[cd4a6e]103 Info info(__FUNCTION__);
[ed2c5b]104 if (!e)
[72eaf7f]105 {
[b0b64c]106 // Successfully accepted a new connection.
[41c1b7]107 // read address
108 conn->async_read(address,
[9a3f84]109 boost::bind(&FragmentScheduler::WorkerListener_t::handle_ReadAddress, this,
[41c1b7]110 boost::asio::placeholders::error, conn));
[9a3f84]111 }
112 else
113 {
[41c1b7]114 // An error occurred. Log it and return. Since we are not starting a new
115 // accept operation the io_service will run out of work to do and the
116 // server will exit.
117 Exitflag = ErrorFlag;
118 ELOG(0, e.message());
119 }
120}
[0bdd51b]121
[9a3f84]122/** Handle having received Worker's address
[41c1b7]123 *
124 * \param e error code if something went wrong
125 * \param conn reference with the connection
126 */
[9a3f84]127void FragmentScheduler::WorkerListener_t::handle_ReadAddress(const boost::system::error_code& e, connection_ptr conn)
[41c1b7]128{
129 Info info(__FUNCTION__);
130 if (!e)
131 {
[9a3f84]132 // Successfully accepted a new connection.
133 // read address
134 conn->async_read(choice,
135 boost::bind(&FragmentScheduler::WorkerListener_t::handle_ReadChoice, this,
136 boost::asio::placeholders::error, conn));
137 }
138 else
139 {
140 // An error occurred. Log it and return. Since we are not starting a new
141 // accept operation the io_service will run out of work to do and the
142 // server will exit.
143 Exitflag = ErrorFlag;
144 ELOG(0, e.message());
145 }
146}
147
148/** Controller callback function to read the choice for next operation.
149 *
150 * \param e error code if something went wrong
151 * \param conn reference with the connection
152 */
153void FragmentScheduler::WorkerListener_t::handle_ReadChoice(const boost::system::error_code& e, connection_ptr conn)
154{
155 Info info(__FUNCTION__);
156 if (!e)
157 {
158 LOG(1, "INFO: Received request for operation " << choice << ".");
159 // switch over the desired choice read previously
160 switch(choice) {
161 case NoWorkerOperation:
162 {
163 ELOG(1, "WorkerListener_t::handle_ReadChoice() - called with NoOperation.");
164 break;
165 }
166 case EnrollInPool:
167 {
168 if (pool.presentInPool(address)) {
169 ELOG(1, "INFO: worker "+toString(address)+" is already contained in pool.");
170 enum EnrollInPoolOperation::EnrollFlag flag = EnrollInPoolOperation::Fail;
171 conn->async_write(flag,
172 boost::bind(&FragmentScheduler::WorkerListener_t::handle_enrolled, this,
173 boost::asio::placeholders::error, conn));
174 } else {
175 // insert as its new worker
176 LOG(1, "INFO: Adding " << address << " to pool ...");
177 pool.addWorker(address);
178 enum EnrollInPoolOperation::EnrollFlag flag = EnrollInPoolOperation::Success;
179 conn->async_write(flag,
180 boost::bind(&FragmentScheduler::WorkerListener_t::handle_enrolled, this,
181 boost::asio::placeholders::error, conn));
182 break;
183 }
184 case SendResult:
185 {
186 if (pool.presentInPool(address)) {
187 // check whether its priority is busy_priority
188 if (pool.isWorkerBusy(address)) {
189 conn->async_read(result,
190 boost::bind(&FragmentScheduler::WorkerListener_t::handle_ReceiveResultFromWorker, this,
191 boost::asio::placeholders::error, conn));
192 } else {
193 ELOG(1, "Worker " << address << " trying to send result who is not marked as busy.");
194 conn->async_read(result,
195 boost::bind(&FragmentScheduler::WorkerListener_t::handle_RejectResultFromWorker, this,
196 boost::asio::placeholders::error, conn));
197 }
198 } else {
199 ELOG(1, "Worker " << address << " trying to send result who is not in pool.");
200 conn->async_read(result,
201 boost::bind(&FragmentScheduler::WorkerListener_t::handle_RejectResultFromWorker, this,
202 boost::asio::placeholders::error, conn));
203 }
204 break;
205 }
206 case RemoveFromPool:
207 {
208 if (pool.presentInPool(address)) {
209 // removing present worker
210 pool.removeWorker(address);
211 } else {
212 ELOG(1, "Shutting down Worker " << address << " not contained in pool.");
213 }
214 break;
215 }
216 default:
217 Exitflag = ErrorFlag;
218 ELOG(1, "WorkerListener_t::handle_ReadChoice() - called with no valid choice.");
219 break;
[41c1b7]220 }
[b0b64c]221 }
[9a3f84]222 // restore NoOperation choice such that choice is not read twice
223 choice = NoWorkerOperation;
[2344a3]224
225 initiateSocket();
[cd4a6e]226 }
227 else
228 {
229 // An error occurred. Log it and return. Since we are not starting a new
230 // accept operation the io_service will run out of work to do and the
231 // server will exit.
[8036b7]232 Exitflag = ErrorFlag;
[b0b64c]233 ELOG(0, e.message());
[cd4a6e]234 }
[ed2c5b]235}
[72eaf7f]236
[9a3f84]237
[41c1b7]238/** Callback function when new worker has enrolled.
[db03d9]239 *
240 * \param e error code if something went wrong
241 * \param conn reference with the connection
242 */
[41c1b7]243void FragmentScheduler::WorkerListener_t::handle_enrolled(const boost::system::error_code& e, connection_ptr conn)
[ed2c5b]244{
[41c1b7]245 Info info(__FUNCTION__);
[2344a3]246 if (e)
[41c1b7]247 {
248 // An error occurred. Log it and return. Since we are not starting a new
249 // accept operation the io_service will run out of work to do and the
250 // server will exit.
251 Exitflag = ErrorFlag;
252 ELOG(0, e.message());
253 }
[ef2767]254}
255
/** Callback function when result has been received.
 *
 * Pushes the received result into the JobsQueue, marks the sending worker
 * as idle again (which may trigger dispatch of the next job via the
 * WorkerIdle notification), and releases the result buffer.
 *
 * \param e error code if something went wrong (currently not inspected)
 * \param conn reference with the connection
 */
void FragmentScheduler::WorkerListener_t::handle_ReceiveResultFromWorker(const boost::system::error_code& e, connection_ptr conn)
{
  Info info(__FUNCTION__);
  LOG(1, "INFO: Received result for job #" << result->getId() << " ...");

  // and push into queue
  ASSERT(result->getId() != (JobId_t)JobId::NoJob,
      "WorkerListener_t::handle_ReceiveResultFromWorker() - result received has NoJob id.");
  ASSERT(result->getId() != (JobId_t)JobId::IllegalJob,
      "WorkerListener_t::handle_ReceiveResultFromWorker() - result received has IllegalJob id.");
  // place id into expected
  // presumably the runtime check repeats the ASSERTs so that invalid results
  // are silently dropped in builds where ASSERT compiles out -- confirm
  if ((result->getId() != (JobId_t)JobId::NoJob) && (result->getId() != (JobId_t)JobId::IllegalJob))
    JobsQueue.pushResult(result);

  // mark as idle
  pool.unmarkWorkerBusy(address);

  // erase result
  result.reset();
  LOG(1, "INFO: JobsQueue has " << JobsQueue.getDoneJobs() << " results.");
}
282
/** Callback function when a result has been received but must be rejected.
 *
 * The result came from a worker that is unknown or not marked busy; the
 * corresponding job is placed back into the queue for recalculation.
 *
 * NOTE(review): unlike handle_ReceiveResultFromWorker() the result buffer is
 * not reset() here -- confirm whether that is intentional.
 *
 * \param e error code if something went wrong (currently not inspected)
 * \param conn reference with the connection
 */
void FragmentScheduler::WorkerListener_t::handle_RejectResultFromWorker(const boost::system::error_code& e, connection_ptr conn)
{
  Info info(__FUNCTION__);
  // nothing to do
  LOG(1, "INFO: Rejecting result for job #" << result->getId() << ", placing back into queue.");

  JobsQueue.resubmitJob(result->getId());

  LOG(1, "INFO: JobsQueue has " << JobsQueue.getDoneJobs() << " results.");
}
298
[41c1b7]299
[db03d9]300/** Handle a new controller connection.
301 *
302 * \sa handle_ReceiveJobs()
303 * \sa handle_CheckResultState()
304 * \sa handle_SendResults()
305 *
306 * \param e error code if something went wrong
307 * \param conn reference with the connection
308 */
[8036b7]309void FragmentScheduler::ControllerListener_t::handle_Accept(const boost::system::error_code& e, connection_ptr conn)
[db03d9]310{
311 Info info(__FUNCTION__);
312 if (!e)
313 {
[778abb]314 conn->async_read(choice,
[8036b7]315 boost::bind(&FragmentScheduler::ControllerListener_t::handle_ReadChoice, this,
[778abb]316 boost::asio::placeholders::error, conn));
317 }
318 else
319 {
320 // An error occurred. Log it and return. Since we are not starting a new
321 // accept operation the io_service will run out of work to do and the
322 // server will exit.
[8036b7]323 Exitflag = ErrorFlag;
[778abb]324 ELOG(0, e.message());
325 }
326}
327
/** Controller callback function to read the choice for next operation.
 *
 * Dispatches on the operation \a choice previously read from the controller:
 * handing out fresh job ids, receiving jobs, reporting queue state, removing
 * all workers, sending results, or shutting down. Unless a successful
 * shutdown was requested, a new accept operation is started afterwards.
 *
 * \param e error code if something went wrong
 * \param conn reference with the connection
 */
void FragmentScheduler::ControllerListener_t::handle_ReadChoice(const boost::system::error_code& e, connection_ptr conn)
{
  Info info(__FUNCTION__);
  if (!e)
  {
    // whether to re-arm the acceptor at the end; cleared only on shutdown
    bool LaunchNewAcceptor = true;
    LOG(1, "INFO: Received request for operation " << choice << ".");
    // switch over the desired choice read previously
    switch(choice) {
      case NoControllerOperation:
      {
        ELOG(1, "ControllerListener_t::handle_ReadChoice() - called with NoOperation.");
        break;
      }
      case GetNextJobId:
      {
        LOG(1, "INFO: Receiving number of desired job ids from controller ...");
        conn->async_read(NumberIds,
            boost::bind(&FragmentScheduler::ControllerListener_t::handle_GetNextJobIdState, this,
                boost::asio::placeholders::error, conn));
        break;
      }
      case SendJobs:
      {
        // The connection::async_write() function will automatically
        // serialize the data structure for us.
        LOG(1, "INFO: Receiving bunch of jobs from a controller ...");
        conn->async_read(jobs,
            boost::bind(&FragmentScheduler::ControllerListener_t::handle_ReceiveJobs, this,
                boost::asio::placeholders::error, conn));
        break;
      }
      case CheckState:
      {
        // first update number
        jobInfo[0] = JobsQueue.getPresentJobs();
        jobInfo[1] = JobsQueue.getDoneJobs();
        // now we accept connections to check for state of calculations
        LOG(1, "INFO: Sending state that "+toString(jobInfo[0])+" jobs are present and "+toString(jobInfo[1])+" jobs are done to controller ...");
        conn->async_write(jobInfo,
            boost::bind(&FragmentScheduler::ControllerListener_t::handle_CheckResultState, this,
                boost::asio::placeholders::error, conn));
        break;
      }
      case RemoveAll:
      {
        // NOTE(review): removeallWorkers is presumably the callback bound to
        // FragmentScheduler::removeAllWorkers in the scheduler's constructor
        // -- confirm against the listener's declaration.
        removeallWorkers();
        break;
      }
      case ReceiveResults:
      {
        const std::vector<FragmentResult::ptr> results = JobsQueue.getAllResults();
        // ... or we give the results
        LOG(1, "INFO: Sending "+toString(results.size())+" results to controller ...");
        conn->async_write(results,
            boost::bind(&FragmentScheduler::ControllerListener_t::handle_SendResults, this,
                boost::asio::placeholders::error, conn));
        break;
      }
      case ShutdownControllerSocket:
      {
        LOG(1, "INFO: Received shutdown from controller ...");
        // only allow for shutdown when there are no more jobs in the queue
        if (!JobsQueue.isJobPresent()) {
          // we shutdown? Hence, also shutdown controller
          LaunchNewAcceptor = !shutdownAllSockets();
        } else {
          ELOG(2, "There are still jobs waiting in the queue.");
        }
        break;
      }
      default:
        Exitflag = ErrorFlag;
        ELOG(1, "ControllerListener_t::handle_ReadChoice() - called with no valid choice.");
        break;
    }
    // restore NoControllerOperation choice such that choice is not read twice
    choice = NoControllerOperation;

    if (LaunchNewAcceptor) {
      LOG(1, "Launching new acceptor on socket.");
      // Start an accept operation for a new Connection.
      initiateSocket();
    }
  }
  else
  {
    // An error occurred. Log it and return. Since we are not starting a new
    // accept operation the io_service will run out of work to do and the
    // server will exit.
    Exitflag = ErrorFlag;
    ELOG(0, e.message());
  }
}
427
428/** Controller callback function when job has been sent.
[778abb]429 *
430 * We check here whether the worker socket is accepting, if there
431 * have been no jobs we re-activate it, as it is shut down after
432 * last job.
[db03d9]433 *
434 * \param e error code if something went wrong
435 * \param conn reference with the connection
436 */
[8036b7]437void FragmentScheduler::ControllerListener_t::handle_ReceiveJobs(const boost::system::error_code& e, connection_ptr conn)
[db03d9]438{
439 Info info(__FUNCTION__);
440 // jobs are received, hence place in JobsQueue
441 if (!jobs.empty()) {
442 LOG(1, "INFO: Pushing " << jobs.size() << " jobs into queue.");
443 JobsQueue.pushJobs(jobs);
444 }
445 jobs.clear();
[ed2c5b]446}
[cd4a6e]447
/** Controller callback function when checking on state of results.
 *
 * Invoked after the (scheduled, done) job counts have been written to the
 * controller in handle_ReadChoice(); only logs completion.
 *
 * \param e error code if something went wrong (currently not inspected)
 * \param conn reference with the connection
 */
void FragmentScheduler::ControllerListener_t::handle_CheckResultState(const boost::system::error_code& e, connection_ptr conn)
{
  Info info(__FUNCTION__);
  // do nothing
  LOG(1, "INFO: Sent that " << jobInfo << " jobs are (scheduled, done).");
}
[778abb]459
[d1dbfc]460/** Controller callback function when checking on state of results.
461 *
462 * \param e error code if something went wrong
463 * \param conn reference with the connection
464 */
[8036b7]465void FragmentScheduler::ControllerListener_t::handle_GetNextJobIdState(const boost::system::error_code& e, connection_ptr conn)
[c4f43e]466{
467 Info info(__FUNCTION__);
468
469 std::vector<JobId_t> nextids( NumberIds, JobId::IllegalJob);
470 std::generate(nextids.begin(), nextids.end(),
471 boost::bind(&GlobalJobId::getNextId, boost::ref(globalId)));
472 LOG(1, "INFO: Sending next available job ids " << nextids << " to controller ...");
473 conn->async_write(nextids,
474 boost::bind(&FragmentScheduler::ControllerListener_t::handle_SendIds, this,
475 boost::asio::placeholders::error, conn));
476}
477
/** Controller callback function when free job ids have been sent.
 *
 * Invoked after the ids were written in handle_GetNextJobIdState();
 * only logs completion.
 *
 * \param e error code if something went wrong (currently not inspected)
 * \param conn reference with the connection
 */
void FragmentScheduler::ControllerListener_t::handle_SendIds(const boost::system::error_code& e, connection_ptr conn)
{
  Info info(__FUNCTION__);
  // do nothing
  LOG(1, "INFO: Ids have been sent.");
}
489
/** Controller callback function when results have been sent.
 *
 * Invoked after the results were written in handle_ReadChoice()'s
 * ReceiveResults case; only logs completion.
 *
 * \param e error code if something went wrong (currently not inspected)
 * \param conn reference with the connection
 */
void FragmentScheduler::ControllerListener_t::handle_SendResults(const boost::system::error_code& e, connection_ptr conn)
{
  Info info(__FUNCTION__);
  // do nothing
  LOG(1, "INFO: Results have been sent.");
}
501
[41c1b7]502
/** Helper function to send a job to worker.
 *
 * Note that we do not set the worker as busy. We simply send it the job;
 * the caller must already have marked the worker busy (enforced by ASSERT).
 *
 * @param address address of worker
 * @param job job to send
 */
void FragmentScheduler::sendJobToWorker(const WorkerAddress &address, FragmentJob::ptr &job)
{
  ASSERT( pool.isWorkerBusy(address),
      "FragmentScheduler::sendJobToWorker() - Worker "+toString(address)+" is not marked as busy.");
  LOG(1, "INFO: Sending job " << job->getId() << " to worker " << address << ".");

  // create op, sign on, and hand over to queue
  // NOTE(review): raw new -- presumably OpQueue takes ownership of the
  // operation and deletes it on completion; confirm to rule out a leak.
  AsyncOperation *sendJobOp = new SendJobToWorkerOperation(connection,job);
  OpQueue.push_back(sendJobOp, address);
}
520
[2344a3]521/** Helper function to shutdown a single worker.
522 *
523 * We send NoJob to indicate shutdown
524 *
525 * @param address of worker to shutdown
526 */
527void FragmentScheduler::shutdownWorker(const WorkerAddress &address)
528{
[6ea7f4]529 ASSERT( !pool.isWorkerBusy(address),
530 "FragmentScheduler::sendJobToWorker() - Worker "+toString(address)+" is already busy.");
531 LOG(2, "INFO: Shutting down worker " << address << "...");
[ba995d]532 AsyncOperation *shutdownWorkerOp = new ShutdownWorkerOperation(connection);
533 OpQueue.push_back(shutdownWorkerOp, address);
[2344a3]534}
535
/** Sends shutdown to all current workers in the pool.
 *
 * First we sign off from the pool's WorkerIdle channel so that no new jobs
 * are handed out while tearing down. We then wait for busy workers to
 * finish, send every idle worker the shutdown signal, wait for the pending
 * shutdown operations to drain, and finally clear the pool.
 *
 * NOTE(review): both waiting loops below are busy-wait spins; presumably
 * the asio handlers that change pool/OpQueue state run concurrently so the
 * loops terminate -- confirm, and consider a blocking wait instead.
 */
void FragmentScheduler::removeAllWorkers()
{
  // first, sign off such that no new jobs are given to workers
  pool.signOff(this, WorkerPool::WorkerIdle);

  LOG(2, "DEBUG: Waiting for busy workers to finish ...");
  while (pool.hasBusyWorkers())
    ;

  LOG(2, "INFO: Shutting down workers ...");
  // iterate until there are no more idle workers
  do {
    // get list of all idle workers
    typedef std::vector<std::pair<std::string, std::string> > WorkerList_t;
    WorkerList_t WorkerList = pool.getListOfIdleWorkers();

    // give all workers shutdown signal
    for (WorkerList_t::const_iterator iter = WorkerList.begin(); iter != WorkerList.end(); ++iter)
      shutdownWorker(WorkerAddress(iter->first, iter->second));

    // wait for pending shutdown operations
    while (!OpQueue.empty())
      ;
  } while (pool.presentIdleWorkers());
  pool.removeAllWorkers();
}
565
/** Helper function to shutdown the server properly.
 *
 * \todo one should idle here until all workers have returned from
 * calculating stuff (or workers need to still listen while they are
 * calculating which is probably better).
 *
 * \note We only shutdown when there are no workers left.
 *
 * Closes both listener sockets and stops the io_service, letting the
 * server's event loop run out of work and terminate.
 *
 * @return true - doing shutdown, false - precondition not met, not shutting down
 */
bool FragmentScheduler::shutdown()
{
  if (!pool.presentIdleWorkers() && !pool.hasBusyWorkers()) {
    LOG(1, "INFO: Shutting all down ...");

    /// close the worker listener's socket
    WorkerListener.closeSocket();

    /// close the controller listener's socket
    ControllerListener.closeSocket();

    /// finally, stop the io_service
    io_service.stop();
    return true;
  } else {
    ELOG(2, "There are still idle or busy workers present.");
    return false;
  }
}
595
596/** Internal helper to send the next available job to the next idle worker.
597 *
598 */
599void FragmentScheduler::sendAvailableJobToNextIdleWorker()
600{
601 const WorkerAddress address = pool.getNextIdleWorker();
602 FragmentJob::ptr job = JobsQueue.popJob();
603 sendJobToWorker(address, job);
604}
605
/** Observer callback for general updates.
 *
 * We only sign on to specific notification channels (see constructor),
 * hence a general update must never arrive here.
 */
void FragmentScheduler::update(Observable *publisher)
{
  ASSERT(0, "FragmentScheduler::update() - we are not signed on for global updates.");
}
610
/** Observer callback for channel notifications.
 *
 * (The misspelled name is part of the Observer interface and must be kept.)
 * Two channels are subscribed: WorkerPool::WorkerIdle and
 * FragmentQueue::JobAdded. In either case we dispatch a job whenever both
 * an idle worker and a pending job exist; any other notification is a
 * programming error.
 */
void FragmentScheduler::recieveNotification(Observable *publisher, Notification_ptr notification)
{
  if ((publisher == &pool) && (notification->getChannelNo() == WorkerPool::WorkerIdle)) {
    // we have an idle worker
    LOG(1, "INFO: We are notified of an idle worker.");
    // are jobs available?
    if (JobsQueue.isJobPresent()) {
      sendAvailableJobToNextIdleWorker();
    }
  } else if ((publisher == &JobsQueue) && (notification->getChannelNo() == FragmentQueue::JobAdded)) {
    // we have new jobs
    LOG(1, "INFO: We are notified of a new job.");
    // check for idle workers
    if (pool.presentIdleWorkers()) {
      sendAvailableJobToNextIdleWorker();
    }
  } else {
    ASSERT(0, "FragmentScheduler::recieveNotification() - we are not signed on for updates in channel "
        +toString(notification->getChannelNo())+".");
  }
}
632
/** Observer callback when a watched Observable is destroyed.
 *
 * Nothing to do here.
 */
void FragmentScheduler::subjectKilled(Observable *publisher)
{}
Note: See TracBrowser for help on using the repository browser.