/*
 * This file or a portion of this file is licensed under the terms of
 * the Globus Toolkit Public License, found in file GTPL, or at
 * http://www.globus.org/toolkit/download/license.html. This notice must
 * appear in redistributions of this file, with or without modification.
 *
 * Redistributions of this Software, with or without modification, must
 * reproduce the GTPL in: (1) the Software, or (2) the Documentation or
 * some other similar material which is provided with the Software (if
 * any).
 *
 * Copyright 1999-2004 University of Chicago and The University of
 * Southern California. All rights reserved.
 */

package org.griphyn.cPlanner.engine;

import org.griphyn.cPlanner.classes.ADag;
import org.griphyn.cPlanner.classes.SubInfo;
import org.griphyn.cPlanner.classes.TransferJob;

import org.griphyn.cPlanner.common.LogManager;
import org.griphyn.cPlanner.common.PegasusProperties;

import java.util.Iterator;
import java.util.Set;

/**
 * This refiner places the create directory jobs at the top of the graph.
 * However, instead of constricting the workflow into an hourglass shape,
 * it links each create dir job directly to every node that requires that
 * directory, as if spreading its tentacles all around. The extra
 * dependencies potentially put more load on DAGMan, but they remove the
 * HourGlass model's restriction that the plan can progress only after all
 * the create directory jobs have completed on the remote pools.
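 *
 * <p>A minimal usage sketch (hypothetical driver code; assumes the caller
 * already holds the concrete <code>ADag</code> and the
 * <code>PegasusProperties</code> for the current run):
 *
 * <pre>
 *   CreateDirectory refiner = new Tentacles(dag, properties);
 *   refiner.addCreateDirectoryNodes();
 * </pre>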
 *
 * @author Karan Vahi
 * @author Gaurang Mehta
 *
 * @version $Revision: 50 $
 */
public class Tentacles extends CreateDirectory {

    /**
     * Default constructor.
     *
     * @param concDag    the concrete dag so far.
     * @param properties the <code>PegasusProperties</code> to be used.
     */
    public Tentacles(ADag concDag, PegasusProperties properties) {
        super(concDag, properties);
    }

    /**
     * Adds the create directory nodes to the workflow, making each create
     * dir job a parent of every job that has been scheduled on that
     * particular pool.
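     *
     * <p>An illustrative shape of the resulting graph (site and job names
     * are hypothetical); in the HourGlass model the jobs would instead hang
     * off a single join node beneath the create dir jobs:
     *
     * <pre>
     *   create_dir_siteA        create_dir_siteB
     *    /      |      \           /       \
     *  jobA1  jobA2  jobA3      jobB1     jobB2
     * </pre>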
     */
    public void addCreateDirectoryNodes() {
        Set set = this.getCreateDirSites();
        String pool = null;
        String jobName = null;
        String parent = null;

        //traverse through the jobs, look at their execution pool,
        //and add a dependency from the correct create dir job.
        //the links are added first; the create dir jobs themselves
        //are added to the dag later.

        //remove the entry for the local pool
        //set.remove("local");
        SubInfo job;
        int type;
        boolean local;
        for (Iterator it = mCurrentDag.vJobSubInfos.iterator(); it.hasNext();) {
            job = (SubInfo) it.next();
            jobName = job.getName();
            pool = job.getSiteHandle();

            //the parent in case of a transfer job is the create dir
            //job for the non third party site
            parent = (job instanceof TransferJob)
                    ? getCreateDirJobName(((TransferJob) job).getNonThirdPartySite())
                    : getCreateDirJobName(pool);

            //add the dependency only for transfer jobs that stage in
            //data, for any job running on a remote site, or for compute
            //jobs running on the local site
            type = job.getJobType();
            local = pool.equals("local");
            if ((job instanceof TransferJob && type == SubInfo.STAGE_IN_JOB)
                    || !local
                    || type == SubInfo.COMPUTE_JOB
                    || type == SubInfo.STAGED_COMPUTE_JOB) {
                mLogger.log("Adding relation " + parent + " -> " + jobName,
                            LogManager.DEBUG_MESSAGE_LEVEL);
                mCurrentDag.addNewRelation(parent, jobName);
            }
        }

        //for each execution pool add
        //a create directory node
        SubInfo newJob = null;
        for (Iterator it = set.iterator(); it.hasNext();) {
            pool = (String) it.next();
            jobName = getCreateDirJobName(pool);
            newJob = makeCreateDirJob(pool, jobName);
            mCurrentDag.add(newJob);
        }
    }

}