001: /**
002: * This file or a portion of this file is licensed under the terms of
003: * the Globus Toolkit Public License, found at $PEGASUS_HOME/GTPL or
004: * http://www.globus.org/toolkit/download/license.html.
005: * This notice must appear in redistributions of this file
006: * with or without modification.
007: *
008: * Redistributions of this Software, with or without modification, must reproduce
009: * the GTPL in:
010: * (1) the Software, or
011: * (2) the Documentation or
012: * some other similar material which is provided with the Software (if any).
013: *
014: * Copyright 1999-2004
015: * University of Chicago and The University of Southern California.
016: * All rights reserved.
 */
package org.griphyn.cPlanner.engine;
018:
019: import org.griphyn.cPlanner.classes.ADag;
020: import org.griphyn.cPlanner.classes.Data;
021: import org.griphyn.cPlanner.classes.SubInfo;
022:
023: import org.griphyn.cPlanner.code.gridstart.GridStartFactory;
024:
025: import org.griphyn.cPlanner.common.LogManager;
026: import org.griphyn.cPlanner.common.PegasusProperties;
027:
028: import org.griphyn.common.catalog.TransformationCatalogEntry;
029:
030: import org.griphyn.cPlanner.namespace.VDS;
031:
032: import org.griphyn.common.classes.TCType;
033:
034: import java.io.File;
035:
036: import java.util.Iterator;
037: import java.util.List;
038: import java.util.Set;
039: import java.util.Vector;
040:
041: /**
042: * This class inserts the nodes for creating the random directories on the remote
043: * execution pools. This is done when the resources have already been selected
044: * to execute the jobs in the Dag. It adds a make directory node at the top level
 * of the graph, and all these nodes feed into a single dummy job before
 * branching out to the root nodes of the original concrete dag. We thus end
 * up introducing a classic X shape at the top of the graph, hence the name
 * HourGlass.
049: *
050: * @author Karan Vahi
051: * @author Gaurang Mehta
052: *
053: * @version $Revision: 139 $
054: */
055:
056: public class HourGlass extends CreateDirectory {
057:
058: /**
059: * The name concatenating dummy job that ensures that Condor does not start
060: * staging in before the directories are created.
061: */
062: public static final String DUMMY_CONCAT_JOB = "pegasus_concat";
063:
064: /**
065: * Default constructor.
066: *
067: * @param concDag The concrete dag so far. *
068: * @param properties the <code>PegasusProperties</code> to be used.
069: */
070: public HourGlass(ADag concDag, PegasusProperties properties) {
071: super (concDag, properties);
072: }
073:
074: /**
075: * It modifies the concrete dag passed in the constructor and adds the create
076: * random directory nodes to it at the root level. These directory nodes have
077: * a common child that acts as a concatenating job and ensures that Condor
078: * does not start staging in the data before the directories have been added.
079: * The root nodes in the unmodified dag are now chidren of this concatenating
080: * dummy job.
081: */
082: public void addCreateDirectoryNodes() {
083: Set set = this .getCreateDirSites();
084:
085: //remove the entry for the local pool
086: //set.remove("local");
087:
088: String pool = null;
089: String jobName = null;
090: SubInfo newJob = null;
091: SubInfo concatJob = null;
092:
093: //add the concat job
094: if (!set.isEmpty()) {
095: concatJob = makeDummyConcatJob();
096: introduceRootDependencies(concatJob.jobName);
097: mCurrentDag.add(concatJob);
098: }
099:
100: //for each execution pool add
101: //a create directory node.
102: for (Iterator it = set.iterator(); it.hasNext();) {
103: pool = (String) it.next();
104: jobName = getCreateDirJobName(pool);
105: newJob = makeCreateDirJob(pool, jobName);
106: mCurrentDag.add(newJob);
107:
108: //add the relation to the concat job
109: mLogMsg = "Adding relation " + jobName + " -> "
110: + concatJob.jobName;
111: mLogger.log(mLogMsg, LogManager.DEBUG_MESSAGE_LEVEL);
112: mCurrentDag.addNewRelation(jobName, concatJob.jobName);
113: }
114:
115: }
116:
117: /**
118: * It traverses through the root jobs of the dag and introduces a new super
119: * root node to it.
120: *
121: * @param newRoot the name of the job that is the new root of the graph.
122: */
123: private void introduceRootDependencies(String newRoot) {
124: Vector vRootNodes = mCurrentDag.getRootNodes();
125: Iterator it = vRootNodes.iterator();
126: String job = null;
127:
128: while (it.hasNext()) {
129: job = (String) it.next();
130: mCurrentDag.addNewRelation(newRoot, job);
131: mLogMsg = "Adding relation " + newRoot + " -> " + job;
132: mLogger.log(mLogMsg, LogManager.DEBUG_MESSAGE_LEVEL);
133:
134: }
135: }
136:
137: /**
138: * It creates a dummy concat job that is run at the local submit host.
139: * This job should run always provided the directories were created
140: * successfully.
141: *
142: * @return the dummy concat job.
143: */
144: public SubInfo makeDummyConcatJob() {
145:
146: SubInfo newJob = new SubInfo();
147: List entries = null;
148: String execPath = null;
149:
150: //jobname has the dagname and index to indicate different
151: //jobs for deferred planning
152: newJob.jobName = mCurrentDag.dagInfo.nameOfADag + "_"
153: + mCurrentDag.dagInfo.index + "_"
154: + this .DUMMY_CONCAT_JOB;
155:
156: newJob.setTransformation(this .TRANSFORMATION_NAMESPACE,
157: this .TRANSFORMATION_NAME, this .TRANSFORMATION_VERSION);
158: newJob.setDerivation(this .DERIVATION_NAMESPACE,
159: this .DERIVATION_NAME, this .DERIVATION_VERSION);
160:
161: newJob.condorUniverse = Engine.REGISTRATION_UNIVERSE;
162: //the noop job does not get run by condor
163: //even if it does, giving it the maximum
164: //possible chance
165: newJob.executable = "/bin/true";
166:
167: //construct noop keys
168: newJob.executionPool = "local";
169: newJob.jobClass = SubInfo.CREATE_DIR_JOB;
170: construct(newJob, "noop_job", "true");
171: construct(newJob, "noop_job_exit_code", "0");
172:
173: //we do not want the job to be launched
174: //by kickstart, as the job is not run actually
175: newJob.vdsNS
176: .checkKeyInNS(
177: VDS.GRIDSTART_KEY,
178: GridStartFactory.GRIDSTART_SHORT_NAMES[GridStartFactory.NO_GRIDSTART_INDEX]);
179:
180: return newJob;
181:
182: }
183:
184: /**
185: * Constructs a condor variable in the condor profile namespace
186: * associated with the job. Overrides any preexisting key values.
187: *
188: * @param job contains the job description.
189: * @param key the key of the profile.
190: * @param value the associated value.
191: */
192: private void construct(SubInfo job, String key, String value) {
193: job.condorVariables.checkKeyInNS(key, value);
194: }
195:
196: }
|