001: /*
002: * This file or a portion of this file is licensed under the terms of
003: * the Globus Toolkit Public License, found in file GTPL, or at
004: * http://www.globus.org/toolkit/download/license.html. This notice must
005: * appear in redistributions of this file, with or without modification.
006: *
007: * Redistributions of this Software, with or without modification, must
008: * reproduce the GTPL in: (1) the Software, or (2) the Documentation or
009: * some other similar material which is provided with the Software (if
010: * any).
011: *
012: * Copyright 1999-2004 University of Chicago and The University of
013: * Southern California. All rights reserved.
014: */
015: package org.griphyn.cPlanner.partitioner;
016:
017: import org.griphyn.cPlanner.common.LogManager;
018:
019: import org.griphyn.vdl.classes.LFN;
020:
021: import org.griphyn.vdl.dax.ADAG;
022: import org.griphyn.vdl.dax.Filename;
023: import org.griphyn.vdl.dax.Job;
024:
025: import org.griphyn.vdl.euryale.Callback;
026:
027: import java.io.IOException;
028: import java.util.Iterator;
029: import java.util.List;
030: import java.util.Map;
031: import java.util.Set;
032:
033: /**
* This class writes out a partitioned dax that corresponds to one
* partition as defined by the Partitioner. It parses the dax once, when
* it is first used, stores it in memory, and then refers to the in-memory
* copy to look up the job details for the jobs making up a particular partition.
038: *
039: * @author Karan Vahi
040: * @version $Revision: 50 $
041: */
042: public class SingleLook extends DAXWriter {
043:
044: /**
045: * The set of job id's in the partition.
046: */
047: private Set mNodeSet;
048:
049: /**
050: * A map containing the relations between the jobs making up the partition.
051: */
052: private Map mRelationsMap;
053:
054: /**
055: * The ADAG object containing the partitioned dax.
056: */
057: private ADAG mPartADAG;
058:
059: /**
060: * The number of jobs that are in the partition.
061: */
062: private int mNumOfJobs;
063:
064: /**
065: * The number of jobs about which the callback interface has knowledge.
066: */
067: private int mCurrentNum;
068:
069: /**
070: * The flag to identify that dax is in memory.
071: */
072: private boolean mDAXInMemory;
073:
074: /**
075: * The map containing all the jobs in the dax indexed by the job id.
076: */
077: private Map mJobMap;
078:
079: /**
080: * The overloaded constructor.
081: *
082: * @param daxFile the path to the dax file that is being partitioned.
083: * @param directory the directory in which the partitioned daxes are to be
084: * generated.
085: */
086: public SingleLook(String daxFile, String directory) {
087: super (daxFile, directory);
088: mDAXInMemory = false;
089: mJobMap = null;
090: }
091:
092: /**
093: * It writes out a dax consisting of the jobs as specified in the partition.
094: *
095: * @param partition the partition object containing the relations and id's
096: * of the jobs making up the partition.
097: * @param index the index of the partition.
098: *
099: * @return boolean true if dax successfully generated and written.
100: * false in case of error.
101: */
102: public boolean writePartitionDax(Partition partition, int index) {
103: Iterator it;
104: List fileList = null;
105: List parentIDs = null;
106:
107: //do the cleanup from the previous partition write
108: mPartADAG = null;
109: mNodeSet = null;
110: mRelationsMap = null;
111:
112: //get from the partition object the set of jobs
113: //and relations between them
114: mNodeSet = partition.getNodeIDs();
115: mRelationsMap = partition.getRelations();
116: mNumOfJobs = mNodeSet.size();
117:
118: //set the current number of jobs whose information we have
119: mCurrentNum = 0;
120: if (!mDAXInMemory) {
121: mLogger.log("Parsing the original DAX file",
122: LogManager.DEBUG_MESSAGE_LEVEL);
123: //dax is not in memory.
124: mJobMap = new java.util.HashMap();
125: //Callback takes care of putting dax in memory
126: Callback callback = new MyCallBackHandler();
127: org.griphyn.vdl.euryale.DAXParser d = new org.griphyn.vdl.euryale.DAXParser(
128: null);
129: d.setCallback(callback);
130:
131: //start the parsing of the dax
132: d.parse(mDaxFile);
133: mDAXInMemory = true;
134: mLogger.logCompletion("Parsing the original DAX file",
135: LogManager.DEBUG_MESSAGE_LEVEL);
136: }
137:
138: mPartADAG = new ADAG(0, index, mPartitionName);
139:
140: //get the job information for the jobs in the partiton.
141: it = mNodeSet.iterator();
142: while (it.hasNext()) {
143: String id = (String) it.next();
144: Job job = (Job) mJobMap.get(id);
145: if (job == null) {
146: throw new RuntimeException(
147: "Unable to find information about job" + id
148: + "while constructing partition");
149: }
150:
151: //add the job to ADAG
152: mPartADAG.addJob(job);
153:
154: //build up the files used by the partition
155: fileList = job.getUsesList();
156: //iterate through the file list
157: //populate it in the ADAG object
158: Iterator fileIt = fileList.iterator();
159: while (fileIt.hasNext()) {
160: Filename file = (Filename) fileIt.next();
161: mPartADAG.addFilename(file.getFilename(), (file
162: .getLink() == LFN.INPUT) ? true : false, file
163: .getTemporary(), file.getDontRegister(), file
164: .getDontTransfer());
165: }
166: }
167:
168: //put in the relations amongst
169: //jobs in the partition
170: //add the relations between the jobs in the partition to the ADAG
171: it = mRelationsMap.keySet().iterator();
172: while (it.hasNext()) {
173: String childID = (String) it.next();
174: parentIDs = (List) mRelationsMap.get(childID);
175:
176: //get all the parents of the children and populate them in the
177: //ADAG object
178: Iterator it1 = parentIDs.iterator();
179: while (it1.hasNext()) {
180: mPartADAG.addChild(childID, (String) it1.next());
181: }
182:
183: }
184: mLogger.log("Writing out the DAX File for partition "
185: + partition.getID(), LogManager.DEBUG_MESSAGE_LEVEL);
186: //do the actual writing to the file
187: this .initializeWriteHandle(index);
188: try {
189: mPartADAG.toXML(mWriteHandle, new String());
190: } catch (IOException e) {
191: mLogger.log("Error while writing out a partition dax :"
192: + e.getMessage(), LogManager.ERROR_MESSAGE_LEVEL);
193: return false;
194: }
195: this .close();
196: mLogger.logCompletion("Writing out the DAX File for partition "
197: + partition.getID(), LogManager.DEBUG_MESSAGE_LEVEL);
198:
199: //generation was successful
200: return true;
201: }
202:
203: /**
204: * The internal callback handler for the DAXParser in Euryale. It stores
205: * all the jobs making up the dax in an internal map, which is then referred
206: * to get the job information for the jobs making up the partition.
207: */
208: private class MyCallBackHandler implements Callback {
209:
210: /**
211: * The empty constructor.
212: */
213: public MyCallBackHandler() {
214:
215: }
216:
217: /**
218: * Callback when the opening tag was parsed. The attribute maps each
219: * attribute to its raw value. The callback initializes the DAG
220: * writer.
221: *
222: * @param attributes is a map of attribute key to attribute value
223: */
224: public void cb_document(Map attributes) {
225: //do nothing at the moment
226: }
227:
228: /**
229: * Callback for the filename from section 1 filenames.
230: * Does nothing as the filenames for the partitioned dax are
231: * constructed from the jobs.
232: */
233: public void cb_filename(Filename filename) {
234: //an empty implementation
235: }
236:
237: /**
238: * Callback for the job from section 2 jobs. This ends up storing all
239: * the jobs in the memory to be used for writing out the partition dax.
240: *
241: * @param job the object containing the job information.
242: */
243: public void cb_job(Job job) {
244:
245: String id = job.getID();
246: //put it in hashmap and also check for duplicate
247: if (mJobMap.put(id, job) != null) {
248: //warn for the duplicate entry
249: mLogger.log("Entry for the job already in ",
250: LogManager.WARNING_MESSAGE_LEVEL);
251: }
252:
253: if (mCurrentNum == mNumOfJobs) {
254: //exit or stop the parser.
255: cb_done();
256: }
257: }
258:
259: /**
260: * Callback for child and parent relationships from section 3.
261: * This is an empty implementation, as the Partition object
262: * contains the relations amongst the jobs making up the partition.
263: *
264: * @param child is the IDREF of the child element.
265: * @param parents is a list of IDREFs of the included parents.
266: */
267: public void cb_parents(String child, List parents) {
268: //an empty implementation
269: }
270:
271: /**
272: * Callback when the parsing of the document is done. While this state
273: * could also be determined from the return of the invocation of the
274: * parser, that return may be hidden in another place of the code.
275: * This callback can be used to free callback-specific resources.
276: */
277: public void cb_done() {
278: //an empty implementation
279: }
280:
281: }
282: }
|