/*
 * This file or a portion of this file is licensed under the terms of
 * the Globus Toolkit Public License, found in file GTPL, or at
 * http://www.globus.org/toolkit/download/license.html. This notice must
 * appear in redistributions of this file, with or without modification.
 *
 * Redistributions of this Software, with or without modification, must
 * reproduce the GTPL in: (1) the Software, or (2) the Documentation or
 * some other similar material which is provided with the Software (if
 * any).
 *
 * Copyright 1999-2004 University of Chicago and The University of
 * Southern California. All rights reserved.
 */

package org.griphyn.cPlanner.transfer.implementation;

import org.griphyn.cPlanner.classes.SubInfo;
import org.griphyn.cPlanner.classes.TransferJob;
import org.griphyn.cPlanner.classes.NameValue;
import org.griphyn.cPlanner.classes.PlannerOptions;
import org.griphyn.cPlanner.classes.Profile;
import org.griphyn.cPlanner.classes.FileTransfer;
import org.griphyn.cPlanner.classes.SiteInfo;
import org.griphyn.cPlanner.classes.JobManager;

import org.griphyn.cPlanner.common.PegasusProperties;
import org.griphyn.cPlanner.common.Utility;
import org.griphyn.cPlanner.common.LogManager;

import org.griphyn.cPlanner.code.gridstart.GridStartFactory;

import org.griphyn.cPlanner.namespace.Condor;
import org.griphyn.cPlanner.namespace.VDS;
import org.griphyn.cPlanner.namespace.ENV;

import org.griphyn.cPlanner.poolinfo.PoolInfoProvider;
import org.griphyn.cPlanner.poolinfo.PoolMode;

import org.griphyn.cPlanner.transfer.Implementation;
import org.griphyn.cPlanner.transfer.Refiner;

import org.griphyn.common.classes.TCType;

import org.griphyn.common.catalog.TransformationCatalog;
import org.griphyn.common.catalog.TransformationCatalogEntry;

import org.griphyn.common.catalog.transformation.TCMode;

import org.griphyn.common.util.Separator;

import java.io.File;

import java.util.Collection;
import java.util.List;
import java.util.Iterator;
import java.util.Set;
import java.util.HashSet;
import java.util.StringTokenizer;
/**
 * An abstract implementation that provides some of the common functionality
 * of the Implementation interface that is required by all the transfer
 * implementations.
 *
 * @author Karan Vahi
 * @version $Revision: 192 $
 */
public abstract class Abstract implements Implementation {

    /**
     * The logical name of the transformation that is used to set the X bit
     * on the staged executable files on the remote execution pools.
     */
    public static final String CHANGE_XBIT_TRANSFORMATION = "dirmanager";

    /**
     * The transformation namespace for the setXBit jobs.
     */
    public static final String XBIT_TRANSFORMATION_NS = "pegasus";

    /**
     * The version number for the transformations for the setXBit jobs.
     */
    public static final String XBIT_TRANSFORMATION_VERSION = null;

    /**
     * The derivation namespace for the setXBit jobs.
     */
    public static final String XBIT_DERIVATION_NS = "pegasus";

    /**
     * The version number for the derivations for the setXBit jobs.
     */
    public static final String XBIT_DERIVATION_VERSION = null;

    /**
     * The prefix for the jobs which are added to set the X bit for the staged
     * executables.
     */
    public static final String SET_XBIT_PREFIX = "chmod_";

    /**
     * The prefix for the NoOP jobs that are created.
     */
    public static final String NOOP_PREFIX = "noop_";

    /**
     * The path to the user proxy on the submit host (local pool), which is
     * picked up for use in the transfer of proxies.
     */
    protected String mLocalUserProxy;

    /**
     * The basename of the user proxy, which is picked up for use in the
     * transfer of proxies.
     */
    protected String mLocalUserProxyBasename;

    /**
     * The handle to the properties object holding the properties relevant to
     * Pegasus.
     */
    protected PegasusProperties mProps;

    /**
     * Contains the various options to the Planner as passed by the user at
     * runtime.
     */
    protected PlannerOptions mPOptions;

    /**
     * The handle to the Site Catalog. It is instantiated in this class.
     */
    protected PoolInfoProvider mSCHandle;

    /**
     * The handle to the Transformation Catalog. It is instantiated in this
     * class.
     */
    protected TransformationCatalog mTCHandle;

    /**
     * The handle to the refiner that loaded this implementation.
     */
    protected Refiner mRefiner;

    /**
     * The logging object which is used to log all the messages.
     *
     * @see org.griphyn.cPlanner.common.LogManager
     */
    protected LogManager mLogger;

    /**
     * The set of sites for which chmod job creation has to be disabled while
     * doing executable staging.
     */
    protected Set mDisabledChmodSites;

    /**
     * The overloaded constructor that is called by the factory to load the
     * class.
     *
     * @param properties the properties object.
     * @param options    the options passed to the Planner.
     */
    public Abstract(PegasusProperties properties, PlannerOptions options) {
        mProps = properties;
        mPOptions = options;
        mLogger = LogManager.getInstance();

        //build up the set of disabled chmod sites
        mDisabledChmodSites = determineDisabledChmodSites(
            properties.getChmodDisabledSites());

        //load the site catalog
        String poolFile = mProps.getPoolFile();
        String poolClass = PoolMode.getImplementingClass(mProps.getPoolMode());
        mSCHandle = PoolMode.loadPoolInstance(poolClass, poolFile,
                                              PoolMode.SINGLETON_LOAD);

        //load the transformation catalog
        mTCHandle = TCMode.loadInstance();

        mLocalUserProxy = getPathToUserProxy();
        mLocalUserProxyBasename = (mLocalUserProxy == null) ?
            null : new File(mLocalUserProxy).getName();
    }

    /**
     * Applies a priority to the transfer job if a priority is specified
     * in the properties file.
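     *
     * <p>The priority, when set, ends up as the condor <code>priority</code>
     * key in the submit file of the job.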
     *
     * @param job the transfer job.
     */
    public void applyPriority(TransferJob job) {
        String priority = this.getPriority(job);
        if (priority != null) {
            job.condorVariables.construct(Condor.PRIORITY_KEY, priority);
        }
    }

    /**
     * Determines whether the proxy needs to be transferred for the transfer
     * job or not. If it does, the job is modified with the correct condor
     * commands to transfer the proxy. The proxy is usually transferred if
     * the VDS profile TRANSFER_PROXY is set, or the job is being run in the
     * condor vanilla universe. The proxy is transferred from the submit host
     * (i.e. the local site). The location is determined from the value of
     * the X509_USER_PROXY profile key associated in the env namespace.
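     *
     * <p>For example, a transfer job running on the <code>local</code> site
     * in the condor vanilla universe has the proxy shipped along via
     * condor's file transfer mechanism, with the <code>X509_USER_PROXY</code>
     * environment variable of the job pointing to the basename of the proxy.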
     *
     * @param job the transfer job.
     *
     * @return true if the job was modified to transfer the proxy,
     *         false otherwise.
     */
    public boolean checkAndTransferProxy(TransferJob job) {
        boolean transfer = false;
        //third party transfers are not handled correctly yet

        String style = job.vdsNS.containsKey(VDS.STYLE_KEY) ?
            (String) job.vdsNS.get(VDS.STYLE_KEY) :
            VDS.GLOBUS_STYLE;
        String universe = job.condorVariables.containsKey(Condor.UNIVERSE_KEY) ?
            (String) job.condorVariables.get(Condor.UNIVERSE_KEY) :
            //empty universe
            "";

        //condition1 is an explicit request for the transfer of the proxy
        boolean condition1 = job.vdsNS.getBooleanValue(VDS.TRANSFER_PROXY_KEY);
        //condition2 detects the condor, glidein and local vanilla/standard
        //universe cases
        boolean condition2 = style.equalsIgnoreCase(VDS.CONDOR_STYLE)
            || style.equalsIgnoreCase(VDS.GLIDEIN_STYLE)
            || (job.executionPool.equalsIgnoreCase("local")
                && (universe.equalsIgnoreCase(Condor.VANILLA_UNIVERSE)
                    || universe.equalsIgnoreCase(Condor.STANDARD_UNIVERSE)));

        if (condition1 || condition2) {
            if (mLocalUserProxyBasename != null) {
                //set the transfer of the proxy from the submit host
                //to the remote execution pool, using the internal
                //condor transfer mechanism

                //add the condor key transfer_input_files
                //and the other required condor keys
                job.condorVariables.addIPFileForTransfer(mLocalUserProxy);

                //set the environment variable to the basename of the proxy
                job.envVariables.checkKeyInNS(ENV.X509_USER_PROXY_KEY,
                                              mLocalUserProxyBasename);

                if (!condition2) {
                    //the transfer job is not being run in the condor
                    //vanilla universe. in all probability the proxy is
                    //being transferred by gass_cache, which does not
                    //preserve file permissions correctly
                    job.envVariables.checkKeyInNS(ENV.GRIDSTART_PREJOB,
                        "chmod 600 " + mLocalUserProxyBasename);
                }
                if (!condition1) {
                    //for glidein jobs, also tag that we are
                    //transferring the proxy
                    job.vdsNS.checkKeyInNS(VDS.TRANSFER_PROXY_KEY, "true");
                }

                //we want the transfer job to be run in the directory
                //that Condor or GRAM decides to run it in
                job.condorVariables.removeKey("remote_initialdir");
                transfer = true;
            }
        }
        return transfer;
    }

    /**
     * Sets the callback to the refiner that has loaded this implementation.
     *
     * @param refiner the transfer refiner that loaded the implementation.
     */
    public void setRefiner(Refiner refiner) {
        mRefiner = refiner;
    }

    /**
     * Adds dirmanager jobs to the workflow that do a chmod on the files
     * being staged.
     *
     * @param computeJob the compute job for which the files are being staged.
     * @param txJob      the transfer job that is staging the files.
     * @param execFiles  the executable files that are being staged.
     *
     * @return boolean indicating whether any setXBit jobs were successfully
     *         added or not.
     */
    protected boolean addSetXBitJobs(SubInfo computeJob, SubInfo txJob,
                                     Collection execFiles) {

        return this.addSetXBitJobs(computeJob, txJob.getName(), execFiles,
                                   txJob.getJobType());
    }

    /**
     * Adds dirmanager jobs to the workflow that do a chmod on the files
     * being staged.
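     *
     * <p>For each executable file a job is inserted between the transfer job
     * and the compute job, resulting in the chain
     * <code>txJob -&gt; setXBitJob -&gt; computeJob</code>.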
     *
     * @param computeJob    the compute job for which the files are
     *                      being staged.
     * @param txJobName     the name of the transfer job that is staging the
     *                      files.
     * @param execFiles     the executable files that are being staged.
     * @param transferClass the class of the transfer job.
     *
     * @return boolean indicating whether any setXBit jobs were successfully
     *         added or not.
     */
    public boolean addSetXBitJobs(SubInfo computeJob, String txJobName,
                                  Collection execFiles, int transferClass) {

        boolean added = false;
        String computeJobName = computeJob.getName();
        String site = computeJob.getSiteHandle();

        //sanity check
        if (execFiles == null || execFiles.isEmpty()) {
            return added;
        }
        if (transferClass != SubInfo.STAGE_IN_JOB) {
            //extra check. throw an exception
            throw new RuntimeException("Invalid Transfer Type ("
                + txJobName + "," + transferClass
                + ") for staging executable files ");
        }

        //figure out whether we need to create a chmod job or a noop job
        boolean noop = this.disableChmodJobCreation(site);

        //add the setXBit jobs into the workflow
        int counter = 0;
        for (Iterator it = execFiles.iterator(); it.hasNext(); counter++) {
            FileTransfer execFile = (FileTransfer) it.next();

            String xBitJobName = this.getSetXBitJobName(computeJobName,
                                                        counter);

            //create a chmod job, or a NOOP job if chmod creation is
            //disabled for the site
            SubInfo xBitJob = noop ?
                this.createNoOPJob(xBitJobName) :
                this.createSetXBitJob(execFile, xBitJobName);

            if (xBitJob == null) {
                //an error occurred while creating the job
                throw new RuntimeException("Unable to create setXBitJob "
                    + "corresponding to compute job " + computeJobName
                    + " and transfer job " + txJobName);
            } else {
                added = true;
                mRefiner.addJob(xBitJob);
                //add the relation txJob->xBitJob->computeJob
                mRefiner.addRelation(txJobName, xBitJob.getName(),
                                     xBitJob.getSiteHandle(), true);
                mRefiner.addRelation(xBitJob.getName(), computeJobName);
            }
        }

        return added;
    }

    /**
     * Adds dirmanager jobs to the workflow that do a chmod on the files
     * being staged, using the supplied index while constructing the job
     * names.
     *
     * @param computeJob    the compute job for which the files are
     *                      being staged.
     * @param txJobName     the name of the transfer job that is staging the
     *                      files.
     * @param execFiles     the executable files that are being staged.
     * @param transferClass the class of the transfer job.
     * @param xbitIndex     the index to be used for creating the name of
     *                      the setXBit job.
     *
     * @return boolean indicating whether any setXBit jobs were successfully
     *         added or not.
     */
    public boolean addSetXBitJobs(SubInfo computeJob, String txJobName,
                                  Collection execFiles, int transferClass,
                                  int xbitIndex) {

        boolean added = false;
        String computeJobName = computeJob.getName();
        String site = computeJob.getSiteHandle();

        //sanity check
        if (execFiles == null || execFiles.isEmpty()) {
            return added;
        }
        if (transferClass != SubInfo.STAGE_IN_JOB) {
            //extra check. throw an exception
            throw new RuntimeException("Invalid Transfer Type ("
                + txJobName + "," + transferClass
                + ") for staging executable files ");
        }

        //figure out whether we need to create a chmod job or a noop job
        boolean noop = this.disableChmodJobCreation(site);

        //add the setXBit jobs into the workflow
        int counter = 0;
        for (Iterator it = execFiles.iterator(); it.hasNext(); counter++) {
            FileTransfer execFile = (FileTransfer) it.next();

            //the job name is based on xbitIndex, not on the loop counter
            String xBitJobName = this.getSetXBitJobName(computeJobName,
                                                        xbitIndex);

            //create a chmod job, or a NOOP job if chmod creation is
            //disabled for the site
            SubInfo xBitJob = noop ?
                this.createNoOPJob(xBitJobName) :
                this.createSetXBitJob(execFile, xBitJobName);

            if (xBitJob == null) {
                //an error occurred while creating the job
                throw new RuntimeException("Unable to create setXBitJob "
                    + "corresponding to compute job " + computeJobName
                    + " and transfer job " + txJobName);
            } else {
                added = true;
                mRefiner.addJob(xBitJob);
                //add the relation txJob->xBitJob->computeJob
                mRefiner.addRelation(txJobName, xBitJob.getName(),
                                     xBitJob.getSiteHandle(), true);
                mRefiner.addRelation(xBitJob.getName(), computeJobName);
            }
        }

        return added;
    }

    /**
     * Generates the name of the setXBit job that is unique for the given
     * workflow.
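     *
     * <p>For example, for a hypothetical compute job named
     * <code>preprocess_ID000001</code>,
     * <code>getSetXBitJobName("preprocess_ID000001", 0)</code> returns
     * <code>chmod_preprocess_ID000001_0</code>.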
     *
     * @param name    the name of the compute job.
     * @param counter the index for the setXBit job.
     *
     * @return the name of the setXBit job.
     */
    public String getSetXBitJobName(String name, int counter) {
        StringBuffer sb = new StringBuffer();
        sb.append(SET_XBIT_PREFIX).append(name).append("_").append(counter);

        return sb.toString();
    }

    /**
     * Generates the name of the noop job that is unique for the given
     * workflow.
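     *
     * <p>For example, for a hypothetical compute job named
     * <code>preprocess_ID000001</code>,
     * <code>getNOOPJobName("preprocess_ID000001", 0)</code> returns
     * <code>noop_preprocess_ID000001_0</code>.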
     *
     * @param name    the name of the compute job.
     * @param counter the index for the noop job.
     *
     * @return the name of the noop job.
     */
    public String getNOOPJobName(String name, int counter) {
        StringBuffer sb = new StringBuffer();
        sb.append(NOOP_PREFIX).append(name).append("_").append(counter);

        return sb.toString();
    }

    /**
     * Creates a NoOP job that runs on the submit host.
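     *
     * <p>An illustrative invocation, pairing it with
     * <code>getNOOPJobName</code> (the compute job name is hypothetical):
     * <pre>
     *    SubInfo noop = createNoOPJob(getNOOPJobName("preprocess_ID000001", 0));
     * </pre>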
     *
     * @param name the name to be assigned to the noop job.
     *
     * @return the noop job.
     */
    public SubInfo createNoOPJob(String name) {

        SubInfo newJob = new SubInfo();

        //the jobname has the dagname and index to indicate different
        //jobs for deferred planning
        newJob.setName(name);
        newJob.setTransformation("pegasus", "noop", "1.0");
        newJob.setDerivation("pegasus", "noop", "1.0");

        newJob.setUniverse("vanilla");
        //the noop job is not actually run by condor;
        //even if it is, /bin/true gives it the best
        //possible chance of succeeding
        newJob.executable = "/bin/true";

        //construct the noop keys
        newJob.setSiteHandle("local");
        newJob.setJobType(SubInfo.CREATE_DIR_JOB);
        construct(newJob, "noop_job", "true");
        construct(newJob, "noop_job_exit_code", "0");

        //we do not want the job to be launched by kickstart,
        //as the job is not actually run
        newJob.vdsNS.checkKeyInNS(VDS.GRIDSTART_KEY,
            GridStartFactory.GRIDSTART_SHORT_NAMES[GridStartFactory.NO_GRIDSTART_INDEX]);

        return newJob;
    }

    /**
     * Creates a dirmanager job that does a chmod on the file being staged.
     * The file being staged should be of type executable, though no explicit
     * check is made for that. The staged file is the one whose X bit is set
     * on execution of this job. The site at which the job is executed is
     * determined from the site associated with the destination URL.
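     *
     * <p>The resulting job invokes the dirmanager executable with the
     * arguments <code>-X -f &lt;absolute path of the destination URL&gt;</code>.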
     *
     * @param file the <code>FileTransfer</code> containing the file whose
     *             X bit has to be set.
     * @param name the name that has to be assigned to the job.
     *
     * @return the chmod job, else null if it could not be created
     *         for some reason.
     */
    protected SubInfo createSetXBitJob(FileTransfer file, String name) {
        SubInfo xBitJob = new SubInfo();
        TransformationCatalogEntry entry = null;
        JobManager jobManager = null;
        NameValue destURL = (NameValue) file.getDestURL();
        String eSiteHandle = destURL.getKey();

        List entries;
        try {
            entries = mTCHandle.getTCEntries(XBIT_TRANSFORMATION_NS,
                                             CHANGE_XBIT_TRANSFORMATION,
                                             XBIT_TRANSFORMATION_VERSION,
                                             eSiteHandle, TCType.INSTALLED);
        } catch (Exception e) {
            //catch-all: log the error and bail out
            mLogger.log("Unable to retrieve entries from TC "
                + e.getMessage(), LogManager.ERROR_MESSAGE_LEVEL);
            return null;
        }

        entry = (entries == null) ?
            this.defaultXBitTCEntry(eSiteHandle) : //try using a default entry
            (TransformationCatalogEntry) entries.get(0);

        if (entry == null) {
            //no entry was found or could be constructed.
            //should throw a TC specific exception
            StringBuffer error = new StringBuffer();
            error.append("Could not find entry in tc for lfn ")
                 .append(Separator.combine(XBIT_TRANSFORMATION_NS,
                                           CHANGE_XBIT_TRANSFORMATION,
                                           XBIT_TRANSFORMATION_VERSION))
                 .append(" at site ").append(eSiteHandle);

            mLogger.log(error.toString(), LogManager.ERROR_MESSAGE_LEVEL);
            throw new RuntimeException(error.toString());
        }

        SiteInfo eSite = mSCHandle.getPoolEntry(eSiteHandle, "transfer");
        jobManager = eSite.selectJobManager("transfer", true);
        String arguments = " -X -f "
            + Utility.getAbsolutePath(destURL.getValue());

        xBitJob.jobName = name;
        xBitJob.logicalName = CHANGE_XBIT_TRANSFORMATION;
        xBitJob.namespace = XBIT_TRANSFORMATION_NS;
        xBitJob.version = XBIT_TRANSFORMATION_VERSION;
        xBitJob.dvName = CHANGE_XBIT_TRANSFORMATION;
        xBitJob.dvNamespace = XBIT_DERIVATION_NS;
        xBitJob.dvVersion = XBIT_DERIVATION_VERSION;
        xBitJob.condorUniverse = "vanilla";
        xBitJob.globusScheduler = jobManager.getInfo(JobManager.URL);
        xBitJob.executable = entry.getPhysicalTransformation();
        xBitJob.executionPool = eSiteHandle;
        xBitJob.strargs = arguments;
        xBitJob.jobClass = SubInfo.CREATE_DIR_JOB;
        xBitJob.jobID = name;

        //the profile information from the pool catalog needs to be
        //assimilated into the job.
        xBitJob.updateProfiles(mSCHandle.getPoolProfile(eSiteHandle));

        //the profile information from the transformation
        //catalog needs to be assimilated into the job,
        //overriding the one from the pool catalog.
        xBitJob.updateProfiles(entry);

        //the profile information from the properties file
        //is assimilated, overriding the one from the
        //transformation catalog.
        xBitJob.updateProfiles(mProps);

        return xBitJob;
    }

    /**
     * Returns a default TC entry to be used in case an entry is not found
     * in the transformation catalog.
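     *
     * <p>The constructed entry points to <code>&lt;home&gt;/bin/dirmanager</code>,
     * where <code>&lt;home&gt;</code> is the PEGASUS_HOME (or, failing that,
     * the VDS_HOME) configured for the site in the site catalog.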
     *
     * @param site the site for which the default entry is required.
     *
     * @return the default entry.
     */
    private TransformationCatalogEntry defaultXBitTCEntry(String site) {
        TransformationCatalogEntry defaultTCEntry = null;
        //check if PEGASUS_HOME is set
        String home = mSCHandle.getPegasusHome(site);
        //if PEGASUS_HOME is not set, use VDS_HOME
        home = (home == null) ? mSCHandle.getVDS_HOME(site) : home;

        //if home is still null, we cannot create a default entry
        if (home == null) {
            mLogger.log("Unable to create a default entry for "
                + Separator.combine(XBIT_TRANSFORMATION_NS,
                                    CHANGE_XBIT_TRANSFORMATION,
                                    XBIT_TRANSFORMATION_VERSION),
                LogManager.DEBUG_MESSAGE_LEVEL);
            return defaultTCEntry;
        }

        //remove the trailing / if specified
        home = (home.charAt(home.length() - 1) == File.separatorChar) ?
            home.substring(0, home.length() - 1) : home;

        //construct the path to the executable
        StringBuffer path = new StringBuffer();
        path.append(home).append(File.separator).append("bin")
            .append(File.separator).append(CHANGE_XBIT_TRANSFORMATION);

        defaultTCEntry = new TransformationCatalogEntry(XBIT_TRANSFORMATION_NS,
                                                        CHANGE_XBIT_TRANSFORMATION,
                                                        XBIT_TRANSFORMATION_VERSION);

        defaultTCEntry.setPhysicalTransformation(path.toString());
        defaultTCEntry.setResourceId(site);
        defaultTCEntry.setType(TCType.INSTALLED);

        //register the entry back into the transformation catalog
        //so that we do not need to worry about creating it again
        try {
            mTCHandle.addTCEntry(defaultTCEntry, false);
        } catch (Exception e) {
            //just log at debug level, as this is more of a performance
            //improvement than anything else
            mLogger.log("Unable to register in the TC the default entry "
                + defaultTCEntry.getLogicalTransformation()
                + " for site " + site, e,
                LogManager.DEBUG_MESSAGE_LEVEL);
        }

        return defaultTCEntry;
    }

    /**
     * Builds up the set of sites for which chmod job creation is disabled.
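     *
     * <p>For example (with hypothetical site names),
     * <code>determineDisabledChmodSites("isi viz")</code> returns a set
     * containing <code>isi</code> and <code>viz</code>.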
     *
     * @param sites a comma or whitespace separated list of site names.
     *
     * @return a Set containing the site names.
     */
    protected Set determineDisabledChmodSites(String sites) {
        Set s = new HashSet();

        //sanity checks
        if (sites == null || sites.length() == 0) {
            return s;
        }

        //tokenize on both whitespace and commas
        for (StringTokenizer st = new StringTokenizer(sites, ", \t\n\r\f");
             st.hasMoreTokens();) {
            s.add(st.nextToken());
        }

        return s;
    }

    /**
     * Returns a boolean indicating whether chmod job creation is disabled
     * for a site or not.
     *
     * @param site the name of the site.
     *
     * @return boolean
     */
    protected boolean disableChmodJobCreation(String site) {
        return this.mDisabledChmodSites.contains(site);
    }

    /**
     * Returns the priority for the transfer job as specified in the
     * properties file.
     *
     * @param job the transfer job.
     *
     * @return the priority of the job as determined from the properties;
     *         can be null if an invalid value is passed or the property is
     *         not set.
     */
    protected String getPriority(TransferJob job) {
        String priority;
        int type = job.jobClass;
        switch (type) {
            case SubInfo.STAGE_IN_JOB:
                priority = mProps.getTransferStageInPriority();
                break;

            case SubInfo.STAGE_OUT_JOB:
                priority = mProps.getTransferStageOutPriority();
                break;

            case SubInfo.INTER_POOL_JOB:
                priority = mProps.getTransferInterPriority();
                break;

            default:
                priority = null;
        }
        return priority;
    }

    /**
     * Returns the path to the user proxy from the pool configuration file
     * and the properties file. The value in the properties file overrides
     * the value from the pool configuration file.
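     *
     * <p>For example, if the env profile <code>X509_USER_PROXY</code> for
     * the local pool is set to <code>/tmp/x509up_u1000</code> (an
     * illustrative value), that path is returned, unless the properties
     * file specifies a different location.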
     *
     * @return the path to the user proxy on the local pool,
     *         null if no path is found.
     */
    protected String getPathToUserProxy() {
        List l = mSCHandle.getPoolProfile("local", Profile.ENV);
        String proxy = null;

        if (l != null) {
            //try to get the path to the proxy on the local pool
            for (Iterator it = l.iterator(); it.hasNext();) {
                Profile p = (Profile) it.next();
                proxy = p.getProfileKey().equalsIgnoreCase(ENV.X509_USER_PROXY_KEY) ?
                    p.getProfileValue() : proxy;
            }
        }

        //override with the value from the properties file
        ENV env = new ENV();
        env.checkKeyInNS(mProps, "local");
        proxy = env.containsKey(ENV.X509_USER_PROXY_KEY) ?
            (String) env.get(ENV.X509_USER_PROXY_KEY) : proxy;

        return proxy;
    }

    /**
     * Constructs a condor variable in the condor profile namespace
     * associated with the job. Overrides any preexisting key values.
     *
     * @param job   contains the job description.
     * @param key   the key of the profile.
     * @param value the associated value.
     */
    protected void construct(SubInfo job, String key, String value) {
        job.condorVariables.checkKeyInNS(key, value);
    }

}
|