/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2006, Red Hat Middleware LLC, and individual contributors
 * as indicated by the @author tags. See the copyright.txt file in the
 * distribution for a full listing of individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */
package org.jboss.ha.framework.server;

import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import javax.management.MBeanServer;
import javax.management.ObjectName;

import org.jboss.deployment.scanner.URLDeploymentScanner;
import org.jboss.ha.framework.interfaces.HAPartition;
import org.jboss.ha.framework.server.ClusterFileTransfer.ClusterFileTransferException;
import org.jboss.system.server.ServerConfig;
import org.jboss.system.server.ServerConfigLocator;
import org.jboss.system.server.ServerConfigUtil;

/**
 * Farm member service: keeps the contents of the farm deploy directories in
 * sync across the members of an HAPartition by pushing new local deployments
 * to the other nodes and pulling deployments it is missing from the cluster.
 *
 * @author <a href="mailto:andreas@jboss.org">Andreas Schaefer</a>
 * @author <a href="mailto:bill@jboss.org">Bill Burke</a>
 * @version $Revision: 57254 $
 *
 * <p><b>20021014 andreas schaefer:</b>
 * <ul>
 * <li>Initial import</li>
 * </ul>
 * <p><b>20020809 bill burke:</b>
 * <ul>
 * <li>Rewrote as a Scanner instead. Also on boot-up asks the cluster for deployable files.</li>
 * </ul>
 */
public class FarmMemberService extends URLDeploymentScanner
        implements FarmMemberServiceMBean {

    private MBeanServer mServer;
    protected ObjectName mDeployerName = null;
    protected ObjectName mClusterPartitionName = null;
    protected ClusterPartitionMBean mClusterPartition = null;
    protected String mBackgroundPartition = ServerConfigUtil.getDefaultPartitionName();
    private File mTempDirectory;
    private ClusterFileTransfer mFileTransfer;

    protected static final String SERVICE_NAME = "FarmMemberService";
    protected HashMap parentDUMap = new HashMap();

    protected ArrayList remotelyDeployed = new ArrayList();
    protected ArrayList remotelyUndeployed = new ArrayList();

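    /**
     * @return the name of the HAPartition used to coordinate farm deployments
     */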
    public String getPartitionName() {
        return mBackgroundPartition;
    }

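    /**
     * Sets the partition name; ignored once the service is starting or started.
     */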
    public void setPartitionName(String pPartitionName) {
        if ((getState() != STARTED) && (getState() != STARTING)) {
            mBackgroundPartition = pPartitionName;
        }
    }

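    /**
     * @return the ClusterPartition whose HAPartition is used for farm calls, if one was injected
     */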
    public ClusterPartitionMBean getClusterPartition() {
        return mClusterPartition;
    }

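    /**
     * Sets the ClusterPartition; ignored once the service is starting or started.
     */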
    public void setClusterPartition(ClusterPartitionMBean clusterPartition) {
        if ((getState() != STARTED) && (getState() != STARTING)) {
            this.mClusterPartition = clusterPartition;
        }
    }

    /** Backward compatibility: maps to the URLs attribute of URLDeploymentScannerMBean.
     * @deprecated
     */
    public void setFarmDeployDirectory(String urls) throws MalformedURLException {
        super.setURLs(urls);
    }

    /** Backward compatibility only; the scanner name is ignored and has no effect.
     * @deprecated
     */
    public void setScannerName(String name) {
        log.warn("ScannerName does nothing");
    }

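    /**
     * Sets the deployer and caches its ObjectName so the reference can be
     * re-established if the service is re-created after a destroy.
     */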
    public void setDeployer(ObjectName deployerName) {
        super.setDeployer(deployerName);

        // Cache the deployer name so we can reset it in case of restart
        this.mDeployerName = deployerName;
    }

    // Service implementation ----------------------------------------

    public String getName() {
        return "Farm Member Service";
    }

    /**
     * Saves the MBeanServer reference so it can be used later for JMX lookups
     * (for example of the {@link org.jboss.deployment.MainDeployer MainDeployer}
     * and the HAPartition), then delegates registration to the superclass.
     */
    public ObjectName preRegister(MBeanServer pServer, ObjectName pName) throws Exception {
        mServer = pServer;
        return super.preRegister(pServer, pName);
    }

    /**
     * Looks up the server configuration to determine the temp directory and
     * makes sure the configured farm deploy directories exist locally.
     */
    protected void createService() throws Exception {
        // AbstractDeploymentScanner drops its ref to the deployer in destroyService(),
        // so if this is a "re-create" following a destroy, re-establish it
        if (deployer == null && mDeployerName != null)
            setDeployer(mDeployerName); // causes a JMX lookup of the deployer

        super.createService();

        ServerConfig lConfig = ServerConfigLocator.locate();
        mTempDirectory = lConfig.getServerTempDir();

        createUnexistingLocalDir();
    }

    /**
     * Registers this service as an RPC handler with the HAPartition and pulls
     * any deployments the cluster already knows about before the scanner
     * thread is enabled.
     */
    protected void startService() throws Exception {
        try {
            log.debug("registerRPCHandler");

            HAPartition lHAPartition = null;
            if (mClusterPartition != null) {
                lHAPartition = mClusterPartition.getHAPartition();
                mBackgroundPartition = lHAPartition.getPartitionName();
            }

            mClusterPartitionName = new ObjectName("jboss:service=" + mBackgroundPartition);

            if (lHAPartition == null) {
                // Old-style config with PartitionName was used -- have to
                // look up the partition in JMX
                lHAPartition = (HAPartition) mServer.getAttribute(mClusterPartitionName,
                        "HAPartition");
            }

            lHAPartition.registerRPCHandler(SERVICE_NAME, this);

            mFileTransfer = new ClusterFileTransfer(lHAPartition, buildParentFolderMapping());

            ArrayList response = lHAPartition.callMethodOnCoordinatorNode(SERVICE_NAME,
                    "farmDeployments", new Object[] {}, new Class[] {}, true);

            log.debug("Found " + response.size() + " farmDeployments responses");
            for (int i = 0; i < response.size(); i++) {
                Object map = response.get(i);
                if (map instanceof HashMap) {
                    HashMap farmed = (HashMap) map;
                    pullNewDeployments(lHAPartition, farmed);
                }
            }

            // scan once before enabling the scanner thread, so the initial farm
            // deployments complete during startup
            scannerThread.doScan();

            // enable the scanner thread if scanning is enabled
            scannerThread.setEnabled(isScanEnabled());
        } catch (Exception e) {
            this.logException(e);
            throw e;
        }
    }

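    /**
     * Pulls from the cluster every farmed deployment that is either missing
     * locally or newer than the local copy, recording each pulled file so the
     * resulting local deployment is not broadcast back to the cluster.
     */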
    protected void pullNewDeployments(HAPartition partition, HashMap farmed) {
        log.info("**** pullNewDeployments ****");
        Iterator it = farmed.keySet().iterator();
        while (it.hasNext()) {
            String depName = (String) it.next();
            DeployedURL du = (DeployedURL) parentDUMap.get(depName);
            Date last = (Date) farmed.get(depName);
            if (du != null) {
                Date theLast = new Date(du.getFile().lastModified());
                if (!theLast.before(last)) {
                    continue;
                }
            }

            String parentName = depName.substring(0, depName.indexOf('/'));
            File destFile = new File(depName);
            try {
                mFileTransfer.pull(destFile, parentName);
                synchronized (remotelyDeployed) {
                    remotelyDeployed.add(destFile.getName());
                }
            } catch (ClusterFileTransferException e) {
                // log the exception and continue with the next deployment
                this.logException(e);
            }
        }
    }

    // Returns a mapping from farm folder name to its directory on disk.
    private Map buildParentFolderMapping() {
        Map map = new HashMap();
        URL[] urls = (URL[]) urlList.toArray(new URL[] {});
        for (int i = 0; i < urls.length; i++) {
            if (urls[i].getProtocol().equals("file")) {
                File file = new File(urls[i].getFile());
                if (file.isDirectory()) {
                    map.put(file.getName(), file);
                }
            }
        }
        return map;
    }

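    /**
     * Returns the farm directory whose name matches the given parent name,
     * or null if no configured file URL corresponds to it.
     */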
    protected File findParent(String parentName) {
        URL[] urls = (URL[]) urlList.toArray(new URL[] {});
        for (int i = 0; i < urls.length; i++) {
            if (urls[i].getProtocol().equals("file")) {
                File file = new File(urls[i].getFile());
                if (file.isDirectory() && file.getName().equals(parentName)) {
                    return file;
                }
            }
        }
        return null;
    }

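    /**
     * Cluster RPC: returns a map of "parentDir/fileName" keys to the last
     * modification date of the corresponding local deployment, so other nodes
     * can decide which files they need to pull.
     */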
    public HashMap farmDeployments() {
        log.debug("farmDeployments request, parentDUMap.size=" + parentDUMap.size());
        Iterator it = parentDUMap.keySet().iterator();
        HashMap farmed = new HashMap();
        while (it.hasNext()) {
            String key = (String) it.next();
            DeployedURL du = (DeployedURL) parentDUMap.get(key);
            farmed.put(key, new Date(du.getFile().lastModified()));
        }
        return farmed;
    }

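    /**
     * Cluster RPC: deploys a file that another node has pushed into this node's
     * temp directory. The file is moved into the matching farm directory if it
     * is new here or newer than the local copy; otherwise the call is ignored.
     *
     * @param parentName name of the farm directory the file belongs to
     * @param destFile   the deployment file, relative to the farm directory
     * @param date       last-modified timestamp of the file on the sending node
     */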
    public void farmDeploy(String parentName, File destFile, Date date) {
        try {
            File parent = findParent(parentName);
            if (parent == null) {
                log.info("Could not find parent: " + parentName + " for deployment: " + destFile
                        + ", date: " + date);
                return;
            }

            String fullName = parentName + "/" + destFile.getName();

            DeployedURL du = null;
            synchronized (parentDUMap) {
                du = (DeployedURL) parentDUMap.get(fullName);
            }
            boolean deployIt = false;
            if (du == null) {
                deployIt = true;
            } else {
                Date lastChanged = new Date(du.getFile().lastModified());
                deployIt = lastChanged.before(date);
            }

            if (deployIt) {
                // remember this deployment to avoid recursive farm calls
                synchronized (remotelyDeployed) {
                    remotelyDeployed.add(fullName);
                }

                log.info("farmDeploy(), deploy locally: " + fullName);
                // Adjust the date and move the file from the temp directory into the
                // farm directory, deleting any stale copy that is already there
                File tempFile = new File(this.mTempDirectory, destFile.getName());
                File lFarmFile = new File(parent, destFile.getName());
                if (lFarmFile.exists()) {
                    if (!lFarmFile.delete()) {
                        log.info("could not delete target file for farm deployment "
                                + lFarmFile.getName());
                    }
                }
                tempFile.setLastModified(date.getTime());

                if (!ClusterFileTransfer.localMove(tempFile, lFarmFile)) {
                    log.info("Could not move " + tempFile + " to " + lFarmFile);
                }
            } else {
                log.info(fullName + " is already deployed by the farm service on this node");
            }
        } catch (Exception e) {
            logException(e);
        }
    }

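    /**
     * Cluster RPC: removes a farmed deployment that another node has undeployed,
     * deleting the local file from the matching farm directory if it exists.
     *
     * @param parentName name of the farm directory the file belongs to
     * @param fileName   name of the file to remove
     */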
    public void farmUndeploy(String parentName, String fileName) {
        try {
            // First check whether the file is actually deployed here
            log.info("farmUndeploy(), File: " + parentName + "/" + fileName);
            File parent = findParent(parentName);
            if (parent == null) {
                log.info("Could not find parent: " + parentName + " for undeployment: " + fileName);
                return;
            }
            File deployed = new File(parent, fileName);
            if (deployed.exists()) {
                // remember this undeployment to avoid recursive farm calls
                synchronized (remotelyUndeployed) {
                    String fullName = parentName + "/" + fileName;
                    remotelyUndeployed.add(fullName);
                }

                if (deployed.delete())
                    log.info("farmUndeploy(), removed file " + deployed);
                else
                    log.info("farmUndeploy(), could not remove file " + deployed);
            }
        } catch (Exception e) {
            logException(e);
        }
    }

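    /**
     * Local deployment hook: after the superclass deploys the file, record it in
     * parentDUMap and, unless the deployment was itself triggered by a remote
     * farm call, push the file to the other cluster nodes and invoke farmDeploy
     * on them.
     */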
    protected void deploy(final DeployedURL du) {
        super.deploy(du);
        File file = du.getFile();
        File parent = file.getParentFile();
        if (parent == null)
            return;

        String fullName = parent.getName() + "/" + file.getName();
        synchronized (parentDUMap) {
            parentDUMap.put(fullName, du);
        }

        try {
            // Check whether a remote call is needed: the deploy may itself be the
            // consequence of a farm call (avoid recursion!)
            boolean consequenceOfRemoteCall = false;
            synchronized (remotelyDeployed) {
                consequenceOfRemoteCall = remotelyDeployed.remove(fullName);
            }

            if (getState() == STARTING)
                return;

            if (!consequenceOfRemoteCall) {
                Date fileDate = new Date(file.lastModified());
                HAPartition lHAPartition = (HAPartition) mServer.getAttribute(mClusterPartitionName,
                        "HAPartition");
                this.mFileTransfer.push(file, parent.getName(), true);

                lHAPartition.callMethodOnCluster(SERVICE_NAME, "farmDeploy",
                        new Object[] { parent.getName(), file, fileDate },
                        new Class[] { String.class, File.class, Date.class }, true);
            }
        } catch (ClusterFileTransferException e) {
            logException(e);
        } catch (Exception ex) {
            logException(ex);
        }
    }

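    /**
     * Local undeployment hook: after the superclass undeploys the file, remove it
     * from parentDUMap and, unless the undeployment was itself triggered by a
     * remote farm call, invoke farmUndeploy on the other cluster nodes.
     */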
    protected void undeploy(final DeployedURL du) {
        File file = du.getFile();
        File parent = file.getParentFile();
        super.undeploy(du);

        // mirror deploy(): nothing to track or broadcast if the file has no parent directory
        if (parent == null)
            return;

        String parentName = parent.getName();
        String fileName = file.getName();
        String fullName = parentName + "/" + fileName;
        synchronized (parentDUMap) {
            parentDUMap.remove(fullName);
        }

        if (getState() == STOPPING)
            return;

        try {
            // Check whether a remote call is needed: the undeploy may itself be the
            // consequence of a farm call (avoid recursion!)
            boolean consequenceOfRemoteCall = false;
            synchronized (remotelyUndeployed) {
                consequenceOfRemoteCall = remotelyUndeployed.remove(fullName);
            }

            if (!consequenceOfRemoteCall) {
                HAPartition lHAPartition = (HAPartition) mServer.getAttribute(mClusterPartitionName,
                        "HAPartition");
                lHAPartition.callMethodOnCluster(SERVICE_NAME, "farmUndeploy",
                        new Object[] { parentName, fileName },
                        new Class[] { String.class, String.class }, true);
            }
        } catch (Exception ex) {
            logException(ex);
        }
    }

    /**
     * Digs through the myriad of nested JMX exceptions to pull out the
     * underlying exception if possible and logs it.
     *
     * @param e The exception to be logged.
     */
    private void logException(Throwable e) {
        if (e instanceof javax.management.RuntimeErrorException) {
            e = ((javax.management.RuntimeErrorException) e).getTargetError();
        } else if (e instanceof javax.management.RuntimeMBeanException) {
            e = ((javax.management.RuntimeMBeanException) e).getTargetException();
        } else if (e instanceof javax.management.RuntimeOperationsException) {
            e = ((javax.management.RuntimeOperationsException) e).getTargetException();
        } else if (e instanceof javax.management.MBeanException) {
            e = ((javax.management.MBeanException) e).getTargetException();
        } else if (e instanceof javax.management.ReflectionException) {
            e = ((javax.management.ReflectionException) e).getTargetException();
        }

        log.error(e);
    }

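    /**
     * Creates any configured farm directory (file: URLs only) that does not
     * yet exist on the local file system.
     */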
    protected void createUnexistingLocalDir() {
        if (this.urlList != null) {
            Iterator iter = this.urlList.iterator();
            while (iter.hasNext()) {
                URL url = null;
                try {
                    url = (URL) iter.next();
                    if (url.getProtocol().equals("file")) {
                        File targetDir = new File(url.getFile());
                        if (!targetDir.exists())
                            targetDir.mkdirs();
                    }
                } catch (Exception e) {
                    log.info("Problem while creating a farm directory: " + url, e);
                }
            }
        }
    }
}
|