001    /**
002     * Licensed to the Apache Software Foundation (ASF) under one
003     * or more contributor license agreements.  See the NOTICE file
004     * distributed with this work for additional information
005     * regarding copyright ownership.  The ASF licenses this file
006     * to you under the Apache License, Version 2.0 (the
007     * "License"); you may not use this file except in compliance
008     * with the License.  You may obtain a copy of the License at
009     *
010     *     http://www.apache.org/licenses/LICENSE-2.0
011     *
012     * Unless required by applicable law or agreed to in writing, software
013     * distributed under the License is distributed on an "AS IS" BASIS,
014     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015     * See the License for the specific language governing permissions and
016     * limitations under the License.
017     */
018    package org.apache.hadoop.hdfs.server.blockmanagement;
019    
020    import java.util.ArrayList;
021    import java.util.Collection;
022    import java.util.Collections;
023    import java.util.HashMap;
024    import java.util.HashSet;
025    import java.util.Iterator;
026    import java.util.LinkedList;
027    import java.util.List;
028    import java.util.Map;
029    import java.util.Queue;
030    import java.util.Set;
031    
032    import com.google.common.annotations.VisibleForTesting;
033    
034    import org.apache.commons.logging.Log;
035    import org.apache.commons.logging.LogFactory;
036    import org.apache.hadoop.classification.InterfaceAudience;
037    import org.apache.hadoop.classification.InterfaceStability;
038    import org.apache.hadoop.hdfs.StorageType;
039    import org.apache.hadoop.hdfs.protocol.Block;
040    import org.apache.hadoop.hdfs.protocol.DatanodeID;
041    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
042    import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
043    import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
044    import org.apache.hadoop.hdfs.server.protocol.StorageReport;
045    import org.apache.hadoop.hdfs.util.EnumCounters;
046    import org.apache.hadoop.hdfs.util.LightWeightHashSet;
047    import org.apache.hadoop.util.IntrusiveCollection;
048    import org.apache.hadoop.util.Time;
049    
050    import com.google.common.annotations.VisibleForTesting;
051    
052    /**
053     * This class extends the DatanodeInfo class with ephemeral information (eg
054     * health, capacity, what blocks are associated with the Datanode) that is
055     * private to the Namenode, ie this class is not exposed to clients.
056     */
057    @InterfaceAudience.Private
058    @InterfaceStability.Evolving
059    public class DatanodeDescriptor extends DatanodeInfo {
  /** Logger shared by this descriptor class. */
  public static final Log LOG = LogFactory.getLog(DatanodeDescriptor.class);
  /** Shared immutable empty array, avoids allocating one per caller. */
  public static final DatanodeDescriptor[] EMPTY_ARRAY = {};

  // Stores status of decommissioning.
  // If node is not decommissioning, do not use this object for anything.
  public final DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
066      
  /** Block and targets pair */
  @InterfaceAudience.Private
  @InterfaceStability.Evolving
  public static class BlockTargetPair {
    /** The block to be transferred. */
    public final Block block;
    /** The destination storages for the transfer. */
    public final DatanodeStorageInfo[] targets;

    BlockTargetPair(Block block, DatanodeStorageInfo[] targets) {
      this.block = block;
      this.targets = targets;
    }
  }
079    
080      /** A BlockTargetPair queue. */
081      private static class BlockQueue<E> {
082        private final Queue<E> blockq = new LinkedList<E>();
083    
084        /** Size of the queue */
085        synchronized int size() {return blockq.size();}
086    
087        /** Enqueue */
088        synchronized boolean offer(E e) { 
089          return blockq.offer(e);
090        }
091    
092        /** Dequeue */
093        synchronized List<E> poll(int numBlocks) {
094          if (numBlocks <= 0 || blockq.isEmpty()) {
095            return null;
096          }
097    
098          List<E> results = new ArrayList<E>();
099          for(; !blockq.isEmpty() && numBlocks > 0; numBlocks--) {
100            results.add(blockq.poll());
101          }
102          return results;
103        }
104    
105        /**
106         * Returns <tt>true</tt> if the queue contains the specified element.
107         */
108        boolean contains(E e) {
109          return blockq.contains(e);
110        }
111    
112        synchronized void clear() {
113          blockq.clear();
114        }
115      }
116    
  // Maps storage ID -> storage info. All access synchronizes on the map itself
  // (see getStorageInfo/getStorageInfos/updateStorage).
  private final Map<String, DatanodeStorageInfo> storageMap = 
      new HashMap<String, DatanodeStorageInfo>();
119    
120      /**
121       * A list of CachedBlock objects on this datanode.
122       */
123      public static class CachedBlocksList extends IntrusiveCollection<CachedBlock> {
124        public enum Type {
125          PENDING_CACHED,
126          CACHED,
127          PENDING_UNCACHED
128        }
129    
130        private final DatanodeDescriptor datanode;
131    
132        private final Type type;
133    
134        CachedBlocksList(DatanodeDescriptor datanode, Type type) {
135          this.datanode = datanode;
136          this.type = type;
137        }
138    
139        public DatanodeDescriptor getDatanode() {
140          return datanode;
141        }
142    
143        public Type getType() {
144          return type;
145        }
146      }
147    
148      /**
149       * The blocks which we want to cache on this DataNode.
150       */
151      private final CachedBlocksList pendingCached = 
152          new CachedBlocksList(this, CachedBlocksList.Type.PENDING_CACHED);
153    
154      /**
155       * The blocks which we know are cached on this datanode.
156       * This list is updated by periodic cache reports.
157       */
158      private final CachedBlocksList cached = 
159          new CachedBlocksList(this, CachedBlocksList.Type.CACHED);
160    
161      /**
162       * The blocks which we want to uncache on this DataNode.
163       */
164      private final CachedBlocksList pendingUncached = 
165          new CachedBlocksList(this, CachedBlocksList.Type.PENDING_UNCACHED);
166    
167      public CachedBlocksList getPendingCached() {
168        return pendingCached;
169      }
170    
171      public CachedBlocksList getCached() {
172        return cached;
173      }
174    
175      public CachedBlocksList getPendingUncached() {
176        return pendingUncached;
177      }
178    
179      /**
180       * The time when the last batch of caching directives was sent, in
181       * monotonic milliseconds.
182       */
183      private long lastCachingDirectiveSentTimeMs;
184    
185      // isAlive == heartbeats.contains(this)
186      // This is an optimization, because contains takes O(n) time on Arraylist
187      public boolean isAlive = false;
188      public boolean needKeyUpdate = false;
189    
190      
191      // A system administrator can tune the balancer bandwidth parameter
192      // (dfs.balance.bandwidthPerSec) dynamically by calling
193      // "dfsadmin -setBalanacerBandwidth <newbandwidth>", at which point the
194      // following 'bandwidth' variable gets updated with the new value for each
195      // node. Once the heartbeat command is issued to update the value on the
196      // specified datanode, this value will be set back to 0.
197      private long bandwidth;
198    
199      /** A queue of blocks to be replicated by this datanode */
200      private final BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>();
201      /** A queue of blocks to be recovered by this datanode */
202      private final BlockQueue<BlockInfoUnderConstruction> recoverBlocks =
203                                    new BlockQueue<BlockInfoUnderConstruction>();
204      /** A set of blocks to be invalidated by this datanode */
205      private final LightWeightHashSet<Block> invalidateBlocks = new LightWeightHashSet<Block>();
206    
207      /* Variables for maintaining number of blocks scheduled to be written to
208       * this storage. This count is approximate and might be slightly bigger
209       * in case of errors (e.g. datanode does not report if an error occurs
210       * while writing the block).
211       */
212      private EnumCounters<StorageType> currApproxBlocksScheduled
213          = new EnumCounters<StorageType>(StorageType.class);
214      private EnumCounters<StorageType> prevApproxBlocksScheduled
215          = new EnumCounters<StorageType>(StorageType.class);
216      private long lastBlocksScheduledRollTime = 0;
217      private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min
218      private int volumeFailures = 0;
219      
220      /** 
221       * When set to true, the node is not in include list and is not allowed
222       * to communicate with the namenode
223       */
224      private boolean disallowed = false;
225    
226      // HB processing can use it to tell if it is the first HB since DN restarted
227      private boolean heartbeatedSinceRegistration = false;
228    
229      // The number of replication work pending before targets are determined
230      private int PendingReplicationWithoutTargets = 0;
231    
232      /**
233       * DatanodeDescriptor constructor
234       * @param nodeID id of the data node
235       */
236      public DatanodeDescriptor(DatanodeID nodeID) {
237        super(nodeID);
238        updateHeartbeatState(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0);
239      }
240    
241      /**
242       * DatanodeDescriptor constructor
243       * @param nodeID id of the data node
244       * @param networkLocation location of the data node in network
245       */
246      public DatanodeDescriptor(DatanodeID nodeID, 
247                                String networkLocation) {
248        super(nodeID, networkLocation);
249        updateHeartbeatState(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0);
250      }
251    
  /**
   * @param storageID the storage ID to look up
   * @return the storage info for the given ID, or null if unknown
   */
  @VisibleForTesting
  public DatanodeStorageInfo getStorageInfo(String storageID) {
    synchronized (storageMap) {
      return storageMap.get(storageID);
    }
  }
  /** @return a snapshot array of all storages on this datanode */
  DatanodeStorageInfo[] getStorageInfos() {
    synchronized (storageMap) {
      final Collection<DatanodeStorageInfo> storages = storageMap.values();
      return storages.toArray(new DatanodeStorageInfo[storages.size()]);
    }
  }

  /** @return one StorageReport per storage currently on this datanode */
  public StorageReport[] getStorageReports() {
    final DatanodeStorageInfo[] infos = getStorageInfos();
    final StorageReport[] reports = new StorageReport[infos.length];
    for(int i = 0; i < infos.length; i++) {
      reports[i] = infos[i].toStorageReport();
    }
    return reports;
  }
273    
274      boolean hasStaleStorages() {
275        synchronized (storageMap) {
276          for (DatanodeStorageInfo storage : storageMap.values()) {
277            if (storage.areBlockContentsStale()) {
278              return true;
279            }
280          }
281          return false;
282        }
283      }
284    
285      /**
286       * Remove block from the list of blocks belonging to the data-node. Remove
287       * data-node from the block.
288       */
289      boolean removeBlock(BlockInfo b) {
290        final DatanodeStorageInfo s = b.findStorageInfo(this);
291        // if block exists on this datanode
292        if (s != null) {
293          return s.removeBlock(b);
294        }
295        return false;
296      }
297      
298      /**
299       * Remove block from the list of blocks belonging to the data-node. Remove
300       * data-node from the block.
301       */
302      boolean removeBlock(String storageID, BlockInfo b) {
303        DatanodeStorageInfo s = getStorageInfo(storageID);
304        if (s != null) {
305          return s.removeBlock(b);
306        }
307        return false;
308      }
309    
310      public void resetBlocks() {
311        setCapacity(0);
312        setRemaining(0);
313        setBlockPoolUsed(0);
314        setDfsUsed(0);
315        setXceiverCount(0);
316        this.invalidateBlocks.clear();
317        this.volumeFailures = 0;
318        // pendingCached, cached, and pendingUncached are protected by the
319        // FSN lock.
320        this.pendingCached.clear();
321        this.cached.clear();
322        this.pendingUncached.clear();
323      }
324      
  /**
   * Drop all queued replication, recovery, invalidation and caching work
   * for this datanode.
   */
  public void clearBlockQueues() {
    synchronized (invalidateBlocks) {
      this.invalidateBlocks.clear();
      this.recoverBlocks.clear();
      this.replicateBlocks.clear();
    }
    // pendingCached, cached, and pendingUncached are protected by the
    // FSN lock.
    this.pendingCached.clear();
    this.cached.clear();
    this.pendingUncached.clear();
  }
337    
338      public int numBlocks() {
339        int blocks = 0;
340        for (DatanodeStorageInfo entry : getStorageInfos()) {
341          blocks += entry.numBlocks();
342        }
343        return blocks;
344      }
345    
346      /**
347       * Updates stats from datanode heartbeat.
348       */
349      public void updateHeartbeat(StorageReport[] reports, long cacheCapacity,
350          long cacheUsed, int xceiverCount, int volFailures) {
351        updateHeartbeatState(reports, cacheCapacity, cacheUsed, xceiverCount,
352            volFailures);
353        heartbeatedSinceRegistration = true;
354      }
355    
356      /**
357       * process datanode heartbeat or stats initialization.
358       */
359      public void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
360          long cacheUsed, int xceiverCount, int volFailures) {
361        long totalCapacity = 0;
362        long totalRemaining = 0;
363        long totalBlockPoolUsed = 0;
364        long totalDfsUsed = 0;
365        Set<DatanodeStorageInfo> failedStorageInfos = null;
366    
367        // Decide if we should check for any missing StorageReport and mark it as
368        // failed. There are different scenarios.
369        // 1. When DN is running, a storage failed. Given the current DN
370        //    implementation doesn't add recovered storage back to its storage list
371        //    until DN restart, we can assume volFailures won't decrease
372        //    during the current DN registration session.
373        //    When volumeFailures == this.volumeFailures, it implies there is no
374        //    state change. No need to check for failed storage. This is an
375        //    optimization.
376        // 2. After DN restarts, volFailures might not increase and it is possible
377        //    we still have new failed storage. For example, admins reduce
378        //    available storages in configuration. Another corner case
379        //    is the failed volumes might change after restart; a) there
380        //    is one good storage A, one restored good storage B, so there is
381        //    one element in storageReports and that is A. b) A failed. c) Before
382        //    DN sends HB to NN to indicate A has failed, DN restarts. d) After DN
383        //    restarts, storageReports has one element which is B.
384        boolean checkFailedStorages = (volFailures > this.volumeFailures) ||
385            !heartbeatedSinceRegistration;
386    
387        if (checkFailedStorages) {
388          LOG.info("Number of failed storage changes from "
389              + this.volumeFailures + " to " + volFailures);
390          failedStorageInfos = new HashSet<DatanodeStorageInfo>(
391              storageMap.values());
392        }
393    
394        setCacheCapacity(cacheCapacity);
395        setCacheUsed(cacheUsed);
396        setXceiverCount(xceiverCount);
397        setLastUpdate(Time.now());    
398        this.volumeFailures = volFailures;
399        for (StorageReport report : reports) {
400          DatanodeStorageInfo storage = updateStorage(report.getStorage());
401          if (checkFailedStorages) {
402            failedStorageInfos.remove(storage);
403          }
404    
405          storage.receivedHeartbeat(report);
406          totalCapacity += report.getCapacity();
407          totalRemaining += report.getRemaining();
408          totalBlockPoolUsed += report.getBlockPoolUsed();
409          totalDfsUsed += report.getDfsUsed();
410        }
411        rollBlocksScheduled(getLastUpdate());
412    
413        // Update total metrics for the node.
414        setCapacity(totalCapacity);
415        setRemaining(totalRemaining);
416        setBlockPoolUsed(totalBlockPoolUsed);
417        setDfsUsed(totalDfsUsed);
418        if (checkFailedStorages) {
419          updateFailedStorage(failedStorageInfos);
420        }
421      }
422    
423      private void updateFailedStorage(
424          Set<DatanodeStorageInfo> failedStorageInfos) {
425        for (DatanodeStorageInfo storageInfo : failedStorageInfos) {
426          if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
427            LOG.info(storageInfo + " failed.");
428            storageInfo.setState(DatanodeStorage.State.FAILED);
429          }
430        }
431      }
432    
  /**
   * Iterates over all blocks on this datanode by chaining the per-storage
   * block iterators one after another.
   */
  private static class BlockIterator implements Iterator<BlockInfo> {
    // Index of the per-storage iterator currently being consumed.
    private int index = 0;
    private final List<Iterator<BlockInfo>> iterators;
    
    private BlockIterator(final DatanodeStorageInfo... storages) {
      List<Iterator<BlockInfo>> iterators = new ArrayList<Iterator<BlockInfo>>();
      for (DatanodeStorageInfo e : storages) {
        iterators.add(e.getBlockIterator());
      }
      this.iterators = Collections.unmodifiableList(iterators);
    }

    @Override
    public boolean hasNext() {
      update();
      return !iterators.isEmpty() && iterators.get(index).hasNext();
    }

    @Override
    public BlockInfo next() {
      update();
      return iterators.get(index).next();
    }
    
    @Override
    public void remove() {
      throw new UnsupportedOperationException("Remove unsupported.");
    }
    
    // Skip past exhausted iterators, stopping at the last one so that
    // hasNext() on it decides whether iteration is finished.
    private void update() {
      while(index < iterators.size() - 1 && !iterators.get(index).hasNext()) {
        index++;
      }
    }
  }
468    
  /** @return an iterator over all blocks on all storages of this datanode */
  Iterator<BlockInfo> getBlockIterator() {
    return new BlockIterator(getStorageInfos());
  }
  /** @return an iterator over the blocks on the given storage */
  Iterator<BlockInfo> getBlockIterator(final String storageID) {
    return new BlockIterator(getStorageInfo(storageID));
  }

  /** Bump the count of replication work queued before targets are chosen. */
  void incrementPendingReplicationWithoutTargets() {
    PendingReplicationWithoutTargets++;
  }

  /** Drop the count of replication work queued before targets are chosen. */
  void decrementPendingReplicationWithoutTargets() {
    PendingReplicationWithoutTargets--;
  }
483    
484      /**
485       * Store block replication work.
486       */
487      void addBlockToBeReplicated(Block block, DatanodeStorageInfo[] targets) {
488        assert(block != null && targets != null && targets.length > 0);
489        replicateBlocks.offer(new BlockTargetPair(block, targets));
490      }
491    
492      /**
493       * Store block recovery work.
494       */
495      void addBlockToBeRecovered(BlockInfoUnderConstruction block) {
496        if(recoverBlocks.contains(block)) {
497          // this prevents adding the same block twice to the recovery queue
498          BlockManager.LOG.info(block + " is already in the recovery queue");
499          return;
500        }
501        recoverBlocks.offer(block);
502      }
503    
504      /**
505       * Store block invalidation work.
506       */
507      void addBlocksToBeInvalidated(List<Block> blocklist) {
508        assert(blocklist != null && blocklist.size() > 0);
509        synchronized (invalidateBlocks) {
510          for(Block blk : blocklist) {
511            invalidateBlocks.add(blk);
512          }
513        }
514      }
515    
516      /**
517       * The number of work items that are pending to be replicated
518       */
519      int getNumberOfBlocksToBeReplicated() {
520        return PendingReplicationWithoutTargets + replicateBlocks.size();
521      }
522    
523      /**
524       * The number of block invalidation items that are pending to 
525       * be sent to the datanode
526       */
527      int getNumberOfBlocksToBeInvalidated() {
528        synchronized (invalidateBlocks) {
529          return invalidateBlocks.size();
530        }
531      }
532    
533      public List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
534        return replicateBlocks.poll(maxTransfers);
535      }
536    
537      public BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) {
538        List<BlockInfoUnderConstruction> blocks = recoverBlocks.poll(maxTransfers);
539        if(blocks == null)
540          return null;
541        return blocks.toArray(new BlockInfoUnderConstruction[blocks.size()]);
542      }
543    
544      /**
545       * Remove the specified number of blocks to be invalidated
546       */
547      public Block[] getInvalidateBlocks(int maxblocks) {
548        synchronized (invalidateBlocks) {
549          Block[] deleteList = invalidateBlocks.pollToArray(new Block[Math.min(
550              invalidateBlocks.size(), maxblocks)]);
551          return deleteList.length == 0 ? null : deleteList;
552        }
553      }
554    
555      /**
556       * @return Approximate number of blocks currently scheduled to be written 
557       */
558      public long getRemaining(StorageType t) {
559        long remaining = 0;
560        for(DatanodeStorageInfo s : getStorageInfos()) {
561          if (s.getStorageType() == t) {
562            remaining += s.getRemaining();
563          }
564        }
565        return remaining;    
566      }
567    
568      /**
569       * @return Approximate number of blocks currently scheduled to be written 
570       * to the given storage type of this datanode.
571       */
572      public int getBlocksScheduled(StorageType t) {
573        return (int)(currApproxBlocksScheduled.get(t)
574            + prevApproxBlocksScheduled.get(t));
575      }
576    
577      /**
578       * @return Approximate number of blocks currently scheduled to be written 
579       * to this datanode.
580       */
581      public int getBlocksScheduled() {
582        return (int)(currApproxBlocksScheduled.sum()
583            + prevApproxBlocksScheduled.sum());
584      }
585    
586      /** Increment the number of blocks scheduled. */
587      void incrementBlocksScheduled(StorageType t) {
588        currApproxBlocksScheduled.add(t, 1);;
589      }
590      
  /** Decrement the number of blocks scheduled. */
  void decrementBlocksScheduled(StorageType t) {
    // Decrement the previous-interval counter first; presumably so the
    // current interval retains its own additions — TODO confirm.
    if (prevApproxBlocksScheduled.get(t) > 0) {
      prevApproxBlocksScheduled.subtract(t, 1);
    } else if (currApproxBlocksScheduled.get(t) > 0) {
      currApproxBlocksScheduled.subtract(t, 1);
    } 
    // its ok if both counters are zero.
  }
  
  /** Adjusts curr and prev number of blocks scheduled every few minutes. */
  private void rollBlocksScheduled(long now) {
    if (now - lastBlocksScheduledRollTime > BLOCKS_SCHEDULED_ROLL_INTERVAL) {
      // Age the current counters into prev and start a fresh interval.
      prevApproxBlocksScheduled.set(currApproxBlocksScheduled);
      currApproxBlocksScheduled.reset();
      lastBlocksScheduledRollTime = now;
    }
  }
609      
  @Override
  public int hashCode() {
    // Super implementation is sufficient: nodes hash by DatanodeID identity.
    return super.hashCode();
  }
  
  @Override
  public boolean equals(Object obj) {
    // Sufficient to use super equality as datanodes are uniquely identified
    // by DatanodeID
    return (this == obj) || super.equals(obj);
  }
622    
623      /** Decommissioning status */
624      public class DecommissioningStatus {
625        private int underReplicatedBlocks;
626        private int decommissionOnlyReplicas;
627        private int underReplicatedInOpenFiles;
628        private long startTime;
629        
630        synchronized void set(int underRep,
631            int onlyRep, int underConstruction) {
632          if (isDecommissionInProgress() == false) {
633            return;
634          }
635          underReplicatedBlocks = underRep;
636          decommissionOnlyReplicas = onlyRep;
637          underReplicatedInOpenFiles = underConstruction;
638        }
639    
640        /** @return the number of under-replicated blocks */
641        public synchronized int getUnderReplicatedBlocks() {
642          if (isDecommissionInProgress() == false) {
643            return 0;
644          }
645          return underReplicatedBlocks;
646        }
647        /** @return the number of decommission-only replicas */
648        public synchronized int getDecommissionOnlyReplicas() {
649          if (isDecommissionInProgress() == false) {
650            return 0;
651          }
652          return decommissionOnlyReplicas;
653        }
654        /** @return the number of under-replicated blocks in open files */
655        public synchronized int getUnderReplicatedInOpenFiles() {
656          if (isDecommissionInProgress() == false) {
657            return 0;
658          }
659          return underReplicatedInOpenFiles;
660        }
661        /** Set start time */
662        public synchronized void setStartTime(long time) {
663          startTime = time;
664        }
665        /** @return start time */
666        public synchronized long getStartTime() {
667          if (isDecommissionInProgress() == false) {
668            return 0;
669          }
670          return startTime;
671        }
672      }  // End of class DecommissioningStatus
673    
674      /**
675       * Set the flag to indicate if this datanode is disallowed from communicating
676       * with the namenode.
677       */
678      public void setDisallowed(boolean flag) {
679        disallowed = flag;
680      }
681      /** Is the datanode disallowed from communicating with the namenode? */
682      public boolean isDisallowed() {
683        return disallowed;
684      }
685    
686      /**
687       * @return number of failed volumes in the datanode.
688       */
689      public int getVolumeFailures() {
690        return volumeFailures;
691      }
692    
693      /**
694       * @param nodeReg DatanodeID to update registration for.
695       */
696      @Override
697      public void updateRegInfo(DatanodeID nodeReg) {
698        super.updateRegInfo(nodeReg);
699        
700        // must re-process IBR after re-registration
701        for(DatanodeStorageInfo storage : getStorageInfos()) {
702          storage.setBlockReportCount(0);
703        }
704        heartbeatedSinceRegistration = false;
705      }
706    
707      /**
708       * @return balancer bandwidth in bytes per second for this datanode
709       */
710      public long getBalancerBandwidth() {
711        return this.bandwidth;
712      }
713    
714      /**
715       * @param bandwidth balancer bandwidth in bytes per second for this datanode
716       */
717      public void setBalancerBandwidth(long bandwidth) {
718        this.bandwidth = bandwidth;
719      }
720    
721      @Override
722      public String dumpDatanode() {
723        StringBuilder sb = new StringBuilder(super.dumpDatanode());
724        int repl = replicateBlocks.size();
725        if (repl > 0) {
726          sb.append(" ").append(repl).append(" blocks to be replicated;");
727        }
728        int inval = invalidateBlocks.size();
729        if (inval > 0) {
730          sb.append(" ").append(inval).append(" blocks to be invalidated;");      
731        }
732        int recover = recoverBlocks.size();
733        if (recover > 0) {
734          sb.append(" ").append(recover).append(" blocks to be recovered;");
735        }
736        return sb.toString();
737      }
738    
  /**
   * Look up the DatanodeStorageInfo for the given storage, creating it on
   * first sight and refreshing its state/type when the report differs.
   *
   * @param s storage identity and state from a datanode report
   * @return the (possibly newly created) storage info
   */
  DatanodeStorageInfo updateStorage(DatanodeStorage s) {
    synchronized (storageMap) {
      DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
      if (storage == null) {
        LOG.info("Adding new storage ID " + s.getStorageID() +
                 " for DN " + getXferAddr());
        storage = new DatanodeStorageInfo(this, s);
        storageMap.put(s.getStorageID(), storage);
      } else if (storage.getState() != s.getState() ||
                 storage.getStorageType() != s.getStorageType()) {
        // For backwards compatibility, make sure that the type and
        // state are updated. Some reports from older datanodes do
        // not include these fields so we may have assumed defaults.
        // This check can be removed in the next major release after
        // 2.4.
        storage.updateFromStorage(s);
        // NOTE(review): this put re-inserts the same instance under a key it
        // already maps to — looks redundant; confirm before removing.
        storageMap.put(storage.getStorageID(), storage);
      }
      return storage;
    }
  }
760    
761      /**
762       * @return   The time at which we last sent caching directives to this 
763       *           DataNode, in monotonic milliseconds.
764       */
765      public long getLastCachingDirectiveSentTimeMs() {
766        return this.lastCachingDirectiveSentTimeMs;
767      }
768    
769      /**
770       * @param time  The time at which we last sent caching directives to this 
771       *              DataNode, in monotonic milliseconds.
772       */
773      public void setLastCachingDirectiveSentTimeMs(long time) {
774        this.lastCachingDirectiveSentTimeMs = time;
775      }
776      
777      /**
778       * checks whether atleast first block report has been received
779       * @return
780       */
781      public boolean checkBlockReportReceived() {
782        if(this.getStorageInfos().length == 0) {
783          return false;
784        }
785        for(DatanodeStorageInfo storageInfo: this.getStorageInfos()) {
786          if(storageInfo.getBlockReportCount() == 0 )
787            return false;
788        }
789        return true;
790     }
791    }
792