diff -rN -u old-trunk/src/allmydata/immutable/downloader/finder.py new-trunk/src/allmydata/immutable/downloader/finder.py
--- old-trunk/src/allmydata/immutable/downloader/finder.py	2010-09-07 00:58:36.000000000 -0600
+++ new-trunk/src/allmydata/immutable/downloader/finder.py	2010-09-07 00:58:42.000000000 -0600
@@ -49,14 +49,17 @@
         self._lp = log.msg(format="ShareFinder[si=%(si)s] starting",
                            si=self._si_prefix,
                            level=log.NOISY, parent=logparent, umid="2xjj2A")
+        log.msg("xxx %s.__init__(%s, %s, %s, %s, %s, %s)" % (self, storage_broker, verifycap, node, download_status, logparent, max_outstanding_requests))
 
     def update_num_segments(self):
+        log.msg("xxx %s.update_num_segments()" % (self,))
         (numsegs, authoritative) = self.node.get_num_segments()
         assert authoritative
         for cs in self._commonshares.values():
             cs.set_authoritative_num_segments(numsegs)
 
     def start_finding_servers(self):
+        log.msg("xxx %s.start_finding_servers()" % (self,))
         # don't get servers until somebody uses us: creating the
         # ImmutableFileNode should not cause work to happen yet. Test case is
         # test_dirnode, which creates us with storage_broker=None
@@ -72,6 +75,7 @@
         return log.msg(*args, **kwargs)
 
     def stop(self):
+        log.msg("xxx %s.stop()" % (self,))
         self.running = False
         while self.overdue_timers:
             req,t = self.overdue_timers.popitem()
@@ -79,6 +83,7 @@
 
     # called by our parent CiphertextDownloader
     def hungry(self):
+        log.msg("xxx %s.hungry()" % (self,))
         self.log(format="ShareFinder[si=%(si)s] hungry",
                  si=self._si_prefix, level=log.NOISY, umid="NywYaQ")
         self.start_finding_servers()
@@ -87,6 +92,7 @@
 
     # internal methods
     def loop(self):
+        log.msg("xxx %s.loop()" % (self,))
         pending_s = ",".join([idlib.shortnodeid_b2a(rt.peerid)
                               for rt in self.pending_requests]) # sort?
         self.log(format="ShareFinder loop: running=%(running)s"
@@ -114,6 +120,7 @@
             self.send_request(server)
             # we loop again to get parallel queries. The check above will
             # prevent us from looping forever.
+            log.msg("xxx %s.loop() => loop again to get parallel queries" % (self,))
             eventually(self.loop)
             return
 
@@ -130,6 +137,7 @@
         self.share_consumer.no_more_shares()
 
     def send_request(self, server):
+        log.msg("xxx %s.send_request(%s)" % (self, server))
         peerid, rref = server
         req = RequestToken(peerid)
         self.pending_requests.add(req)
@@ -152,6 +160,7 @@
         d.addCallback(incidentally, eventually, self.loop)
 
     def _request_retired(self, req):
+        log.msg("xxx %s._request_retired(%s)" % (self, req))
         self.pending_requests.discard(req)
         self.overdue_requests.discard(req)
         if req in self.overdue_timers:
@@ -159,6 +168,7 @@
             del self.overdue_timers[req]
 
     def overdue(self, req):
+        log.msg("xxx %s.overdue(%s)" % (self, req))
         del self.overdue_timers[req]
         assert req in self.pending_requests # paranoia, should never be false
         self.overdue_requests.add(req)
@@ -166,6 +176,7 @@
 
     def _got_response(self, buckets, server_version, peerid, req, d_ev,
                       time_sent, lp):
+        log.msg("xxx %s._got_response(%s, %s, %s, %s, %s, %s, %s)" % (self, buckets, server_version, peerid, req, d_ev, time_sent, lp))
         shnums = sorted([shnum for shnum in buckets])
         time_received = now()
         d_ev.finished(shnums, time_received)
@@ -187,6 +198,7 @@
         self._deliver_shares(shares)
 
     def _create_share(self, shnum, bucket, server_version, peerid, dyhb_rtt):
+        log.msg("xxx %s._create_share(%s, %s, %s, %s, %s)" % (self, shnum, bucket, server_version, peerid, dyhb_rtt))
         if shnum in self._commonshares:
             cs = self._commonshares[shnum]
         else:
@@ -215,6 +227,7 @@
         return s
 
     def _deliver_shares(self, shares):
+        log.msg("xxx %s._deliver_shares(%s)" % (self, shares))
         # they will call hungry() again if they want more
         self._hungry = False
         shares_s = ",".join([str(sh) for sh in shares])
@@ -223,9 +236,8 @@
         eventually(self.share_consumer.got_shares, shares)
 
     def _got_error(self, f, peerid, req, d_ev, lp):
+        log.msg("xxx %s._got_error(%s, %s, %s, %s, %s)" % (self, f, peerid, req, d_ev, lp))
         d_ev.finished("error", now())
         self.log(format="got error from [%(peerid)s]",
                  peerid=idlib.shortnodeid_b2a(peerid), failure=f,
                  level=log.UNUSUAL, parent=lp, umid="zUKdCw")
-
-
diff -rN -u old-trunk/src/allmydata/immutable/downloader/node.py new-trunk/src/allmydata/immutable/downloader/node.py
--- old-trunk/src/allmydata/immutable/downloader/node.py	2010-09-07 00:58:36.000000000 -0600
+++ new-trunk/src/allmydata/immutable/downloader/node.py	2010-09-07 00:58:42.000000000 -0600
@@ -221,10 +221,12 @@
 
     # called by our child ShareFinder
     def got_shares(self, shares):
+        log.msg("xxx %s.got_shares(%s)" % (self, shares))
         self._shares.update(shares)
         if self._active_segment:
             self._active_segment.add_shares(shares)
     def no_more_shares(self):
+        log.msg("xxx %s.no_more_shares() ; _active_segment: %s" % (self, self._active_segment))
         self._no_more_shares = True
         if self._active_segment:
             self._active_segment.no_more_shares()
diff -rN -u old-trunk/src/allmydata/immutable/downloader/share.py new-trunk/src/allmydata/immutable/downloader/share.py
--- old-trunk/src/allmydata/immutable/downloader/share.py	2010-09-07 00:58:36.000000000 -0600
+++ new-trunk/src/allmydata/immutable/downloader/share.py	2010-09-07 00:58:42.000000000 -0600
@@ -83,6 +83,7 @@
 
         self._requested_blocks = [] # (segnum, set(observer2..))
         ver = server_version["http://allmydata.org/tahoe/protocols/storage/v1"]
+        log.msg( "zzz ver: %r" % (ver,))
         self._overrun_ok = ver["tolerates-immutable-read-overrun"]
         # If _overrun_ok and we guess the offsets correctly, we can get
         # everything in one RTT. If _overrun_ok and we guess wrong, we might
diff -rN -u old-trunk/src/allmydata/test/test_immutable.py new-trunk/src/allmydata/test/test_immutable.py
--- old-trunk/src/allmydata/test/test_immutable.py	2010-09-07 00:58:38.000000000 -0600
+++ new-trunk/src/allmydata/test/test_immutable.py	2010-09-07 00:58:43.000000000 -0600
@@ -1,10 +1,94 @@
 from allmydata.test import common
 from allmydata.interfaces import NotEnoughSharesError
 from allmydata.util.consumer import download_to_data
+from allmydata import uri
 from twisted.internet import defer
 from twisted.trial import unittest
 import random
 
+from foolscap.api import eventually
+from allmydata.util import log
+
+from allmydata.immutable.downloader import finder
+
+import mock
+
+class TestShareFinder(unittest.TestCase):
+    def test_sharefinder_last_request_provides_last_share(self):
+        # ticket #1191
+
+        # Suppose that K=2 and we send two DYHB requests, and each
+        # response offers exactly one share. The finder must not give
+        # up after it has received the second DYHB response but before
+        # it has noticed that this response contains an offer of a
+        # share.
+
+        rcap = uri.CHKFileURI('a'*32, 'a'*32, 2, 99, 100)
+        vcap = rcap.get_verify_cap()
+
+        class MockServer(object):
+            def __init__(self, buckets):
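+                # a version dict of the shape share.py expects; Share reads
+                # "tolerates-immutable-read-overrun" out of the v1 entry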
+                self.version = {
+                    'http://allmydata.org/tahoe/protocols/storage/v1': {
+                        "tolerates-immutable-read-overrun": True
+                        }
+                    }
+                self.buckets = buckets
+            def callRemote(self, methname, *args, **kwargs):
+                log.msg("yyy 2 %s.callRemote(%s, %s, %s)" % (self, methname, args, kwargs))
+                d = defer.Deferred()
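+                # answer asynchronously, two eventual-send turns later, so
+                # that both DYHB requests are sent before either response
+                # arrives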
+                eventually(eventually, d.callback, self.buckets)
+                return d
+
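+        # two servers, each holding a single distinct share (sh1 and sh2)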
+        mockserver1 = MockServer({1: mock.Mock()})
+        mockserver2 = MockServer({2: mock.Mock()})
+        mockstoragebroker = mock.Mock()
+        mockstoragebroker.get_servers_for_index.return_value = [ ('ms1', mockserver1), ('ms2', mockserver2) ]
+        mockdownloadstatus = mock.Mock()
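+        # a minimal stand-in for the DownloadNode: it counts got_shares()
+        # calls and fails the test if the finder signals no_more_shares(),
+        # i.e. if it gives up while a share offer is still in flight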
+        class MockNode(object):
+            def __init__(self, testcase):
+                self.testcase = testcase
+                self.got = 0
+                self.finished_d = defer.Deferred()
+                self.got_shares_d = defer.Deferred()
+                self.segment_size = 78
+                self.guessed_segment_size = 78
+            def when_finished(self):
+                return self.finished_d
+            def when_got_shares(self):
+                return self.got_shares_d
+            def get_num_segments(self):
+                return (2, True)
+            def _calculate_sizes(self, guessed_segment_size):
+                return {'block_size': 3, 'num_segments': 2}
+            def no_more_shares(self):
+                self.testcase.fail("The node was told by the share finder that it is destined to remain hungry.")
+            def got_shares(self, shares):
+                self.got += 1
+                log.msg("yyy 3 %s.got_shares(%s) got: %s" % (self, shares, self.got))
+                if self.got == 1:
+                    self.got_shares_d.callback(None)
+                elif self.got == 2:
+                    self.finished_d.callback(None)
+        mocknode = MockNode(self)
+
+        s = finder.ShareFinder(mockstoragebroker, vcap, mocknode, mockdownloadstatus)
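+        # hungry() triggers the DYHB queries to both servers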
+        s.hungry()
+
+        return mocknode.when_finished()
+
 class Test(common.ShareManglingMixin, common.ShouldFailMixin, unittest.TestCase):
     def test_test_code(self):
         # The following process of stashing the shares, running

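
With this patch applied, the new test case can be run on its own with, e.g.:

    trial allmydata.test.test_immutable.TestShareFinder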