| 1 | """ |
|---|
| 2 | Ported to Python 3. |
|---|
| 3 | """ |
|---|
| 4 | |
|---|
| 5 | import os, base64 |
|---|
| 6 | from ..common import AsyncTestCase |
|---|
| 7 | from testtools.matchers import HasLength |
|---|
| 8 | from twisted.internet import defer |
|---|
| 9 | from foolscap.logging import log |
|---|
| 10 | from allmydata import uri |
|---|
| 11 | from allmydata.crypto import rsa |
|---|
| 12 | from allmydata.interfaces import NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION |
|---|
| 13 | from allmydata.util import fileutil |
|---|
| 14 | from allmydata.util.hashutil import ssk_writekey_hash, ssk_pubkey_fingerprint_hash |
|---|
| 15 | from allmydata.mutable.common import \ |
|---|
| 16 | MODE_CHECK, MODE_WRITE, MODE_READ, \ |
|---|
| 17 | UncoordinatedWriteError, \ |
|---|
| 18 | NotEnoughServersError |
|---|
| 19 | from allmydata.mutable.publish import MutableData |
|---|
| 20 | from allmydata.storage.common import storage_index_to_dir |
|---|
| 21 | from ..no_network import GridTestMixin |
|---|
| 22 | from .. import common_util as testutil |
|---|
| 23 | from ..common_util import DevNullDictionary |
|---|
| 24 | |
|---|
class SameKeyGenerator:
    """
    A stand-in key generator that always hands back the same pre-made
    RSA keypair, so a test can know a mutable file's storage index in
    advance of creating the file.
    """
    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey
    def generate(self, keysize=None):
        # 'keysize' is accepted (and ignored) to match the interface of the
        # real key generator.
        return defer.succeed( (self.pubkey, self.privkey) )
|---|
| 31 | |
|---|
class FirstServerGetsKilled:
    """
    A post-call notifier: after the first remote call it observes, it marks
    that call's server wrapper as broken (subsequent calls to that server
    will fail). Later calls pass through untouched.
    """
    done = False
    def notify(self, retval, wrapper, methname):
        if not self.done:
            # break the first server we see; leave all others alone
            wrapper.broken = True
            self.done = True
        return retval
|---|
| 39 | |
|---|
class FirstServerGetsDeleted:
    """
    A post-call notifier: the first query it observes succeeds normally, but
    every later write-vector call to that same server is answered as if the
    share had been deleted (success, but no shares reported).
    """
    def __init__(self):
        self.done = False
        self.silenced = None
    def notify(self, retval, wrapper, methname):
        if not self.done:
            # this query will work, but later queries should think the share
            # has been deleted
            self.done = True
            self.silenced = wrapper
            return retval
        if wrapper == self.silenced:
            assert methname == "slot_testv_and_readv_and_writev"
            # (success, but an empty sharemap): the share looks gone
            return (True, {})
        return retval
|---|
| 55 | |
|---|
class Problems(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin):
    """
    Exercise failure modes of mutable-file publish/retrieve: stale
    servermaps, vanished or broken servers, privkey-fetch errors, and
    crafted share corruption (#1654).
    """

    def do_publish_surprise(self, version):
        """Publishing with a stale servermap must raise UncoordinatedWriteError."""
        self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData(b"contents 1"),
                                   version=version)
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            def _got_smap1(smap):
                # stash the old state of the file
                self.old_map = smap
            d.addCallback(_got_smap1)
            # then modify the file, leaving the old map untouched
            d.addCallback(lambda res: log.msg("starting winning write"))
            d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2")))
            # now attempt to modify the file with the old servermap. This
            # will look just like an uncoordinated write, in which every
            # single share got updated between our mapupdate and our publish
            d.addCallback(lambda res: log.msg("starting doomed write"))
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "test_publish_surprise", None,
                                          n.upload,
                                          MutableData(b"contents 2a"), self.old_map))
            return d
        d.addCallback(_created)
        return d

    def test_publish_surprise_sdmf(self):
        return self.do_publish_surprise(SDMF_VERSION)

    def test_publish_surprise_mdmf(self):
        return self.do_publish_surprise(MDMF_VERSION)

    def test_retrieve_surprise(self):
        """Retrieving an old version via a stale servermap must fail cleanly."""
        self.basedir = "mutable/Problems/test_retrieve_surprise"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData(b"contents 1"*4000))
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            def _got_smap1(smap):
                # stash the old state of the file
                self.old_map = smap
            d.addCallback(_got_smap1)
            # then modify the file, leaving the old map untouched
            d.addCallback(lambda res: log.msg("starting winning write"))
            d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2")))
            # now attempt to retrieve the old version with the old servermap.
            # This will look like someone has changed the file since we
            # updated the servermap.
            d.addCallback(lambda res: log.msg("starting doomed read"))
            d.addCallback(lambda res:
                          self.shouldFail(NotEnoughSharesError,
                                          "test_retrieve_surprise",
                                          "ran out of servers: have 0 of 1",
                                          n.download_version,
                                          self.old_map,
                                          self.old_map.best_recoverable_version(),
                                          ))
            return d
        d.addCallback(_created)
        return d


    def test_unexpected_shares(self):
        # upload the file, take a servermap, shut down one of the servers,
        # upload it again (causing shares to appear on a new server), then
        # upload using the old servermap. The last upload should fail with an
        # UncoordinatedWriteError, because of the shares that didn't appear
        # in the servermap.
        self.basedir = "mutable/Problems/test_unexpected_shares"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData(b"contents 1"))
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            def _got_smap1(smap):
                # stash the old state of the file
                self.old_map = smap
                # now shut down one of the servers
                peer0 = list(smap.make_sharemap()[0])[0].get_serverid()
                self.g.remove_server(peer0)
                # then modify the file, leaving the old map untouched
                log.msg("starting winning write")
                return n.overwrite(MutableData(b"contents 2"))
            d.addCallback(_got_smap1)
            # now attempt to modify the file with the old servermap. This
            # will look just like an uncoordinated write, in which every
            # single share got updated between our mapupdate and our publish
            d.addCallback(lambda res: log.msg("starting doomed write"))
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "test_surprise", None,
                                          n.upload,
                                          MutableData(b"contents 2a"), self.old_map))
            return d
        d.addCallback(_created)
        return d

    def test_multiply_placed_shares(self):
        """After server churn and re-upload, exactly one recoverable version remains."""
        self.basedir = "mutable/Problems/test_multiply_placed_shares"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData(b"contents 1"))
        # remove one of the servers and reupload the file.
        def _created(n):
            self._node = n

            servers = self.g.get_all_serverids()
            self.ss = self.g.remove_server(servers[len(servers)-1])

            new_server = self.g.make_server(len(servers)-1)
            self.g.add_server(len(servers)-1, new_server)

            return self._node.download_best_version()
        d.addCallback(_created)
        d.addCallback(lambda data: MutableData(data))
        d.addCallback(lambda data: self._node.overwrite(data))

        # restore the server we removed earlier, then download+upload
        # the file again
        def _overwritten(ign):
            self.g.add_server(len(self.g.servers_by_number), self.ss)
            return self._node.download_best_version()
        d.addCallback(_overwritten)
        d.addCallback(lambda data: MutableData(data))
        d.addCallback(lambda data: self._node.overwrite(data))
        d.addCallback(lambda ignored:
                      self._node.get_servermap(MODE_CHECK))
        def _overwritten_again(smap):
            # Make sure that all shares were updated by making sure that
            # there aren't any other versions in the sharemap.
            self.assertThat(smap.recoverable_versions(), HasLength(1))
            self.assertThat(smap.unrecoverable_versions(), HasLength(0))
        d.addCallback(_overwritten_again)
        return d

    def test_bad_server(self):
        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        self.basedir = "mutable/Problems/test_bad_server"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker

        # to make sure that one of the initial peers is broken, we have to
        # get creative. We create an RSA key and compute its storage-index.
        # Then we make a KeyGenerator that always returns that one key, and
        # use it to create the mutable file. This will get easier when we can
        # use #467 static-server-selection to disable permutation and force
        # the choice of server for share[0].

        d = nm.key_generator.generate()
        def _got_key(keypair):
            (pubkey, privkey) = keypair
            nm.key_generator = SameKeyGenerator(pubkey, privkey)
            pubkey_s = rsa.der_string_from_verifying_key(pubkey)
            privkey_s = rsa.der_string_from_signing_key(privkey)
            u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
                                        ssk_pubkey_fingerprint_hash(pubkey_s))
            self._storage_index = u.get_storage_index()
        d.addCallback(_got_key)
        def _break_peer0(res):
            si = self._storage_index
            servers = nm.storage_broker.get_servers_for_psi(si)
            self.g.break_server(servers[0].get_serverid())
            self.server1 = servers[1]
        d.addCallback(_break_peer0)
        # now "create" the file, using the pre-established key, and let the
        # initial publish finally happen
        d.addCallback(lambda res: nm.create_mutable_file(MutableData(b"contents 1")))
        # that ought to work
        def _got_node(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.assertEqual(res, b"contents 1"))
            # now break the second peer
            def _break_peer1(res):
                self.g.break_server(self.server1.get_serverid())
            d.addCallback(_break_peer1)
            d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2")))
            # that ought to work too
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertEqual(res, b"contents 2"))
            def _explain_error(f):
                print(f)
                if f.check(NotEnoughServersError):
                    print("first_error:", f.value.first_error)
                return f
            d.addErrback(_explain_error)
            return d
        d.addCallback(_got_node)
        return d

    def test_bad_server_overlap(self):
        # like test_bad_server, but with no extra unused servers to fall back
        # upon. This means that we must re-use a server which we've already
        # used. If we don't remember the fact that we sent them one share
        # already, we'll mistakenly think we're experiencing an
        # UncoordinatedWriteError.

        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        self.basedir = "mutable/Problems/test_bad_server_overlap"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker
        sb = nm.storage_broker

        peerids = [s.get_serverid() for s in sb.get_connected_servers()]
        self.g.break_server(peerids[0])

        d = nm.create_mutable_file(MutableData(b"contents 1"))
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.assertEqual(res, b"contents 1"))
            # now break one of the remaining servers
            def _break_second_server(res):
                self.g.break_server(peerids[1])
            d.addCallback(_break_second_server)
            d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2")))
            # that ought to work too
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertEqual(res, b"contents 2"))
            return d
        d.addCallback(_created)
        return d

    def test_publish_all_servers_bad(self):
        # Break all servers: the publish should fail
        self.basedir = "mutable/Problems/test_publish_all_servers_bad"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().broken = True

        d = self.shouldFail(NotEnoughServersError,
                            "test_publish_all_servers_bad",
                            "ran out of good servers",
                            nm.create_mutable_file, MutableData(b"contents"))
        return d

    def test_publish_no_servers(self):
        # no servers at all: the publish should fail
        self.basedir = "mutable/Problems/test_publish_no_servers"
        self.set_up_grid(num_servers=0)
        nm = self.g.clients[0].nodemaker

        d = self.shouldFail(NotEnoughServersError,
                            "test_publish_no_servers",
                            "Ran out of non-bad servers",
                            nm.create_mutable_file, MutableData(b"contents"))
        return d


    def test_privkey_query_error(self):
        # when a servermap is updated with MODE_WRITE, it tries to get the
        # privkey. Something might go wrong during this query attempt.
        # Exercise the code in _privkey_query_failed which tries to handle
        # such an error.
        self.basedir = "mutable/Problems/test_privkey_query_error"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        nm._node_cache = DevNullDictionary() # disable the nodecache

        # we need some contents that are large enough to push the privkey out
        # of the early part of the file
        LARGE = b"These are Larger contents" * 2000 # about 50KB
        LARGE_uploadable = MutableData(LARGE)
        d = nm.create_mutable_file(LARGE_uploadable)
        def _created(n):
            self.uri = n.get_uri()
            self.n2 = nm.create_from_cap(self.uri)

            # When a mapupdate is performed on a node that doesn't yet know
            # the privkey, a short read is sent to a batch of servers, to get
            # the verinfo and (hopefully, if the file is short enough) the
            # encprivkey. Our file is too large to let this first read
            # contain the encprivkey. Each non-encprivkey-bearing response
            # that arrives (until the node gets the encprivkey) will trigger
            # a second read to specifically read the encprivkey.
            #
            # So, to exercise this case:
            #  1. notice which server gets a read() call first
            #  2. tell that server to start throwing errors
            killer = FirstServerGetsKilled()
            for s in nm.storage_broker.get_connected_servers():
                s.get_rref().post_call_notifier = killer.notify
        d.addCallback(_created)

        # now we update a servermap from a new node (which doesn't have the
        # privkey yet, forcing it to use a separate privkey query). Note that
        # the map-update will succeed, since we'll just get a copy from one
        # of the other shares.
        d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))

        return d

    def test_privkey_query_missing(self):
        # like test_privkey_query_error, but the shares are deleted by the
        # second query, instead of raising an exception.
        self.basedir = "mutable/Problems/test_privkey_query_missing"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        LARGE = b"These are Larger contents" * 2000 # about 50KiB
        LARGE_uploadable = MutableData(LARGE)
        nm._node_cache = DevNullDictionary() # disable the nodecache

        d = nm.create_mutable_file(LARGE_uploadable)
        def _created(n):
            self.uri = n.get_uri()
            self.n2 = nm.create_from_cap(self.uri)
            deleter = FirstServerGetsDeleted()
            for s in nm.storage_broker.get_connected_servers():
                s.get_rref().post_call_notifier = deleter.notify
        d.addCallback(_created)
        d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
        return d


    def test_block_and_hash_query_error(self):
        # This tests for what happens when a query to a remote server
        # fails in either the hash validation step or the block getting
        # step (because of batching, this is the same actual query).
        # We need to have the storage server persist up until the point
        # that its prefix is validated, then suddenly die. This
        # exercises some exception handling code in Retrieve.
        self.basedir = "mutable/Problems/test_block_and_hash_query_error"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        CONTENTS = b"contents" * 2000
        CONTENTS_uploadable = MutableData(CONTENTS)
        d = nm.create_mutable_file(CONTENTS_uploadable)
        def _created(node):
            self._node = node
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self._node.get_servermap(MODE_READ))
        def _then(servermap):
            # we have our servermap. Now we set up the servers like the
            # tests above -- the first one that gets a read call should
            # start throwing errors, but only after returning its prefix
            # for validation. Since we'll download without fetching the
            # private key, the next query to the remote server will be
            # for either a block and salt or for hashes, either of which
            # will exercise the error handling code.
            killer = FirstServerGetsKilled()
            for s in nm.storage_broker.get_connected_servers():
                s.get_rref().post_call_notifier = killer.notify
            ver = servermap.best_recoverable_version()
            assert ver
            return self._node.download_version(servermap, ver)
        d.addCallback(_then)
        d.addCallback(lambda data:
            self.assertEqual(data, CONTENTS))
        return d

    def test_1654(self):
        # test that the Retrieve object unconditionally verifies the block
        # hash tree root for mutable shares. The failure mode is that
        # carefully crafted shares can cause undetected corruption (the
        # retrieve appears to finish successfully, but the result is
        # corrupted). When fixed, these shares always cause a
        # CorruptShareError, which results in NotEnoughSharesError in this
        # 2-of-2 file.
        self.basedir = "mutable/Problems/test_1654"
        self.set_up_grid(num_servers=2)
        cap = uri.from_string(TEST_1654_CAP)
        si = cap.get_storage_index()

        for share, shnum in [(TEST_1654_SH0, 0), (TEST_1654_SH1, 1)]:
            sharedata = base64.b64decode(share)
            storedir = self.get_serverdir(shnum)
            storage_path = os.path.join(storedir, "shares",
                                        storage_index_to_dir(si))
            fileutil.make_dirs(storage_path)
            fileutil.write(os.path.join(storage_path, "%d" % shnum),
                           sharedata)

        nm = self.g.clients[0].nodemaker
        n = nm.create_from_cap(TEST_1654_CAP)
        # to exercise the problem correctly, we must ensure that sh0 is
        # processed first, and sh1 second. NoNetworkGrid has facilities to
        # stall the first request from a single server, but it's not
        # currently easy to extend that to stall the second request (mutable
        # retrievals will see two: first the mapupdate, then the fetch).
        # However, repeated executions of this run without the #1654 fix
        # suggests that we're failing reliably even without explicit stalls,
        # probably because the servers are queried in a fixed order. So I'm
        # ok with relying upon that.
        d = self.shouldFail(NotEnoughSharesError, "test #1654 share corruption",
                            "ran out of servers",
                            n.download_best_version)
        return d
|---|
| 454 | |
|---|
| 455 | |
|---|
# Fixture data for test_1654: a 2-of-2 SDMF cap plus two carefully crafted
# (corrupted) shares, base64-encoded. The payloads are verbatim test vectors
# and must not be altered.
TEST_1654_CAP = b"URI:SSK:6jthysgozssjnagqlcxjq7recm:yxawei54fmf2ijkrvs2shs6iey4kpdp6joi7brj2vrva6sp5nf3a"

TEST_1654_SH0 = b"""\
VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA46m9s5j6lnzsOHytBTs2JOo
AkWe8058hyrDa8igfBSqZMKO3aDOrFuRVt0ySYZ6oihFqPJRAAAAAAAAB8YAAAAA
AAAJmgAAAAFPNgDkK8brSCzKz6n8HFqzbnAlALvnaB0Qpa1Bjo9jiZdmeMyneHR+
UoJcDb1Ls+lVLeUqP2JitBEXdCzcF/X2YMDlmKb2zmPqWfOw4fK0FOzYk6gCRZ7z
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
/KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
+QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp4z9+5yd/pjkVy
bmvc7Jr70bOVpxvRoI2ZEgh/+QdxfcxGzRV0shAW86irr5bDQOyyknYk0p2xw2Wn
z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
72mXGlqyLyWYuAAAAAA="""

TEST_1654_SH1 = b"""\
VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA45R4Y4kuV458rSTGDVTqdzz
9Fig3NQ3LermyD+0XLeqbC7KNgvv6cNzMZ9psQQ3FseYsIR1AAAAAAAAB8YAAAAA
AAAJmgAAAAFPNgDkd/Y9Z+cuKctZk9gjwF8thT+fkmNCsulILsJw5StGHAA1f7uL
MG73c5WBcesHB2epwazfbD3/0UZTlxXWXotywVHhjiS5XjnytJMYNVOp3PP0WKDc
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
/KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
+QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp40cTBnAw+rMKC
98P4pURrotx116Kd0i3XmMZu81ew57H3Zb73r+syQCXZNOP0xhMDclIt0p2xw2Wn
z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
72mXGlqyLyWYuAAAAAA="""
|---|