| 1 | """ |
|---|
| 2 | Ported to Python 3. |
|---|
| 3 | """ |
|---|
| 4 | |
|---|
| 5 | from io import StringIO |
|---|
| 6 | from twisted.internet import defer, reactor |
|---|
| 7 | from ..common import AsyncBrokenTestCase |
|---|
| 8 | from testtools.matchers import ( |
|---|
| 9 | Equals, |
|---|
| 10 | Contains, |
|---|
| 11 | HasLength, |
|---|
| 12 | Is, |
|---|
| 13 | IsInstance, |
|---|
| 14 | ) |
|---|
| 15 | from allmydata import uri, client |
|---|
| 16 | from allmydata.util.consumer import MemoryConsumer |
|---|
| 17 | from allmydata.interfaces import SDMF_VERSION, MDMF_VERSION, DownloadStopped |
|---|
| 18 | from allmydata.mutable.filenode import MutableFileNode, BackoffAgent |
|---|
| 19 | from allmydata.mutable.common import MODE_ANYTHING, MODE_WRITE, MODE_READ, UncoordinatedWriteError |
|---|
| 20 | |
|---|
| 21 | from allmydata.mutable.publish import MutableData |
|---|
| 22 | from ..test_download import PausingConsumer, PausingAndStoppingConsumer, \ |
|---|
| 23 | StoppingConsumer, ImmediatelyStoppingConsumer |
|---|
| 24 | from .. import common_util as testutil |
|---|
| 25 | from ...crypto.rsa import create_signing_keypair |
|---|
| 26 | from .util import ( |
|---|
| 27 | FakeStorage, |
|---|
| 28 | make_nodemaker_with_peers, |
|---|
| 29 | make_peer, |
|---|
| 30 | ) |
|---|
| 31 | |
|---|
class Filenode(AsyncBrokenTestCase, testutil.ShouldFailMixin):
    """
    Tests for reading, writing, and modifying mutable files through
    ``MutableFileNode``, against a set of simulated storage servers.
    """
    # this used to be in Publish, but we removed the limit. Some of
    # these tests test whether the new code correctly allows files
    # larger than the limit.
    OLD_MAX_SEGMENT_SIZE = 3500000
    def setUp(self):
        """
        Create ten simulated storage servers backed by one FakeStorage
        and a nodemaker wired to them.
        """
        super(Filenode, self).setUp()
        self._storage = FakeStorage()
        self._peers = list(
            make_peer(self._storage, n)
            for n
            # 10 is the default for N. We're trying to make enough servers
            # here so that each only gets one share.
            in range(10)
        )
        self.nodemaker = make_nodemaker_with_peers(self._peers)
| 49 | def test_create(self): |
|---|
| 50 | d = self.nodemaker.create_mutable_file() |
|---|
| 51 | def _created(n): |
|---|
| 52 | self.assertThat(n, IsInstance(MutableFileNode)) |
|---|
| 53 | self.assertThat(n.get_storage_index(), Equals(n._storage_index)) |
|---|
| 54 | sb = self.nodemaker.storage_broker |
|---|
| 55 | peer0 = sorted(sb.get_all_serverids())[0] |
|---|
| 56 | shnums = self._storage._peers[peer0].keys() |
|---|
| 57 | self.assertThat(shnums, HasLength(1)) |
|---|
| 58 | d.addCallback(_created) |
|---|
| 59 | return d |
|---|
| 60 | |
|---|
| 61 | async def test_create_with_keypair(self): |
|---|
| 62 | """ |
|---|
| 63 | An SDMF can be created using a given keypair. |
|---|
| 64 | """ |
|---|
| 65 | (priv, pub) = create_signing_keypair(2048) |
|---|
| 66 | node = await self.nodemaker.create_mutable_file(keypair=(pub, priv)) |
|---|
| 67 | self.assertThat( |
|---|
| 68 | (node.get_privkey(), node.get_pubkey()), |
|---|
| 69 | Equals((priv, pub)), |
|---|
| 70 | ) |
|---|
| 71 | |
|---|
| 72 | def test_create_mdmf(self): |
|---|
| 73 | d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) |
|---|
| 74 | def _created(n): |
|---|
| 75 | self.assertThat(n, IsInstance(MutableFileNode)) |
|---|
| 76 | self.assertThat(n.get_storage_index(), Equals(n._storage_index)) |
|---|
| 77 | sb = self.nodemaker.storage_broker |
|---|
| 78 | peer0 = sorted(sb.get_all_serverids())[0] |
|---|
| 79 | shnums = self._storage._peers[peer0].keys() |
|---|
| 80 | self.assertThat(shnums, HasLength(1)) |
|---|
| 81 | d.addCallback(_created) |
|---|
| 82 | return d |
|---|
| 83 | |
|---|
    def test_single_share(self):
        # Make sure that we tolerate publishing a single share.
        self.nodemaker.default_encoding_parameters['k'] = 1
        self.nodemaker.default_encoding_parameters['happy'] = 1
        self.nodemaker.default_encoding_parameters['n'] = 1
        d = defer.succeed(None)
        # Run the same create/overwrite/download cycle for both formats,
        # chained serially onto the one Deferred.
        for v in (SDMF_VERSION, MDMF_VERSION):
            d.addCallback(lambda ignored, v=v:
                          self.nodemaker.create_mutable_file(version=v))
            def _created(n):
                self.assertThat(n, IsInstance(MutableFileNode))
                # stash the node so later lambdas can reach it
                self._node = n
                return n
            d.addCallback(_created)
            d.addCallback(lambda n:
                          n.overwrite(MutableData(b"Contents" * 50000)))
            d.addCallback(lambda ignored:
                          self._node.download_best_version())
            d.addCallback(lambda contents:
                          self.assertThat(contents, Equals(b"Contents" * 50000)))
        return d
    def test_max_shares(self):
        """
        An SDMF file still round-trips when N is raised to 255 shares.
        """
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
        def _created(n):
            self.assertThat(n, IsInstance(MutableFileNode))
            self.assertThat(n.get_storage_index(), Equals(n._storage_index))
            sb = self.nodemaker.storage_broker
            # count every share stored across all servers
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.assertThat(num_shares, Equals(255))
            self._node = n
            return n
        d.addCallback(_created)
        # Now we upload some contents
        d.addCallback(lambda n:
                      n.overwrite(MutableData(b"contents" * 50000)))
        # ...then download contents
        d.addCallback(lambda ignored:
                      self._node.download_best_version())
        # ...and check to make sure everything went okay.
        d.addCallback(lambda contents:
                      self.assertThat(b"contents" * 50000, Equals(contents)))
        return d
    def test_max_shares_mdmf(self):
        # Test how files behave when there are 255 shares.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.assertThat(n, IsInstance(MutableFileNode))
            self.assertThat(n.get_storage_index(), Equals(n._storage_index))
            sb = self.nodemaker.storage_broker
            # count every share stored across all servers
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.assertThat(num_shares, Equals(255))
            self._node = n
            return n
        d.addCallback(_created)
        # write, read back, and verify the contents survived
        d.addCallback(lambda n:
                      n.overwrite(MutableData(b"contents" * 50000)))
        d.addCallback(lambda ignored:
                      self._node.download_best_version())
        d.addCallback(lambda contents:
                      self.assertThat(contents, Equals(b"contents" * 50000)))
        return d
| 152 | def test_mdmf_filenode_cap(self): |
|---|
| 153 | # Test that an MDMF filenode, once created, returns an MDMF URI. |
|---|
| 154 | d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) |
|---|
| 155 | def _created(n): |
|---|
| 156 | self.assertThat(n, IsInstance(MutableFileNode)) |
|---|
| 157 | cap = n.get_cap() |
|---|
| 158 | self.assertThat(cap, IsInstance(uri.WriteableMDMFFileURI)) |
|---|
| 159 | rcap = n.get_readcap() |
|---|
| 160 | self.assertThat(rcap, IsInstance(uri.ReadonlyMDMFFileURI)) |
|---|
| 161 | vcap = n.get_verify_cap() |
|---|
| 162 | self.assertThat(vcap, IsInstance(uri.MDMFVerifierURI)) |
|---|
| 163 | d.addCallback(_created) |
|---|
| 164 | return d |
|---|
| 165 | |
|---|
| 166 | |
|---|
| 167 | def test_create_from_mdmf_writecap(self): |
|---|
| 168 | # Test that the nodemaker is capable of creating an MDMF |
|---|
| 169 | # filenode given an MDMF cap. |
|---|
| 170 | d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) |
|---|
| 171 | def _created(n): |
|---|
| 172 | self.assertThat(n, IsInstance(MutableFileNode)) |
|---|
| 173 | s = n.get_uri() |
|---|
| 174 | self.assertTrue(s.startswith(b"URI:MDMF")) |
|---|
| 175 | n2 = self.nodemaker.create_from_cap(s) |
|---|
| 176 | self.assertThat(n2, IsInstance(MutableFileNode)) |
|---|
| 177 | self.assertThat(n.get_storage_index(), Equals(n2.get_storage_index())) |
|---|
| 178 | self.assertThat(n.get_uri(), Equals(n2.get_uri())) |
|---|
| 179 | d.addCallback(_created) |
|---|
| 180 | return d |
|---|
| 181 | |
|---|
| 182 | |
|---|
| 183 | def test_create_from_mdmf_readcap(self): |
|---|
| 184 | d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) |
|---|
| 185 | def _created(n): |
|---|
| 186 | self.assertThat(n, IsInstance(MutableFileNode)) |
|---|
| 187 | s = n.get_readonly_uri() |
|---|
| 188 | n2 = self.nodemaker.create_from_cap(s) |
|---|
| 189 | self.assertThat(n2, IsInstance(MutableFileNode)) |
|---|
| 190 | |
|---|
| 191 | # Check that it's a readonly node |
|---|
| 192 | self.assertTrue(n2.is_readonly()) |
|---|
| 193 | d.addCallback(_created) |
|---|
| 194 | return d |
|---|
| 195 | |
|---|
| 196 | |
|---|
| 197 | def test_internal_version_from_cap(self): |
|---|
| 198 | # MutableFileNodes and MutableFileVersions have an internal |
|---|
| 199 | # switch that tells them whether they're dealing with an SDMF or |
|---|
| 200 | # MDMF mutable file when they start doing stuff. We want to make |
|---|
| 201 | # sure that this is set appropriately given an MDMF cap. |
|---|
| 202 | d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) |
|---|
| 203 | def _created(n): |
|---|
| 204 | self.uri = n.get_uri() |
|---|
| 205 | self.assertThat(n._protocol_version, Equals(MDMF_VERSION)) |
|---|
| 206 | |
|---|
| 207 | n2 = self.nodemaker.create_from_cap(self.uri) |
|---|
| 208 | self.assertThat(n2._protocol_version, Equals(MDMF_VERSION)) |
|---|
| 209 | d.addCallback(_created) |
|---|
| 210 | return d |
|---|
| 211 | |
|---|
| 212 | |
|---|
    def test_serialize(self):
        """
        ``MutableFileNode._do_serialized`` passes args/kwargs through to
        the callable, propagates its return value, and delivers raised
        exceptions as Deferred failures.
        """
        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
        calls = []
        def _callback(*args, **kwargs):
            self.assertThat(args, Equals((4,)))
            self.assertThat(kwargs, Equals({"foo": 5}))
            calls.append(1)
            return 6
        d = n._do_serialized(_callback, 4, foo=5)
        def _check_callback(res):
            # the callable's return value comes back, and it ran once
            self.assertThat(res, Equals(6))
            self.assertThat(calls, Equals([1]))
        d.addCallback(_check_callback)

        def _errback():
            raise ValueError("heya")
        d.addCallback(lambda res:
            self.shouldFail(ValueError, "_check_errback", "heya",
                            n._do_serialized, _errback))
        return d
    def test_upload_and_download(self):
        """
        Exercise the full SDMF cycle: servermap inspection, overwrite,
        download, size query, upload-against-a-servermap, and download
        of a specific recoverable version.
        """
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.assertTrue("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData(b"contents 1")))
            # overwrite() fires with None on success
            d.addCallback(lambda res: self.assertThat(res, Is(None)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 1")))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                          self.assertThat(size, Equals(len(b"contents 1"))))
            d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 2")))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData(b"contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 3")))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 3")))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData(b"large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                          self.assertThat(res, Equals(b"large size file" * 1000)))
            return d
        d.addCallback(_created)
        return d
    def test_upload_and_download_mdmf(self):
        """
        MDMF files survive overwrite/download cycles with contents large
        enough to force multi-segment uploads and downloads.
        """
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda ignored:
                          n.get_servermap(MODE_READ))
            def _then(servermap):
                dumped = servermap.dump(StringIO())
                self.assertThat(dumped.getvalue(), Contains("3-of-10"))
            d.addCallback(_then)
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = b"contents1" * 100000 # about 900 KiB
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored:
                          n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored:
                          n.download_best_version())
            d.addCallback(lambda data:
                          self.assertThat(data, Equals(big_contents)))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            # multiple segments.
            bigger_contents = b"contents2" * 1000000 # about 9MiB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(lambda ignored:
                          n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored:
                          n.download_best_version())
            d.addCallback(lambda data:
                          self.assertThat(data, Equals(bigger_contents)))
            return d
        d.addCallback(_created)
        return d
| 312 | def test_retrieve_producer_mdmf(self): |
|---|
| 313 | # We should make sure that the retriever is able to pause and stop |
|---|
| 314 | # correctly. |
|---|
| 315 | data = b"contents1" * 100000 |
|---|
| 316 | d = self.nodemaker.create_mutable_file(MutableData(data), |
|---|
| 317 | version=MDMF_VERSION) |
|---|
| 318 | d.addCallback(lambda node: node.get_best_mutable_version()) |
|---|
| 319 | d.addCallback(self._test_retrieve_producer, "MDMF", data) |
|---|
| 320 | return d |
|---|
| 321 | |
|---|
    # note: SDMF has only one big segment, so we can't use the usual
    # after-the-first-write() trick to pause or stop the download.
    # Disabled until we find a better approach.
    def OFF_test_retrieve_producer_sdmf(self):
        """
        Disabled (the ``OFF_`` prefix keeps the test runner from
        collecting it): SDMF counterpart of test_retrieve_producer_mdmf.
        """
        data = b"contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=SDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "SDMF", data)
        return d
    def _test_retrieve_producer(self, version, kind, data):
        """
        Drive ``version.read`` with a series of producer-aware consumers:
        one that pauses, then several that stop the download at different
        points; each stop must fail with DownloadStopped.

        :param version: a mutable file version object (arrives as the
            previous callback's result).
        :param kind: "MDMF"/"SDMF" label used in shouldFail identifiers.
        :param data: the expected plaintext, used for the size check.
        """
        # Now we'll retrieve it into a pausing consumer.
        c = PausingConsumer()
        d = version.read(c)
        d.addCallback(lambda ign: self.assertThat(c.size, Equals(len(data))))

        c2 = PausingAndStoppingConsumer()
        d.addCallback(lambda ign:
            self.shouldFail(DownloadStopped, kind+"_pause_stop",
                            "our Consumer called stopProducing()",
                            version.read, c2))

        c3 = StoppingConsumer()
        d.addCallback(lambda ign:
            self.shouldFail(DownloadStopped, kind+"_stop",
                            "our Consumer called stopProducing()",
                            version.read, c3))

        c4 = ImmediatelyStoppingConsumer()
        d.addCallback(lambda ign:
            self.shouldFail(DownloadStopped, kind+"_stop_imm",
                            "our Consumer called stopProducing()",
                            version.read, c4))

        def _then(ign):
            # stop the producer by hand right after read() starts; the
            # pending read Deferred must then fail with DownloadStopped
            c5 = MemoryConsumer()
            d1 = version.read(c5)
            c5.producer.stopProducing()
            return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
                                   "our Consumer called stopProducing()",
                                   lambda: d1)
        d.addCallback(_then)
        return d
| 367 | def test_download_from_mdmf_cap(self): |
|---|
| 368 | # We should be able to download an MDMF file given its cap |
|---|
| 369 | d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) |
|---|
| 370 | def _created(node): |
|---|
| 371 | self.uri = node.get_uri() |
|---|
| 372 | # also confirm that the cap has no extension fields |
|---|
| 373 | pieces = self.uri.split(b":") |
|---|
| 374 | self.assertThat(pieces, HasLength(4)) |
|---|
| 375 | |
|---|
| 376 | return node.overwrite(MutableData(b"contents1" * 100000)) |
|---|
| 377 | def _then(ignored): |
|---|
| 378 | node = self.nodemaker.create_from_cap(self.uri) |
|---|
| 379 | return node.download_best_version() |
|---|
| 380 | def _downloaded(data): |
|---|
| 381 | self.assertThat(data, Equals(b"contents1" * 100000)) |
|---|
| 382 | d.addCallback(_created) |
|---|
| 383 | d.addCallback(_then) |
|---|
| 384 | d.addCallback(_downloaded) |
|---|
| 385 | return d |
|---|
| 386 | |
|---|
| 387 | |
|---|
| 388 | def test_mdmf_write_count(self): |
|---|
| 389 | """ |
|---|
| 390 | Publishing an MDMF file causes exactly one write for each share that is to |
|---|
| 391 | be published. Otherwise, we introduce undesirable semantics that are a |
|---|
| 392 | regression from SDMF. |
|---|
| 393 | """ |
|---|
| 394 | upload = MutableData(b"MDMF" * 100000) # about 400 KiB |
|---|
| 395 | d = self.nodemaker.create_mutable_file(upload, |
|---|
| 396 | version=MDMF_VERSION) |
|---|
| 397 | def _check_server_write_counts(ignored): |
|---|
| 398 | for peer in self._peers: |
|---|
| 399 | # There were enough servers for each to only get a single |
|---|
| 400 | # share. |
|---|
| 401 | self.assertEqual(peer.storage_server.queries, 1) |
|---|
| 402 | d.addCallback(_check_server_write_counts) |
|---|
| 403 | return d |
|---|
| 404 | |
|---|
| 405 | |
|---|
| 406 | def test_create_with_initial_contents(self): |
|---|
| 407 | upload1 = MutableData(b"contents 1") |
|---|
| 408 | d = self.nodemaker.create_mutable_file(upload1) |
|---|
| 409 | def _created(n): |
|---|
| 410 | d = n.download_best_version() |
|---|
| 411 | d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 1"))) |
|---|
| 412 | upload2 = MutableData(b"contents 2") |
|---|
| 413 | d.addCallback(lambda res: n.overwrite(upload2)) |
|---|
| 414 | d.addCallback(lambda res: n.download_best_version()) |
|---|
| 415 | d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 2"))) |
|---|
| 416 | return d |
|---|
| 417 | d.addCallback(_created) |
|---|
| 418 | return d |
|---|
| 419 | |
|---|
| 420 | |
|---|
    def test_create_mdmf_with_initial_contents(self):
        """
        An MDMF file created with ~900KiB of initial contents round-trips,
        and can be overwritten with slightly longer contents.
        """
        initial_contents = b"foobarbaz" * 131072 # 900KiB
        initial_contents_uploadable = MutableData(initial_contents)
        d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
                                               version=MDMF_VERSION)
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda data:
                          self.assertThat(data, Equals(initial_contents)))
            # grow the file a little and make sure the change sticks
            uploadable2 = MutableData(initial_contents + b"foobarbaz")
            d.addCallback(lambda ignored:
                          n.overwrite(uploadable2))
            d.addCallback(lambda ignored:
                          n.download_best_version())
            d.addCallback(lambda data:
                          self.assertThat(data, Equals(initial_contents +
                                                       b"foobarbaz")))
            return d
        d.addCallback(_created)
        return d
| 442 | def test_create_with_initial_contents_function(self): |
|---|
| 443 | data = b"initial contents" |
|---|
| 444 | def _make_contents(n): |
|---|
| 445 | self.assertThat(n, IsInstance(MutableFileNode)) |
|---|
| 446 | key = n.get_writekey() |
|---|
| 447 | self.assertTrue(isinstance(key, bytes), key) |
|---|
| 448 | self.assertThat(key, HasLength(16)) # AES key size |
|---|
| 449 | return MutableData(data) |
|---|
| 450 | d = self.nodemaker.create_mutable_file(_make_contents) |
|---|
| 451 | def _created(n): |
|---|
| 452 | return n.download_best_version() |
|---|
| 453 | d.addCallback(_created) |
|---|
| 454 | d.addCallback(lambda data2: self.assertThat(data2, Equals(data))) |
|---|
| 455 | return d |
|---|
| 456 | |
|---|
| 457 | |
|---|
| 458 | def test_create_mdmf_with_initial_contents_function(self): |
|---|
| 459 | data = b"initial contents" * 100000 |
|---|
| 460 | def _make_contents(n): |
|---|
| 461 | self.assertThat(n, IsInstance(MutableFileNode)) |
|---|
| 462 | key = n.get_writekey() |
|---|
| 463 | self.assertTrue(isinstance(key, bytes), key) |
|---|
| 464 | self.assertThat(key, HasLength(16)) |
|---|
| 465 | return MutableData(data) |
|---|
| 466 | d = self.nodemaker.create_mutable_file(_make_contents, |
|---|
| 467 | version=MDMF_VERSION) |
|---|
| 468 | d.addCallback(lambda n: |
|---|
| 469 | n.download_best_version()) |
|---|
| 470 | d.addCallback(lambda data2: |
|---|
| 471 | self.assertThat(data2, Equals(data))) |
|---|
| 472 | return d |
|---|
| 473 | |
|---|
| 474 | |
|---|
| 475 | def test_create_with_too_large_contents(self): |
|---|
| 476 | BIG = b"a" * (self.OLD_MAX_SEGMENT_SIZE + 1) |
|---|
| 477 | BIG_uploadable = MutableData(BIG) |
|---|
| 478 | d = self.nodemaker.create_mutable_file(BIG_uploadable) |
|---|
| 479 | def _created(n): |
|---|
| 480 | other_BIG_uploadable = MutableData(BIG) |
|---|
| 481 | d = n.overwrite(other_BIG_uploadable) |
|---|
| 482 | return d |
|---|
| 483 | d.addCallback(_created) |
|---|
| 484 | return d |
|---|
| 485 | |
|---|
| 486 | def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which): |
|---|
| 487 | d = n.get_servermap(MODE_READ) |
|---|
| 488 | d.addCallback(lambda servermap: servermap.best_recoverable_version()) |
|---|
| 489 | d.addCallback(lambda verinfo: |
|---|
| 490 | self.assertThat(verinfo[0], Equals(expected_seqnum), which)) |
|---|
| 491 | return d |
|---|
| 492 | |
|---|
    def test_modify(self):
        """
        ``modify`` applies a modifier callable to the current contents:
        no-op and None-returning modifiers publish nothing (seqnum stays
        put), exceptions propagate without publishing, and a single
        simulated UncoordinatedWriteError is retried.
        """
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + b"line2"
            return new_contents
        def _non_modifier(old_contents, servermap, first_time):
            # returning the old contents unchanged should not publish
            return old_contents
        def _none_modifier(old_contents, servermap, first_time):
            # returning None means "no change"; nothing is published
            return None
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            # one byte beyond the old segment-size limit; must succeed
            # now that the limit is removed (see OLD_MAX_SEGMENT_SIZE)
            new_content = b"b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
            return new_content
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + b"line3"
            return new_contents
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents

        initial_contents = b"line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2")))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2")))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))

            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2")))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))

            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2")))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))

            # NOTE(review): no publish happens between the "err" check and
            # here, so contents and seqnum are expected to be unchanged.
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2")))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))

            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            # one failed attempt plus one successful retry
            d.addCallback(lambda res: self.assertThat(calls, HasLength(2)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res,
                                      Equals(b"line1line2line3")))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)

            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            # instead of 5).
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.assertThat(calls, HasLength(2)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res,
                                      Equals(b"line1line2line3")))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            d.addCallback(lambda res: n.modify(_toobig_modifier))
            return d
        d.addCallback(_created)
        return d
    def test_modify_backoffer(self):
        """
        ``modify`` accepts a backoffer: one that returns the failure
        aborts the retry (UCWE surfaces), one that pauses lets the retry
        succeed, and a fast BackoffAgent exhausts its retries and fails.
        """
        def _modifier(old_contents, servermap, first_time):
            return old_contents + b"line2"
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents + b"line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
            # returning the failure stops the retry loop
            return f
        def _backoff_pauser(node, f):
            # wait half a second, then let modify() retry
            d = defer.Deferred()
            reactor.callLater(0.5, d.callback, None)
            return d

        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        giveuper.factor = 1

        d = self.nodemaker.create_mutable_file(MutableData(b"line1"))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2")))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
                                          _backoff_stopper))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2")))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
                                               _backoff_pauser))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res,
                                      Equals(b"line1line2line3")))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "giveuper", None,
                                          n.modify, _always_ucw_error_modifier,
                                          giveuper.delay))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res,
                                      Equals(b"line1line2line3")))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))

            return d
        d.addCallback(_created)
        return d
    def test_upload_and_download_full_size_keys(self):
        """
        The SDMF write/read cycle also works with full-size RSA keys from
        the real client KeyGenerator (setUp's nodemaker may use smaller
        test keys).
        """
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.assertTrue("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData(b"contents 1")))
            # overwrite() fires with None on success
            d.addCallback(lambda res: self.assertThat(res, Is(None)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 1")))
            d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 2")))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData(b"contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 3")))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 3")))
            return d
        d.addCallback(_created)
        return d
    def test_size_after_servermap_update(self):
        # a mutable file node should have something to say about how big
        # it is after a servermap update is performed, since this tells
        # us how large the best version of that mutable file is.
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            self.n = n
            return n.get_servermap(MODE_READ)
        d.addCallback(_created)
        d.addCallback(lambda ignored:
                      self.assertThat(self.n.get_size(), Equals(0)))
        d.addCallback(lambda ignored:
                      self.n.overwrite(MutableData(b"foobarbaz")))
        d.addCallback(lambda ignored:
                      self.assertThat(self.n.get_size(), Equals(9)))
        d.addCallback(lambda ignored:
                      self.nodemaker.create_mutable_file(MutableData(b"foobarbaz")))
        # _created is reused on purpose: it points self.n at the newly
        # created file and updates its servermap before the size check.
        d.addCallback(_created)
        d.addCallback(lambda ignored:
                      self.assertThat(self.n.get_size(), Equals(9)))
        return d
|---|