Ticket #1131: tidy-up-logging.dpatch.txt

File tidy-up-logging.dpatch.txt, 24.0 KB (added by zooko, at 2010-08-02T06:46:48Z)
3 patches for repository zooko@dev.allmydata.org:/home/darcs/tahoe-lafs/trunk:

Sun Jul 18 22:47:44 MDT 2010  zooko@zooko.com
  * trivial: rename and add in-line doc to clarify "used_peers" => "upload_servers"

Mon Jul 19 02:20:00 MDT 2010  zooko@zooko.com
  * immutable: use PrefixingLogMixin to organize logging in Tahoe2PeerSelector and add more detailed messages about peer

Mon Aug  2 00:28:09 MDT 2010  zooko@zooko.com
  * upload: tidy up logging messages
  reformat code to be less than 100 chars wide, refactor formatting of logging messages, add log levels to some logging messages, M-x whitespace-cleanup

New patches:

[trivial: rename and add in-line doc to clarify "used_peers" => "upload_servers"
zooko@zooko.com**20100719044744
 Ignore-this: 93c42081676e0dea181e55187cfc506d
] {
hunk ./src/allmydata/immutable/upload.py 178
         """
         @return: (used_peers, already_peers), where used_peers is a set of
                  PeerTracker instances that have agreed to hold some shares
-                 for us (the shnum is stashed inside the PeerTracker),
+                 for us (the shareids are stashed inside the PeerTracker),
                  and already_peers is a dict mapping shnum to a set of peers
                  which claim to already have the share.
         """
hunk ./src/allmydata/immutable/upload.py 913
 
     def set_shareholders(self, (used_peers, already_peers), encoder):
         """
-        @param used_peers: a sequence of PeerTracker objects
+        @param used_peers: a sequence of PeerTracker objects that have agreed to hold some shares for us (the shareids are stashed inside the PeerTracker)
         @param already_peers: a dict mapping sharenum to a set of peerids
                               that claim to already have this share
         """
replace ./src/allmydata/immutable/upload.py [A-Za-z_0-9] used_peers upload_servers
replace ./src/allmydata/test/test_upload.py [A-Za-z_0-9] used_peers upload_servers
replace ./src/allmydata/util/happinessutil.py [A-Za-z_0-9] used_peers upload_servers
}
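
(For illustration, the shape of the renamed tuple: upload_servers is a set of PeerTracker objects whose .buckets dict stashes the shareids they agreed to hold, and already_peers maps each shnum to the peerids that already claim it. This is a hedged sketch, not code from the patch; the PeerTracker below is a stand-in carrying only the fields the docstrings above mention.)

# Sketch only: a stand-in PeerTracker, not allmydata.immutable.upload.PeerTracker.
class PeerTracker:
    def __init__(self, peerid):
        self.peerid = peerid   # 20-byte nodeid of the storage server
        self.buckets = {}      # shnum -> remote bucket writer ("shareids stashed inside")

tracker = PeerTracker("\x01" * 20)
tracker.buckets[0] = "<RIBucketWriter for sh0>"   # placeholder for a remote reference

upload_servers = set([tracker])            # servers that agreed to hold shares
already_peers = {3: set(["\x02" * 20])}    # shnum -> peerids claiming to have it
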
[immutable: use PrefixingLogMixin to organize logging in Tahoe2PeerSelector and add more detailed messages about peer
zooko@zooko.com**20100719082000
 Ignore-this: e034c4988b327f7e138a106d913a3082
] {
hunk ./src/allmydata/immutable/upload.py 77
 # TODO: actual extensions are closer to 419 bytes, so we can probably lower
 # this.
 
+def pretty_print_shnum_to_servers(s):
+    return ', '.join([ "sh%s: %s" % (k, '+'.join([idlib.shortnodeid_b2a(x) for x in v])) for k, v in s.iteritems() ])
+
 class PeerTracker:
     def __init__(self, peerid, storage_server,
                  sharesize, blocksize, num_segments, num_share_hashes,
hunk ./src/allmydata/immutable/upload.py 158
                 del self.buckets[sharenum]
 
 
-class Tahoe2PeerSelector:
+class Tahoe2PeerSelector(log.PrefixingLogMixin):
 
     def __init__(self, upload_id, logparent=None, upload_status=None):
         self.upload_id = upload_id
hunk ./src/allmydata/immutable/upload.py 169
         self.num_peers_contacted = 0
         self.last_failure_msg = None
         self._status = IUploadStatus(upload_status)
-        self._log_parent = log.msg("%s starting" % self, parent=logparent)
+        log.PrefixingLogMixin.__init__(self, 'tahoe.immutable.upload', logparent, prefix=upload_id)
+        self.log("starting", level=log.OPERATIONAL)
 
     def __repr__(self):
         return "<Tahoe2PeerSelector for upload %s>" % self.upload_id
hunk ./src/allmydata/immutable/upload.py 275
             ds.append(d)
             self.num_peers_contacted += 1
             self.query_count += 1
-            log.msg("asking peer %s for any existing shares for "
-                    "upload id %s"
-                    % (idlib.shortnodeid_b2a(peer.peerid), self.upload_id),
-                    level=log.NOISY, parent=self._log_parent)
+            self.log("asking peer %s for any existing shares" %
+                     (idlib.shortnodeid_b2a(peer.peerid),),
+                    level=log.NOISY)
         dl = defer.DeferredList(ds)
         dl.addCallback(lambda ign: self._loop())
         return dl
hunk ./src/allmydata/immutable/upload.py 289
         Tahoe2PeerSelector._existing_shares.
         """
         if isinstance(res, failure.Failure):
-            log.msg("%s got error during existing shares check: %s"
+            self.log("%s got error during existing shares check: %s"
                     % (idlib.shortnodeid_b2a(peer), res),
hunk ./src/allmydata/immutable/upload.py 291
-                    level=log.UNUSUAL, parent=self._log_parent)
+                    level=log.UNUSUAL)
             self.error_count += 1
             self.bad_query_count += 1
         else:
hunk ./src/allmydata/immutable/upload.py 298
             buckets = res
             if buckets:
                 self.peers_with_shares.add(peer)
-            log.msg("response from peer %s: alreadygot=%s"
+            self.log("response to get_buckets() from peer %s: alreadygot=%s"
                     % (idlib.shortnodeid_b2a(peer), tuple(sorted(buckets))),
hunk ./src/allmydata/immutable/upload.py 300
-                    level=log.NOISY, parent=self._log_parent)
+                    level=log.NOISY)
             for bucket in buckets:
                 self.preexisting_shares.setdefault(bucket, set()).add(peer)
                 if self.homeless_shares and bucket in self.homeless_shares:
hunk ./src/allmydata/immutable/upload.py 334
             merged = merge_peers(self.preexisting_shares, self.use_peers)
             effective_happiness = servers_of_happiness(merged)
             if self.servers_of_happiness <= effective_happiness:
-                msg = ("peer selection successful for %s: %s" % (self,
-                            self._get_progress_message()))
-                log.msg(msg, parent=self._log_parent)
+                msg = ("server selection successful for %s: %s: pretty_print_merged: %s, self.use_peers: %s, self.preexisting_shares: %s" % (self, self._get_progress_message(), pretty_print_shnum_to_servers(merged), [', '.join(["%s: %s" % (k, idlib.shortnodeid_b2a(v._nodeid),) for k,v in p.buckets.iteritems()]) for p in self.use_peers], pretty_print_shnum_to_servers(self.preexisting_shares)))
+                self.log(msg, level=log.OPERATIONAL)
                 return (self.use_peers, self.preexisting_shares)
             else:
                 # We're not okay right now, but maybe we can fix it by
hunk ./src/allmydata/immutable/upload.py 379
                                           self.needed_shares,
                                           self.servers_of_happiness,
                                           effective_happiness)
-                    log.msg("server selection unsuccessful for %r: %s (%s), merged=%r"
-                            % (self, msg, self._get_progress_message(), merged), level=log.INFREQUENT)
+                    self.log("server selection unsuccessful for %r: %s (%s), merged=%s" % (self, msg, self._get_progress_message(), pretty_print_shnum_to_servers(merged)), level=log.INFREQUENT)
                     return self._failed("%s (%s)" % (msg, self._get_progress_message()))
 
         if self.uncontacted_peers:
hunk ./src/allmydata/immutable/upload.py 402
         elif self.contacted_peers:
             # ask a peer that we've already asked.
             if not self._started_second_pass:
-                log.msg("starting second pass", parent=self._log_parent,
+                self.log("starting second pass",
                         level=log.NOISY)
                 self._started_second_pass = True
             num_shares = mathutil.div_ceil(len(self.homeless_shares),
hunk ./src/allmydata/immutable/upload.py 440
                                 self._get_progress_message()))
                 if self.last_failure_msg:
                     msg += " (%s)" % (self.last_failure_msg,)
-                log.msg(msg, level=log.UNUSUAL, parent=self._log_parent)
+                self.log(msg, level=log.UNUSUAL)
                 return self._failed(msg)
             else:
                 # we placed enough to be happy, so we're done
hunk ./src/allmydata/immutable/upload.py 446
                 if self._status:
                     self._status.set_status("Placed all shares")
+                msg = ("server selection successful (no more servers) for %s: %s: %s" % (self,
+                            self._get_progress_message(), pretty_print_shnum_to_servers(merged)))
+                self.log(msg, level=log.OPERATIONAL)
                 return (self.use_peers, self.preexisting_shares)
 
     def _got_response(self, res, peer, shares_to_ask, put_peer_here):
hunk ./src/allmydata/immutable/upload.py 455
         if isinstance(res, failure.Failure):
             # This is unusual, and probably indicates a bug or a network
             # problem.
-            log.msg("%s got error during peer selection: %s" % (peer, res),
-                    level=log.UNUSUAL, parent=self._log_parent)
+            self.log("%s got error during peer selection: %s" % (peer, res),
+                    level=log.UNUSUAL)
             self.error_count += 1
             self.bad_query_count += 1
             self.homeless_shares = list(shares_to_ask) + self.homeless_shares
hunk ./src/allmydata/immutable/upload.py 475
                 self.last_failure_msg = msg
         else:
             (alreadygot, allocated) = res
-            log.msg("response from peer %s: alreadygot=%s, allocated=%s"
+            self.log("response to allocate_buckets() from peer %s: alreadygot=%s, allocated=%s"
                     % (idlib.shortnodeid_b2a(peer.peerid),
                        tuple(sorted(alreadygot)), tuple(sorted(allocated))),
hunk ./src/allmydata/immutable/upload.py 478
-                    level=log.NOISY, parent=self._log_parent)
+                    level=log.NOISY)
             progress = False
             for s in alreadygot:
                 self.preexisting_shares.setdefault(s, set()).add(peer.peerid)
hunk ./src/allmydata/immutable/upload.py 921
         @param already_peers: a dict mapping sharenum to a set of peerids
                               that claim to already have this share
         """
-        self.log("_send_shares, upload_servers is %s" % (upload_servers,))
+        self.log("set_shareholders; upload_servers is %s, already_peers is %s" % ([', '.join(["%s: %s" % (k, idlib.shortnodeid_b2a(v._nodeid),) for k,v in p.buckets.iteritems()]) for p in upload_servers], already_peers))
         # record already-present shares in self._results
         self._results.preexisting_shares = len(already_peers)
 
hunk ./src/allmydata/immutable/upload.py 935
             for shnum in peer.buckets:
                 self._peer_trackers[shnum] = peer
                 servermap.setdefault(shnum, set()).add(peer.peerid)
+        self.log("set_shareholders; %s (%s) == %s (%s)" % (len(buckets), buckets, sum([len(peer.buckets) for peer in upload_servers]), [(p.buckets, p.peerid) for p in upload_servers]))
         assert len(buckets) == sum([len(peer.buckets) for peer in upload_servers]), "%s (%s) != %s (%s)" % (len(buckets), buckets, sum([len(peer.buckets) for peer in upload_servers]), [(p.buckets, p.peerid) for p in upload_servers])
         encoder.set_shareholders(buckets, servermap)
 
hunk ./src/allmydata/storage/server.py 8
 
 from zope.interface import implements
 from allmydata.interfaces import RIStorageServer, IStatsProducer
-from allmydata.util import fileutil, log, time_format
+from allmydata.util import fileutil, idlib, log, time_format
 import allmydata # for __full_version__
 
 from allmydata.storage.common import si_b2a, si_a2b, storage_index_to_dir
hunk ./src/allmydata/storage/server.py 109
                                    expiration_sharetypes)
         self.lease_checker.setServiceParent(self)
 
+    def __repr__(self):
+        return "<StorageServer %s>" % (idlib.shortnodeid_b2a(self.my_nodeid),)
+
     def add_bucket_counter(self):
         statefile = os.path.join(self.storedir, "bucket_counter.state")
         self.bucket_counter = BucketCountingCrawler(self, statefile)
hunk ./src/allmydata/test/test_upload.py 14
 from allmydata import uri, monitor, client
 from allmydata.immutable import upload, encode
 from allmydata.interfaces import FileTooLargeError, UploadUnhappinessError
+from allmydata.util import log
 from allmydata.util.assertutil import precondition
 from allmydata.util.deferredutil import DeferredListShouldSucceed
 from allmydata.test.no_network import GridTestMixin
hunk ./src/allmydata/test/test_upload.py 714
 
 def is_happy_enough(servertoshnums, h, k):
     """ I calculate whether servertoshnums achieves happiness level h. I do this with a naïve "brute force search" approach. (See src/allmydata/util/happinessutil.py for a better algorithm.) """
+    print "servertoshnums: ", servertoshnums, "h: ", h, "k: ", k
     if len(servertoshnums) < h:
         return False
     # print "servertoshnums: ", servertoshnums, h, k
hunk ./src/allmydata/test/test_upload.py 803
     def _add_server(self, server_number, readonly=False):
         assert self.g, "I tried to find a grid at self.g, but failed"
         ss = self.g.make_server(server_number, readonly)
+        log.msg("just created a server, number: %s => %s" % (server_number, ss,))
         self.g.add_server(server_number, ss)
 
hunk ./src/allmydata/test/test_upload.py 806
-
     def _add_server_with_share(self, server_number, share_number=None,
                                readonly=False):
         self._add_server(server_number, readonly)
hunk ./src/allmydata/test/test_upload.py 866
         d.addCallback(_store_shares)
         return d
 
-
     def test_configure_parameters(self):
         self.basedir = self.mktemp()
         hooks = {0: self._set_up_nodes_extra_config}
}
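
(To see what the new pretty_print_shnum_to_servers() helper prints, here is a self-contained sketch. The function body is copied from the hunk above; shortnodeid_b2a here is a stand-in for allmydata.util.idlib.shortnodeid_b2a, which abbreviates a 20-byte nodeid to a short base32 string.)

import base64

def shortnodeid_b2a(nodeid):
    # stand-in: abbreviate a 20-byte nodeid to 8 lowercase base32 chars
    return base64.b32encode(nodeid).lower()[:8]

# copied from the patch above (s maps shnum -> set of 20-byte peerids)
def pretty_print_shnum_to_servers(s):
    return ', '.join([ "sh%s: %s" % (k, '+'.join([shortnodeid_b2a(x) for x in v])) for k, v in s.iteritems() ])

print pretty_print_shnum_to_servers({0: set(["\x01" * 20]),
                                     1: set(["\x01" * 20, "\x02" * 20])})
# prints something like (set ordering may vary):
#   sh0: aeaqcaib, sh1: aeaqcaib+aibaeaqc
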
[upload: tidy up logging messages
zooko@zooko.com**20100802062809
 Ignore-this: b7d8b8360cfe4bebd8b8493ce72d15fa
 reformat code to be less than 100 chars wide, refactor formatting of logging messages, add log levels to some logging messages, M-x whitespace-cleanup
] {
hunk ./src/allmydata/immutable/upload.py 158
                 del self.buckets[sharenum]
 
 
+def str_shareloc(shnum, bucketwriter):
+    return "%s: %s" % (shnum, idlib.shortnodeid_b2a(bucketwriter._nodeid),)
+
 class Tahoe2PeerSelector(log.PrefixingLogMixin):
 
     def __init__(self, upload_id, logparent=None, upload_status=None):
hunk ./src/allmydata/immutable/upload.py 337
             merged = merge_peers(self.preexisting_shares, self.use_peers)
             effective_happiness = servers_of_happiness(merged)
             if self.servers_of_happiness <= effective_happiness:
-                msg = ("server selection successful for %s: %s: pretty_print_merged: %s, self.use_peers: %s, self.preexisting_shares: %s" % (self, self._get_progress_message(), pretty_print_shnum_to_servers(merged), [', '.join(["%s: %s" % (k, idlib.shortnodeid_b2a(v._nodeid),) for k,v in p.buckets.iteritems()]) for p in self.use_peers], pretty_print_shnum_to_servers(self.preexisting_shares)))
+                msg = ("server selection successful for %s: %s: pretty_print_merged: %s, "
+                       "self.use_peers: %s, self.preexisting_shares: %s"
+                       % (self, self._get_progress_message(),
+                          pretty_print_shnum_to_servers(merged),
+                          [', '.join([str_shareloc(k,v) for k,v in p.buckets.iteritems()])
+                               for p in self.use_peers],
+                          pretty_print_shnum_to_servers(self.preexisting_shares)))
288                 self.log(msg, level=log.OPERATIONAL)
289                 return (self.use_peers, self.preexisting_shares)
290             else:
291hunk ./src/allmydata/immutable/upload.py 351
292                 # redistributing some shares. In cases where one or two
                 # servers have, before the upload, all or most of the
                 # shares for a given SI, this can work by allowing _loop
-                # a chance to spread those out over the other peers,
+                # a chance to spread those out over the other peers,
                 delta = self.servers_of_happiness - effective_happiness
                 shares = shares_by_server(self.preexisting_shares)
                 # Each server in shares maps to a set of shares stored on it.
hunk ./src/allmydata/immutable/upload.py 355
-                # Since we want to keep at least one share on each server
+                # Since we want to keep at least one share on each server
                 # that has one (otherwise we'd only be making
                 # the situation worse by removing distinct servers),
                 # each server has len(its shares) - 1 to spread around.
hunk ./src/allmydata/immutable/upload.py 384
                 else:
                     # Redistribution won't help us; fail.
                     peer_count = len(self.peers_with_shares)
-                    msg = failure_message(peer_count,
+                    failmsg = failure_message(peer_count,
                                           self.needed_shares,
                                           self.servers_of_happiness,
                                           effective_happiness)
hunk ./src/allmydata/immutable/upload.py 388
-                    self.log("server selection unsuccessful for %r: %s (%s), merged=%s" % (self, msg, self._get_progress_message(), pretty_print_shnum_to_servers(merged)), level=log.INFREQUENT)
-                    return self._failed("%s (%s)" % (msg, self._get_progress_message()))
+                    servmsgtempl = "server selection unsuccessful for %r: %s (%s), merged=%s"
+                    servmsg = servmsgtempl % (
+                        self,
+                        failmsg,
+                        self._get_progress_message(),
+                        pretty_print_shnum_to_servers(merged)
+                        )
+                    self.log(servmsg, level=log.INFREQUENT)
+                    return self._failed("%s (%s)" % (failmsg, self._get_progress_message()))
 
         if self.uncontacted_peers:
             peer = self.uncontacted_peers.pop(0)
hunk ./src/allmydata/immutable/upload.py 933
 
     def set_shareholders(self, (upload_servers, already_peers), encoder):
         """
-        @param upload_servers: a sequence of PeerTracker objects that have agreed to hold some shares for us (the shareids are stashed inside the PeerTracker)
+        @param upload_servers: a sequence of PeerTracker objects that have agreed to hold some
+            shares for us (the shareids are stashed inside the PeerTracker)
         @param already_peers: a dict mapping sharenum to a set of peerids
                               that claim to already have this share
         """
hunk ./src/allmydata/immutable/upload.py 938
-        self.log("set_shareholders; upload_servers is %s, already_peers is %s" % ([', '.join(["%s: %s" % (k, idlib.shortnodeid_b2a(v._nodeid),) for k,v in p.buckets.iteritems()]) for p in upload_servers], already_peers))
+        msgtempl = "set_shareholders; upload_servers is %s, already_peers is %s"
+        values = ([', '.join([str_shareloc(k,v) for k,v in p.buckets.iteritems()])
+            for p in upload_servers], already_peers)
+        self.log(msgtempl % values, level=log.OPERATIONAL)
         # record already-present shares in self._results
         self._results.preexisting_shares = len(already_peers)
 
hunk ./src/allmydata/immutable/upload.py 955
             for shnum in peer.buckets:
                 self._peer_trackers[shnum] = peer
                 servermap.setdefault(shnum, set()).add(peer.peerid)
-        self.log("set_shareholders; %s (%s) == %s (%s)" % (len(buckets), buckets, sum([len(peer.buckets) for peer in upload_servers]), [(p.buckets, p.peerid) for p in upload_servers]))
-        assert len(buckets) == sum([len(peer.buckets) for peer in upload_servers]), "%s (%s) != %s (%s)" % (len(buckets), buckets, sum([len(peer.buckets) for peer in upload_servers]), [(p.buckets, p.peerid) for p in upload_servers])
+        assert len(buckets) == sum([len(peer.buckets) for peer in upload_servers]), \
+            "%s (%s) != %s (%s)" % (
+                len(buckets),
+                buckets,
+                sum([len(peer.buckets) for peer in upload_servers]),
+                [(p.buckets, p.peerid) for p in upload_servers]
+                )
         encoder.set_shareholders(buckets, servermap)
 
     def _encrypted_done(self, verifycap):
hunk ./src/allmydata/immutable/upload.py 1165
         now = self._time_contacting_helper_start = time.time()
         self._storage_index_elapsed = now - self._started
         self.log(format="contacting helper for SI %(si)s..",
-                 si=si_b2a(self._storage_index))
+                 si=si_b2a(self._storage_index), level=log.NOISY)
         self._upload_status.set_status("Contacting Helper")
         d = self._helper.callRemote("upload_chk", self._storage_index)
         d.addCallback(self._contacted_helper)
hunk ./src/allmydata/immutable/upload.py 1176
         elapsed = now - self._time_contacting_helper_start
         self._elapsed_time_contacting_helper = elapsed
         if upload_helper:
-            self.log("helper says we need to upload")
+            self.log("helper says we need to upload", level=log.NOISY)
             self._upload_status.set_status("Uploading Ciphertext")
             # we need to upload the file
             reu = RemoteEncryptedUploadable(self._encuploadable,
hunk ./src/allmydata/immutable/upload.py 1187
                           upload_helper.callRemote("upload", reu))
             # this Deferred will fire with the upload results
             return d
-        self.log("helper says file is already uploaded")
+        self.log("helper says file is already uploaded", level=log.OPERATIONAL)
         self._upload_status.set_progress(1, 1.0)
         self._upload_status.set_results(upload_results)
         return upload_results
hunk ./src/allmydata/immutable/upload.py 1210
             upload_results.sharemap = None
 
     def _build_verifycap(self, upload_results):
-        self.log("upload finished, building readcap")
+        self.log("upload finished, building readcap", level=log.OPERATIONAL)
         self._convert_old_upload_results(upload_results)
         self._upload_status.set_status("Building Readcap")
         r = upload_results
}
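
(The PrefixingLogMixin pattern that ties these three patches together can be sketched as follows. This is a simplified stand-in, not the real allmydata.util.log class, which delegates to foolscap's logging; the numeric severity values below are arbitrary placeholders, and "dglev" is a made-up upload id. The sketch only shows why every Tahoe2PeerSelector message now carries the upload id automatically.)

# Simplified sketch of the PrefixingLogMixin idea; severity names match the
# levels used in the patches, but the numeric values here are stand-ins.
NOISY, OPERATIONAL, UNUSUAL, INFREQUENT = 10, 20, 30, 40

class PrefixingLogMixin:
    def __init__(self, facility=None, parent=None, prefix=None):
        self._facility = facility   # e.g. 'tahoe.immutable.upload'
        self._parent = parent       # parent log event; unused in this sketch
        self._prefix = prefix       # e.g. the upload id

    def log(self, msg, level=OPERATIONAL):
        # every message from this object is tagged with the same facility and
        # prefix, so one upload's events can be grepped out as a group
        print "%s %s(%s): %s" % (level, self._facility, self._prefix, msg)

class Tahoe2PeerSelector(PrefixingLogMixin):
    def __init__(self, upload_id, logparent=None):
        PrefixingLogMixin.__init__(self, 'tahoe.immutable.upload', logparent,
                                   prefix=upload_id)
        self.log("starting", level=OPERATIONAL)

Tahoe2PeerSelector("dglev")   # prints: 20 tahoe.immutable.upload(dglev): starting
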

Context:

[docs: add Jacob Lyles to CREDITS
zooko@zooko.com**20100730230500
 Ignore-this: 9dbbd6a591b4b1a5a8dcb69b7b757792
]
[web: don't use %d formatting on a potentially large negative float -- there is a bug in Python 2.5 in that case
jacob.lyles@gmail.com**20100730220550
 Ignore-this: 7080eb4bddbcce29cba5447f8f4872ee
 fixes #1055
]
[docs: fix licensing typo that was earlier fixed in [20090921164651-92b7f-7f97b58101d93dc588445c52a9aaa56a2c7ae336]
zooko@zooko.com**20100729052923
 Ignore-this: a975d79115911688e5469d4d869e1664
 I wish we didn't have copies of this licensing text in several different files, since changes can then be accidentally omitted from some of them.
]
[misc/build_helpers/run-with-pythonpath.py: fix stale comment, and remove 'trial' example that is not the right way to run trial.
david-sarah@jacaranda.org**20100726225729
 Ignore-this: a61f55557ad69a1633bfb2b8172cce97
]
[docs/specifications/dirnodes.txt: 'mesh'->'grid'.
david-sarah@jacaranda.org**20100723061616
 Ignore-this: 887bcf921ef00afba8e05e9239035bca
]
[docs/specifications/dirnodes.txt: bring layer terminology up-to-date with architecture.txt, and a few other updates (e.g. note that the MAC is no longer verified, and that URIs can be unknown). Also 'Tahoe'->'Tahoe-LAFS'.
david-sarah@jacaranda.org**20100723054703
 Ignore-this: f3b98183e7d0a0f391225b8b93ac6c37
]
[__init__.py: silence DeprecationWarning about BaseException.message globally. fixes #1129
david-sarah@jacaranda.org**20100720011939
 Ignore-this: 38808986ba79cb2786b010504a22f89
]
[test_runner: test that 'tahoe --version' outputs no noise (e.g. DeprecationWarnings).
david-sarah@jacaranda.org**20100720011345
 Ignore-this: dd358b7b2e5d57282cbe133e8069702e
]
[docs: use current cap to Zooko's wiki page in example text
zooko@zooko.com**20100721010543
 Ignore-this: 4f36f36758f9fdbaf9eb73eac23b6652
 fixes #1134
]
[TAG allmydata-tahoe-1.7.1
zooko@zooko.com**20100719131352
 Ignore-this: 6942056548433dc653a746703819ad8c
]
Patch bundle hash:
b0908cb6006dc648a50f57ed437eab671279abf2