1 """
2 Base class for DarwinLog tests.
3 """
4
5 # System imports
6 from __future__ import print_function
7
8 import json
9 import os
10 import platform
11 import re
12 import sys
13 import threading
14
15
16 # lldb imports
17 import lldb
18 from lldb import SBProcess, SBTarget
19
20 from lldbsuite.test import decorators
21 from lldbsuite.test import lldbtest
22 from lldbsuite.test import lldbtest_config
23 from lldbsuite.test import lldbutil
24
25
26 def expand_darwinlog_command(command):
27     return "plugin structured-data darwin-log " + command
28
29
30 def expand_darwinlog_settings_set_command(command):
31     return "settings set plugin.structured-data.darwin-log." + command
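

# Illustrative expansions of the two helpers above.  The option text and the
# setting name used here are hypothetical examples (not taken from this file);
# only the prefixes come from the helpers themselves:
#
#   expand_darwinlog_command("enable --no-match-accepts false")
#     -> "plugin structured-data darwin-log enable --no-match-accepts false"
#
#   expand_darwinlog_settings_set_command("enable-on-startup true")
#     -> "settings set plugin.structured-data.darwin-log.enable-on-startup true"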


class DarwinLogTestBase(lldbtest.TestBase):
    """Base class for DarwinLog test cases that are pexpect-based."""
    NO_DEBUG_INFO_TESTCASE = True

    CONTINUE_REGEX = re.compile(r"Process \d+ resuming")

    def setUp(self):
        # Call super's setUp().
        super(DarwinLogTestBase, self).setUp()

        # Until other systems support this, exit
        # early if we're not macOS version 10.12
        # or greater.
        version = platform.mac_ver()[0].split('.')
        if ((int(version[0]) == 10) and (int(version[1]) < 12) or
                (int(version[0]) < 10)):
            self.skipTest("DarwinLog tests currently require macOS 10.12+")
            return

        self.child = None
        self.child_prompt = '(lldb) '
        self.strict_sources = False
        self.enable_process_monitor_logging = False

    def run_lldb_to_breakpoint(self, exe, source_file, line,
                               enable_command=None, settings_commands=None):

        import pexpect
        # Set self.child_prompt, which is "(lldb) ".
        prompt = self.child_prompt

        # So that the child gets torn down after the test.
        self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec,
                                                 self.lldbOption, exe))
        child = self.child

        # Turn on logging for what the child sends back.
        if self.TraceOn():
            child.logfile_read = sys.stdout

        if self.enable_process_monitor_logging:
            if platform.system() == 'Darwin':
                self.runCmd(
                    "settings set target.process.extra-startup-command "
                    "QSetLogging:bitmask=LOG_DARWIN_LOG;")
                self.expect_prompt()

        # Run the enable command if we have one.
        if enable_command is not None:
            self.runCmd(enable_command)
            self.expect_prompt()

        # Disable showing of source lines at our breakpoint.
        # This is necessary for the logging tests, because the very
        # text we want to match for output from the running inferior
        # will show up in the source as well.  We don't want the source
        # output to erroneously make a match with our expected output.
        self.runCmd("settings set stop-line-count-before 0")
        self.expect_prompt()
        self.runCmd("settings set stop-line-count-after 0")
        self.expect_prompt()

        # While we're debugging, turn on packet logging.
        self.runCmd("log enable -f /tmp/packets.log gdb-remote packets")
        self.expect_prompt()

        # Prevent mirroring of NSLog/os_log content to stderr.  We want log
        # messages to come exclusively through our log channel.
        self.runCmd(
            "settings set target.env-vars IDE_DISABLED_OS_ACTIVITY_DT_MODE=1")
        self.expect_prompt()

        # Run any darwin-log settings commands now, before we enable logging.
        if settings_commands is not None:
            for setting_command in settings_commands:
                self.runCmd(
                    expand_darwinlog_settings_set_command(setting_command))
                self.expect_prompt()

        # Set the breakpoint right before the os_log() macros.  We don't
        # set it on the os_log*() calls themselves because those expand to
        # a number of nested-scoped calls, which makes the debugger stop
        # multiple times on the same line.  Matching os_log() content
        # against stops then becomes unreliable, since the ordering of
        # stops and log lines is non-deterministic.  Instead we stop just
        # before the logging, then let the process run into a sleep
        # afterwards, so the log messages arrive while the target process
        # is "running" (sleeping).
        child.sendline('breakpoint set -f %s -l %d' % (source_file, line))
        child.expect_exact(prompt)

        # Now run to the breakpoint that we just set.
        child.sendline('run')
        child.expect_exact(prompt)

        # Ensure we stopped at a breakpoint.
        self.runCmd("thread list")
        self.expect(re.compile(r"stop reason = breakpoint"))

        # Now we're ready to check if DarwinLog is available.
        if not self.darwin_log_available():
            self.skipTest("DarwinLog not available")

    def runCmd(self, cmd):
        self.child.sendline(cmd)

    def expect_prompt(self, exactly=True):
        self.expect(self.child_prompt, exactly=exactly)

    def expect(self, pattern, exactly=False, *args, **kwargs):
        if exactly:
            return self.child.expect_exact(pattern, *args, **kwargs)
        return self.child.expect(pattern, *args, **kwargs)

    def darwin_log_available(self):
        """Return True if the darwin-log status command reports
        'Availability: available' for the current target."""
        self.runCmd("plugin structured-data darwin-log status")
        self.expect(re.compile(r"Availability: ([\S]+)"))
        return self.child.match is not None and (
            self.child.match.group(1) == "available")

    def do_test(self, enable_options, expect_regexes=None,
                settings_commands=None):
        """Run the inferior with DarwinLog enabled using the given options,
        then match the inferior's output against expect_regexes."""
        self.build(dictionary=self.d)
        self.setTearDownCleanup(dictionary=self.d)

        # Build the darwin-log enable command.
        enable_cmd = expand_darwinlog_command('enable')
        if enable_options is not None and len(enable_options) > 0:
            enable_cmd += ' ' + ' '.join(enable_options)

        exe = os.path.join(os.getcwd(), self.exe_name)
        self.run_lldb_to_breakpoint(exe, self.source, self.line,
                                    enable_command=enable_cmd,
                                    settings_commands=settings_commands)
        self.expect_prompt()

        # Now go.
        self.runCmd("process continue")
        self.expect(self.CONTINUE_REGEX)

        if expect_regexes is None:
            # Expect matching a log line or program exit.
            # Test methods determine which ones are valid.
            expect_regexes = (
                [re.compile(r"source-log-([^-]+)-(\S+)"),
                 re.compile(r"exited with status")
                 ])
        self.expect(expect_regexes)

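
# A hypothetical sketch of how a concrete test case drives DarwinLogTestBase;
# the class name, test name, and breakpoint marker below are illustrative
# assumptions, not definitions from this file.  It is shown as a comment so
# that this base-class module does not itself add a runnable test.
#
#     class TestDarwinLogSomething(DarwinLogTestBase):
#
#         def setUp(self):
#             super(TestDarwinLogSomething, self).setUp()
#             self.source = 'main.c'
#             self.exe_name = 'a.out'
#             self.d = {'C_SOURCES': self.source, 'EXE': self.exe_name}
#             self.line = lldbtest.line_number(self.source, '// break here')
#
#         @decorators.skipUnlessDarwin
#         def test_enable_plain(self):
#             # Expect the default regexes: a source-log-* line or exit.
#             self.do_test(enable_options=[])
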

def remove_add_mode_entry(log_entries):
    """libtrace creates an "Add Mode:..." message when logging is enabled.
    Strip this out of results since our test subjects don't create it."""
    return [entry for entry in log_entries
            if "message" in entry and
            not entry["message"].startswith("Add Mode:")]
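
# Illustrative sketch of what remove_add_mode_entry() filters.  The entry
# contents are hypothetical; only the "message" key and the "Add Mode:"
# prefix come from the function above.
#
#     entries = [
#         {"message": "Add Mode: ..."},        # libtrace bookkeeping, dropped
#         {"message": "source-log-sub-cat"},   # inferior output, kept
#     ]
#     assert remove_add_mode_entry(entries) == [
#         {"message": "source-log-sub-cat"}]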


class DarwinLogEventBasedTestBase(lldbtest.TestBase):
    """Base class for event-based DarwinLog tests."""
    NO_DEBUG_INFO_TESTCASE = True

    class EventListenerThread(threading.Thread):

        def __init__(self, listener, process, trace_on, max_entry_count):
            super(
                DarwinLogEventBasedTestBase.EventListenerThread,
                self).__init__()
            self.process = process
            self.listener = listener
            self.trace_on = trace_on
            self.max_entry_count = max_entry_count
            self.exception = None
            self.structured_data_event_count = 0
            self.wait_seconds = 2
            self.max_timeout_count = 4
            self.log_entries = []

        def handle_structured_data_event(self, event):
            structured_data = SBProcess.GetStructuredDataFromEvent(event)
            if not structured_data.IsValid():
                if self.trace_on:
                    print("invalid structured data")
                return

            # Track that we received a valid structured data event.
            self.structured_data_event_count += 1

            # Grab the individual log entries out of the JSON payload,
            # which has the form {"events": [...]}.  The log entries are
            # dicts; the ones we care about carry a "message" key
            # (see remove_add_mode_entry()).
            stream = lldb.SBStream()
            structured_data.GetAsJSON(stream)
            payload = json.loads(stream.GetData())
            self.log_entries.extend(payload["events"])
            if self.trace_on:
                print("Structured data (raw):", stream.GetData())

            # Print the pretty-printed version.
            if self.trace_on:
                stream.Clear()
                structured_data.PrettyPrint(stream)
                print("Structured data (pretty print):",
                      stream.GetData())

        def done(self, timeout_count):
            """Returns True when we're done listening for events."""
            # See if we should consider the number of events retrieved.
            if self.max_entry_count is not None:
                if len(self.log_entries) >= self.max_entry_count:
                    # We've received the max threshold of events expected,
                    # we can exit here.
                    if self.trace_on:
                        print("Event listener thread exiting due to max "
                              "expected log entry count being reached.")
                    return True

            # If we've reached the maximum number of WaitForEvent() timeouts,
            # we're done.
            if timeout_count >= self.max_timeout_count:
                if self.trace_on:
                    print("Event listener thread exiting due to max number of "
                          "WaitForEvent() timeouts being reached.")
                return True

            # If our process is dead, we're done.
            if not self.process.is_alive:
                if self.trace_on:
                    print("Event listener thread exiting due to test inferior "
                          "exiting.")
                return True

            # We're not done.
            return False

        def run(self):
            event = lldb.SBEvent()
            try:
                timeout_count = 0

                # Wait up to 4 times for the event to arrive.
                while not self.done(timeout_count):
                    if self.trace_on:
                        print("Calling wait for event...")
                    if self.listener.WaitForEvent(self.wait_seconds, event):
                        while event.IsValid():
                            # Check if it's a process event.
                            if SBProcess.EventIsStructuredDataEvent(event):
                                self.handle_structured_data_event(event)
                            else:
                                if self.trace_on:
                                    print("ignoring unexpected event:",
                                          lldbutil.get_description(event))
                            # Grab the next event, if there is one.
                            event.Clear()
                            if not self.listener.GetNextEvent(event):
                                if self.trace_on:
                                    print("listener has no more events "
                                          "available at this time")
                    else:
                        if self.trace_on:
                            print("timeout occurred waiting for event...")
                        timeout_count += 1
                self.listener.Clear()
            except Exception as e:
                self.exception = e

    def setUp(self):
        # Call super's setUp().
        super(DarwinLogEventBasedTestBase, self).setUp()

        # Until other systems support this, exit
        # early if we're not macOS version 10.12
        # or greater.
        version = platform.mac_ver()[0].split('.')
        if ((int(version[0]) == 10) and (int(version[1]) < 12) or
                (int(version[0]) < 10)):
            self.skipTest("DarwinLog tests currently require macOS 10.12+")
            return

        # Source filename.
        self.source = 'main.c'

        # Output filename.
        self.exe_name = 'a.out'
        self.d = {'C_SOURCES': self.source, 'EXE': self.exe_name}

        # Locate breakpoint.
        self.line = lldbtest.line_number(self.source, '// break here')

        # Enable debugserver logging of the darwin log collection
        # mechanism.
        self.runCmd("settings set target.process.extra-startup-command "
                    "QSetLogging:bitmask=LOG_DARWIN_LOG;")

    def darwin_log_available(self):
        """Return True if the darwin-log status command reports
        'Availability: available' for the current target."""
        match = self.match("plugin structured-data darwin-log status",
                           patterns=[r"Availability: ([\S]+)"])
        return match is not None and (match.group(1) == "available")

    def do_test(self, enable_options, settings_commands=None,
                run_enable_after_breakpoint=False, max_entry_count=None):
        """Runs the test inferior, returning the collected log entries.

        This method runs the test inferior to the first breakpoint hit.
        It then adds a listener for structured data events, and collects
        all events from that point forward until the end of execution of
        the test inferior.  It then returns the log entries extracted
        from those events.

        @return
            A list of DarwinLog log entry dictionaries, in the order they
            were received, with libtrace's "Add Mode:" bookkeeping entry
            stripped out (see remove_add_mode_entry()).
        """
        self.build(dictionary=self.d)
        self.setTearDownCleanup(dictionary=self.d)

        exe = os.path.join(os.getcwd(), self.exe_name)

        # Create a target by the debugger.
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, lldbtest.VALID_TARGET)

        # Run the darwin-log settings commands.
        if settings_commands is not None:
            for setting_command in settings_commands:
                self.runCmd(
                    expand_darwinlog_settings_set_command(setting_command))

        # Build the darwin-log enable command.
        enable_cmd = expand_darwinlog_command("enable")
        if enable_options is not None and len(enable_options) > 0:
            enable_cmd += ' ' + ' '.join(enable_options)

        # Run the darwin-log enable command now if we are not supposed
        # to do it at the first breakpoint.  This tests the start-up
        # code, which has the benefit of being able to set os_log-related
        # environment variables.
        if not run_enable_after_breakpoint:
            self.runCmd(enable_cmd)

        # Create the breakpoint.
        breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
        self.assertIsNotNone(breakpoint)
        self.assertTrue(breakpoint.IsValid())
        self.assertEqual(1, breakpoint.GetNumLocations(),
                         "Should have found one breakpoint")

        # Enable packet logging.
        # self.runCmd("log enable -f /tmp/packets.log gdb-remote packets")
        # self.runCmd("log enable lldb process")

        # Launch the process - doesn't stop at entry.
        process = target.LaunchSimple(None, None, os.getcwd())
        self.assertIsNotNone(process, lldbtest.PROCESS_IS_VALID)

        # Keep track of whether we're tracing output.
        trace_on = self.TraceOn()

        # Get the next thread that stops.
        from lldbsuite.test.lldbutil import get_stopped_thread
        thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)

        self.assertIsNotNone(thread, "There should be a thread stopped "
                             "due to breakpoint")

        # The process should be stopped at this point.
        self.expect("process status", lldbtest.PROCESS_STOPPED,
                    patterns=['Process .* stopped'])

        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", lldbtest.STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped', 'stop reason = breakpoint'])

        # And our one and only breakpoint should have been hit.
        self.assertEqual(breakpoint.GetHitCount(), 1)

        # Check if DarwinLog is available.  This check cannot be done
        # until after the process has started, as the feature availability
        # comes through the stub.  The stub isn't running until
        # the target process is running.  So this is really the earliest
        # we can check.
        if not self.darwin_log_available():
            self.skipTest("DarwinLog not available")

        # Now setup the structured data listener.
        #
        # Grab the broadcaster for the process.  We'll be attaching our
        # listener to it.
        broadcaster = process.GetBroadcaster()
        self.assertIsNotNone(broadcaster)

        listener = lldb.SBListener("SBStructuredData listener")
        self.assertIsNotNone(listener)

        rc = broadcaster.AddListener(
            listener, lldb.SBProcess.eBroadcastBitStructuredData)
        self.assertTrue(rc, "Successfully add listener to process broadcaster")

        # Start the listening thread to retrieve the events.
        # Bump up max entry count for the potentially included Add Mode:
        # entry.
        if max_entry_count is not None:
            max_entry_count += 1
        event_thread = self.EventListenerThread(listener, process, trace_on,
                                                max_entry_count)
        event_thread.start()

        # Continue the test inferior.  We should get any events after this.
        process.Continue()

        # Wait until the event thread terminates.
        # print("main thread now waiting for event thread to receive events.")
        event_thread.join()

        # If the process is still alive, we kill it here.
        if process.is_alive:
            process.Kill()

        # Fail on any exceptions that occurred during event execution.
        if event_thread.exception is not None:
            # Re-raise it here so it shows up as a test error.
            raise event_thread.exception

        # Return the collected log entries (with libtrace's "Add Mode:"
        # bookkeeping entry stripped).
        return remove_add_mode_entry(event_thread.log_entries)
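

# A hypothetical sketch of how a concrete event-based test drives
# DarwinLogEventBasedTestBase.do_test(); the class name, test name, and
# assertions are illustrative assumptions, not definitions from this file.
# It is shown as a comment so that this base-class module does not itself
# add a runnable test.
#
#     class TestDarwinLogBasicEvents(DarwinLogEventBasedTestBase):
#
#         @decorators.skipUnlessDarwin
#         def test_collects_log_entries(self):
#             # Collect at most two log entries emitted by the inferior
#             # (do_test() internally allows one extra "Add Mode:" entry).
#             entries = self.do_test(
#                 enable_options=[], max_entry_count=2)
#             self.assertGreater(len(entries), 0)
#             for entry in entries:
#                 self.assertIn("message", entry)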