
feat(proguard): Remap exception names in proguard #15795


Merged 2 commits on Nov 30, 2019
src/sentry/lang/java/plugin.py: 18 additions, 0 deletions

@@ -80,6 +80,24 @@ def preprocess_step(self, processing_task):

        return True

+    def process_exception(self, exception):
+        ty = exception.get("type")
+        mod = exception.get("module")
+        if not ty or not mod:
+            return False
+
+        key = "%s.%s" % (mod, ty)
+
+        for view in self.mapping_views:
+            original = view.lookup(key)
+            if original != key:
+                new_module, new_cls = original.rsplit(".", 1)
Member:
Will this work with generics that contain class paths?

Contributor Author:
No idea. At least it's no different from how we already do it with methods.

+                exception["module"] = new_module
+                exception["type"] = new_cls
+                return True
+
+        return False
+
    def process_frame(self, processable_frame, processing_task):
        new_module = None
        new_function = None
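For context, here is a minimal sketch of what the new hook does, using a hypothetical dict-backed mapping view in place of the real ProGuard mapping views. Only the `lookup()` call and the `module`/`type` remapping come from the diff above; the class and function names here are illustrative. It also shows why the generics question above matters: a flat class key such as `org.a.b.g$a` remaps cleanly, while a generic signature embedding class paths would not match a single mapping key.

# Illustrative sketch only, not part of this PR. FakeMappingView stands in for
# the plugin's ProGuard mapping views, which expose lookup() as used above.
class FakeMappingView:
    def __init__(self, mapping):
        self._mapping = mapping

    def lookup(self, key):
        # Return the de-obfuscated name, or the key unchanged if unknown.
        return self._mapping.get(key, key)


def remap_exception(exception, mapping_views):
    """Mirror of the plugin's process_exception logic, for illustration."""
    ty = exception.get("type")
    mod = exception.get("module")
    if not ty or not mod:
        return False

    key = "%s.%s" % (mod, ty)
    for view in mapping_views:
        original = view.lookup(key)
        if original != key:
            new_module, new_cls = original.rsplit(".", 1)
            exception["module"] = new_module
            exception["type"] = new_cls
            return True
    return False


views = [
    FakeMappingView(
        {"org.a.b.g$a": "org.slf4j.helpers.Util$ClassContextSecurityManager"}
    )
]
exc = {"module": "org.a.b", "type": "g$a", "value": "Shit broke yo"}
remap_exception(exc, views)
# exc now has module "org.slf4j.helpers" and type
# "Util$ClassContextSecurityManager", matching the updated test below.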
src/sentry/stacktraces/processing.py: 34 additions, 9 deletions

@@ -16,7 +16,9 @@

logger = logging.getLogger(__name__)

-StacktraceInfo = namedtuple("StacktraceInfo", ["stacktrace", "container", "platforms"])
+StacktraceInfo = namedtuple(
+    "StacktraceInfo", ["stacktrace", "container", "platforms", "is_exception"]
+)
StacktraceInfo.__hash__ = lambda x: id(x)
StacktraceInfo.__eq__ = lambda a, b: a is b
StacktraceInfo.__ne__ = lambda a, b: a is not b
@@ -145,6 +147,10 @@ def preprocess_frame(self, processable_frame):
"""
pass

def process_exception(self, exception):
"""Processes an exception."""
return False

def process_frame(self, processable_frame, processing_task):
"""Processes the processable frame and returns a tuple of three
lists: ``(frames, raw_frames, errors)`` where frames is the list of
@@ -162,26 +168,36 @@ def preprocess_step(self, processing_task):
        return False


-def find_stacktraces_in_data(data, include_raw=False):
+def find_stacktraces_in_data(data, include_raw=False, with_exceptions=False):
    """Finds all stracktraces in a given data blob and returns it
    together with some meta information.

-    If `include_raw` is True, then also raw stacktraces are included.
+    If `include_raw` is True, then also raw stacktraces are included. If
+    `with_exceptions` is set to `True` then stacktraces of the exception
+    are always included and the `is_exception` flag is set on that stack
+    info object.
    """
    rv = []

-    def _report_stack(stacktrace, container):
-        if not stacktrace or not get_path(stacktrace, "frames", filter=True):
+    def _report_stack(stacktrace, container, is_exception=False):
+        if not is_exception and (not stacktrace or not get_path(stacktrace, "frames", filter=True)):
Member:
This change is indeed confusing, as we're now reporting non-existing stack traces just to get to the exception container. I also don't have a better idea, since I suppose you need the stack frame's platform to decide whether you want to touch the container exception.

To be safe, you could default frames to an empty list, but since you only selectively turn this on, I could not find an invalidated invariant. At least the native plugin relies on empty stacktraces being removed, but there you don't toggle it, so it's fine.

As an alternative, could we add a find_exceptions_in_data and call it in should_process_for_stacktraces as well as process_stacktraces before processing the stack traces?

Contributor Author:
I would like to change the entire thing to return stacks plus related data and treat missing stacks as empty stacks.

            return

        platforms = set(
            frame.get("platform") or data.get("platform")
            for frame in get_path(stacktrace, "frames", filter=True, default=())
        )
-        rv.append(StacktraceInfo(stacktrace=stacktrace, container=container, platforms=platforms))
+        rv.append(
+            StacktraceInfo(
+                stacktrace=stacktrace,
+                container=container,
+                platforms=platforms,
+                is_exception=is_exception,
+            )
+        )

    for exc in get_path(data, "exception", "values", filter=True, default=()):
-        _report_stack(exc.get("stacktrace"), exc)
+        _report_stack(exc.get("stacktrace"), exc, is_exception=with_exceptions)

    _report_stack(data.get("stacktrace"), None)
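The reviewer's alternative above would avoid reporting empty stack traces altogether. A rough sketch of what such a helper could look like follows; find_exceptions_in_data does not exist in this PR, and its shape here is purely illustrative. get_path is assumed to be the same helper already imported in processing.py.

# Hypothetical alternative suggested in review, not part of this PR: collect
# the exception containers separately instead of reporting empty stacktraces.
from sentry.utils.safe import get_path  # same helper used in processing.py


def find_exceptions_in_data(data):
    """Return all exception containers from the event payload."""
    return [
        exc
        for exc in get_path(data, "exception", "values", filter=True, default=())
        if exc.get("type") or exc.get("module")
    ]


# process_stacktraces could then let processors touch exceptions up front:
#
#     for exc in find_exceptions_in_data(data):
#         for processor in processors:
#             if processor.process_exception(exc):
#                 changed = True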

@@ -278,7 +294,7 @@ def normalize_stacktraces_for_grouping(data, grouping_config=None):
def should_process_for_stacktraces(data):
    from sentry.plugins.base import plugins

-    infos = find_stacktraces_in_data(data)
+    infos = find_stacktraces_in_data(data, with_exceptions=True)
    platforms = set()
    for info in infos:
        platforms.update(info.platforms or ())

@@ -462,7 +478,7 @@ def dedup_errors(errors):


def process_stacktraces(data, make_processors=None, set_raw_stacktrace=True):
-    infos = find_stacktraces_in_data(data)
+    infos = find_stacktraces_in_data(data, with_exceptions=True)
    if make_processors is None:
        processors = get_processors_for_stacktraces(data, infos)
    else:

@@ -486,6 +502,15 @@ def process_stacktraces(data, make_processors=None, set_raw_stacktrace=True):

    # Process all stacktraces
    for stacktrace_info, processable_frames in processing_task.iter_processable_stacktraces():
+        # Let the stacktrace processors touch the exception
+        if stacktrace_info.is_exception and stacktrace_info.container:
+            for processor in processing_task.iter_processors():
+                if processor.process_exception(stacktrace_info.container):
+                    changed = True
+
+        # If the stacktrace is empty we skip it for processing
+        if not stacktrace_info.stacktrace:
+            continue
        new_frames, new_raw_frames, errors = process_single_stacktrace(
            processing_task, stacktrace_info, processable_frames
        )
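To illustrate how the new loop above reaches processors, here is a minimal processor sketch. Only the process_exception signature and its False default come from this diff; the class name, import path, and the uppercase mutation are assumptions for illustration.

# Illustrative only: a minimal processor overriding the new hook. The base
# implementation added above returns False; returning True sets the `changed`
# flag in process_stacktraces so the mutated exception is kept.
from sentry.stacktraces.processing import StacktraceProcessor  # assumed path


class UppercaseExceptionProcessor(StacktraceProcessor):
    def process_exception(self, exception):
        ty = exception.get("type")
        if not ty:
            return False
        exception["type"] = ty.upper()  # mutate the exception container in place
        return True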
tests/sentry/lang/java/test_plugin.py: 6 additions, 2 deletions

@@ -81,7 +81,8 @@ def test_basic_resolving(self):
                            },
                        ]
                    },
-                    "type": "RuntimeException",
+                    "module": "org.a.b",
+                    "type": "g$a",
                    "value": "Shit broke yo",
                }
            ]
@@ -105,9 +106,12 @@ def test_basic_resolving(self):
        event_id = json.loads(resp.content)["id"]

        event = eventstore.get_event_by_id(self.project.id, event_id)
-        bt = event.interfaces["exception"].values[0].stacktrace
+        exc = event.interfaces["exception"].values[0]
+        bt = exc.stacktrace
        frames = bt.frames

+        assert exc.type == "Util$ClassContextSecurityManager"
+        assert exc.module == "org.slf4j.helpers"
        assert frames[0].function == "getClassContext"
        assert frames[0].module == "org.slf4j.helpers.Util$ClassContextSecurityManager"
        assert frames[1].function == "getExtraClassContext"
tests/sentry/test_stacktraces.py: 7 additions, 0 deletions

@@ -146,6 +146,13 @@ def test_find_stacktraces_skip_none(self):
            },
        }

+        infos = find_stacktraces_in_data(data, with_exceptions=True)
+        assert len(infos) == 4
+        assert sum(1 for x in infos if x.stacktrace) == 3
+        assert sum(1 for x in infos if x.is_exception) == 4
+        # XXX: The null frame is still part of this stack trace!
+        assert len(infos[3].stacktrace["frames"]) == 3
+
        infos = find_stacktraces_in_data(data)
        assert len(infos) == 1
        # XXX: The null frame is still part of this stack trace!