Move DD code into its own directory (#6)

Diego Hurtado
2020-04-08 11:39:44 -06:00
committed by GitHub
parent 72b40ba5f9
commit 5aee3ce32e
611 changed files with 0 additions and 0 deletions

View File

@@ -0,0 +1,91 @@
import asyncio
import pytest

from ddtrace.context import Context
from ddtrace.internal.context_manager import CONTEXTVARS_IS_AVAILABLE
from ddtrace.contrib.asyncio import helpers
from .utils import AsyncioTestCase, mark_asyncio


@pytest.mark.skipif(
    CONTEXTVARS_IS_AVAILABLE,
    reason='only applicable to legacy asyncio integration'
)
class TestAsyncioHelpers(AsyncioTestCase):
    """
    Ensure that helpers set the ``Context`` properly when creating
    new ``Task`` or threads.
    """
    @mark_asyncio
    def test_set_call_context(self):
        # a different Context is set for the current logical execution
        task = asyncio.Task.current_task()
        ctx = Context()
        helpers.set_call_context(task, ctx)
        assert ctx == self.tracer.get_call_context()

    @mark_asyncio
    def test_ensure_future(self):
        # the wrapper should create a new Future that has the Context attached
        @asyncio.coroutine
        def future_work():
            # the ctx is available in this task
            ctx = self.tracer.get_call_context()
            assert 1 == len(ctx._trace)
            assert 'coroutine' == ctx._trace[0].name
            return ctx._trace[0].name

        self.tracer.trace('coroutine')
        # schedule future work and wait for a result
        delayed_task = helpers.ensure_future(future_work(), tracer=self.tracer)
        result = yield from asyncio.wait_for(delayed_task, timeout=1)
        assert 'coroutine' == result

    @mark_asyncio
    def test_run_in_executor_proxy(self):
        # the wrapper should pass arguments and results properly
        def future_work(number, name):
            assert 42 == number
            assert 'john' == name
            return True

        future = helpers.run_in_executor(self.loop, None, future_work, 42, 'john', tracer=self.tracer)
        result = yield from future
        assert result

    @mark_asyncio
    def test_run_in_executor_traces(self):
        # the wrapper should create a different Context when the Thread
        # is started; the new Context creates a new trace
        def future_work():
            # the Context is empty but the reference to the latest
            # span is here to keep the parenting
            ctx = self.tracer.get_call_context()
            assert 0 == len(ctx._trace)
            assert 'coroutine' == ctx._current_span.name
            return True

        span = self.tracer.trace('coroutine')
        future = helpers.run_in_executor(self.loop, None, future_work, tracer=self.tracer)
        # we close the Context
        span.finish()
        result = yield from future
        assert result

    @mark_asyncio
    def test_create_task(self):
        # the helper should create a new Task that has the Context attached
        @asyncio.coroutine
        def future_work():
            # the ctx is available in this task
            ctx = self.tracer.get_call_context()
            assert 0 == len(ctx._trace)
            child_span = self.tracer.trace('child_task')
            return child_span

        root_span = self.tracer.trace('main_task')
        # schedule future work and wait for a result
        task = helpers.create_task(future_work())
        result = yield from task
        assert root_span.trace_id == result.trace_id
        assert root_span.span_id == result.parent_id
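
For reference, a minimal sketch of how the helpers exercised above might be used outside the test harness, on the legacy (pre-contextvars) asyncio integration. The fetch_user and handle_request coroutines are illustrative assumptions, not part of this commit; only tracer.configure, context_provider, and helpers.ensure_future come from the code under test.

# Illustrative sketch: propagate the active Context into a spawned Task with
# the legacy asyncio helpers. `fetch_user` / `handle_request` are hypothetical.
import asyncio

from ddtrace import tracer
from ddtrace.contrib.asyncio import context_provider, helpers

# activate the asyncio context provider, as AsyncioTestCase.setUp does below
tracer.configure(context_provider=context_provider)


@asyncio.coroutine
def fetch_user(user_id):
    # this span joins the caller's trace because the helper attached the Context
    with tracer.trace('fetch_user'):
        yield from asyncio.sleep(0.01)
        return {'id': user_id}


@asyncio.coroutine
def handle_request():
    with tracer.trace('handle_request'):
        # helpers.ensure_future attaches the current Context to the new Task,
        # unlike a bare asyncio.ensure_future on the legacy integration
        task = helpers.ensure_future(fetch_user(42), tracer=tracer)
        return (yield from task)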

View File

@@ -0,0 +1,392 @@
import asyncio
import pytest
import time

from ddtrace.context import Context
from ddtrace.internal.context_manager import CONTEXTVARS_IS_AVAILABLE
from ddtrace.provider import DefaultContextProvider
from ddtrace.contrib.asyncio.patch import patch, unpatch
from ddtrace.contrib.asyncio.helpers import set_call_context
from tests.opentracer.utils import init_tracer
from .utils import AsyncioTestCase, mark_asyncio

_orig_create_task = asyncio.BaseEventLoop.create_task


class TestAsyncioTracer(AsyncioTestCase):
    """Ensure that the tracer works with asynchronous executions within
    the same ``IOLoop``.
    """
    @mark_asyncio
    @pytest.mark.skipif(
        CONTEXTVARS_IS_AVAILABLE,
        reason='only applicable to legacy asyncio provider'
    )
    def test_get_call_context(self):
        # it should return the context attached to the current Task
        # or create a new one
        task = asyncio.Task.current_task()
        ctx = getattr(task, '__datadog_context', None)
        assert ctx is None
        # getting the context from the loop creates a new one that
        # is attached to the Task object
        ctx = self.tracer.get_call_context()
        assert ctx == getattr(task, '__datadog_context', None)

    @mark_asyncio
    def test_get_call_context_twice(self):
        # it should return the same Context if called twice
        assert self.tracer.get_call_context() == self.tracer.get_call_context()

    @mark_asyncio
    def test_trace_coroutine(self):
        # it should use the task context when invoked in a coroutine
        with self.tracer.trace('coroutine') as span:
            span.resource = 'base'

        traces = self.tracer.writer.pop_traces()
        assert 1 == len(traces)
        assert 1 == len(traces[0])
        assert 'coroutine' == traces[0][0].name
        assert 'base' == traces[0][0].resource

    @mark_asyncio
    def test_trace_multiple_coroutines(self):
        # if multiple coroutines have nested tracing, they must belong
        # to the same trace
        @asyncio.coroutine
        def coro():
            # another traced coroutine
            with self.tracer.trace('coroutine_2'):
                return 42

        with self.tracer.trace('coroutine_1'):
            value = yield from coro()

        # the coroutine has been called correctly
        assert 42 == value
        # a single trace has been properly reported
        traces = self.tracer.writer.pop_traces()
        assert 1 == len(traces)
        assert 2 == len(traces[0])
        assert 'coroutine_1' == traces[0][0].name
        assert 'coroutine_2' == traces[0][1].name
        # the parenting is correct
        assert traces[0][0] == traces[0][1]._parent
        assert traces[0][0].trace_id == traces[0][1].trace_id

    @mark_asyncio
    def test_event_loop_exception(self):
        # it should handle a loop exception
        asyncio.set_event_loop(None)
        ctx = self.tracer.get_call_context()
        assert ctx is not None

    def test_context_task_none(self):
        # it should handle the case where a Task is not available
        # Note: the @mark_asyncio is missing to simulate an execution
        # without a Task
        task = asyncio.Task.current_task()
        # the task is not available
        assert task is None
        # but a new Context is still created making the operation safe
        ctx = self.tracer.get_call_context()
        assert ctx is not None

    @mark_asyncio
    def test_exception(self):
        @asyncio.coroutine
        def f1():
            with self.tracer.trace('f1'):
                raise Exception('f1 error')

        with self.assertRaises(Exception):
            yield from f1()

        traces = self.tracer.writer.pop_traces()
        assert 1 == len(traces)
        spans = traces[0]
        assert 1 == len(spans)
        span = spans[0]
        assert 1 == span.error
        assert 'f1 error' == span.get_tag('error.msg')
        assert 'Exception: f1 error' in span.get_tag('error.stack')

    @mark_asyncio
    def test_nested_exceptions(self):
        @asyncio.coroutine
        def f1():
            with self.tracer.trace('f1'):
                raise Exception('f1 error')

        @asyncio.coroutine
        def f2():
            with self.tracer.trace('f2'):
                yield from f1()

        with self.assertRaises(Exception):
            yield from f2()

        traces = self.tracer.writer.pop_traces()
        assert 1 == len(traces)
        spans = traces[0]
        assert 2 == len(spans)
        span = spans[0]
        assert 'f2' == span.name
        assert 1 == span.error  # f2 did not catch the exception
        assert 'f1 error' == span.get_tag('error.msg')
        assert 'Exception: f1 error' in span.get_tag('error.stack')
        span = spans[1]
        assert 'f1' == span.name
        assert 1 == span.error
        assert 'f1 error' == span.get_tag('error.msg')
        assert 'Exception: f1 error' in span.get_tag('error.stack')

    @mark_asyncio
    def test_handled_nested_exceptions(self):
        @asyncio.coroutine
        def f1():
            with self.tracer.trace('f1'):
                raise Exception('f1 error')

        @asyncio.coroutine
        def f2():
            with self.tracer.trace('f2'):
                try:
                    yield from f1()
                except Exception:
                    pass

        yield from f2()

        traces = self.tracer.writer.pop_traces()
        assert 1 == len(traces)
        spans = traces[0]
        assert 2 == len(spans)
        span = spans[0]
        assert 'f2' == span.name
        assert 0 == span.error  # f2 caught the exception
        span = spans[1]
        assert 'f1' == span.name
        assert 1 == span.error
        assert 'f1 error' == span.get_tag('error.msg')
        assert 'Exception: f1 error' in span.get_tag('error.stack')

    @mark_asyncio
    def test_trace_multiple_calls(self):
        # create multiple futures so that we expect multiple
        # traces instead of a single one (helper not used)
        @asyncio.coroutine
        def coro():
            # another traced coroutine
            with self.tracer.trace('coroutine'):
                yield from asyncio.sleep(0.01)

        futures = [asyncio.ensure_future(coro()) for x in range(10)]
        for future in futures:
            yield from future

        traces = self.tracer.writer.pop_traces()
        assert 10 == len(traces)
        assert 1 == len(traces[0])
        assert 'coroutine' == traces[0][0].name

    @mark_asyncio
    def test_wrapped_coroutine(self):
        @self.tracer.wrap('f1')
        @asyncio.coroutine
        def f1():
            yield from asyncio.sleep(0.25)

        yield from f1()

        traces = self.tracer.writer.pop_traces()
        assert 1 == len(traces)
        spans = traces[0]
        assert 1 == len(spans)
        span = spans[0]
        assert span.duration > 0.25, 'span.duration={}'.format(span.duration)


class TestAsyncioPropagation(AsyncioTestCase):
    """Ensure that asyncio context propagation works between different tasks"""
    def setUp(self):
        # patch asyncio event loop
        super(TestAsyncioPropagation, self).setUp()
        patch()

    def tearDown(self):
        # unpatch asyncio event loop
        super(TestAsyncioPropagation, self).tearDown()
        unpatch()

    @mark_asyncio
    def test_tasks_chaining(self):
        # ensures that the context is propagated between different tasks
        @self.tracer.wrap('spawn_task')
        @asyncio.coroutine
        def coro_2():
            yield from asyncio.sleep(0.01)

        @self.tracer.wrap('main_task')
        @asyncio.coroutine
        def coro_1():
            yield from asyncio.ensure_future(coro_2())

        yield from coro_1()

        traces = self.tracer.writer.pop_traces()
        assert len(traces) == 2
        assert len(traces[0]) == 1
        assert len(traces[1]) == 1
        spawn_task = traces[0][0]
        main_task = traces[1][0]
        # check if the context has been correctly propagated
        assert spawn_task.trace_id == main_task.trace_id
        assert spawn_task.parent_id == main_task.span_id

    @mark_asyncio
    def test_concurrent_chaining(self):
        # ensures that the context is correctly propagated when
        # concurrent tasks are created from a common tracing block
        @self.tracer.wrap('f1')
        @asyncio.coroutine
        def f1():
            yield from asyncio.sleep(0.01)

        @self.tracer.wrap('f2')
        @asyncio.coroutine
        def f2():
            yield from asyncio.sleep(0.01)

        with self.tracer.trace('main_task'):
            yield from asyncio.gather(f1(), f2())
            # do additional synchronous work to confirm main context is
            # correctly handled
            with self.tracer.trace('main_task_child'):
                time.sleep(0.01)

        traces = self.tracer.writer.pop_traces()
        assert len(traces) == 3
        assert len(traces[0]) == 1
        assert len(traces[1]) == 1
        assert len(traces[2]) == 2
        child_1 = traces[0][0]
        child_2 = traces[1][0]
        main_task = traces[2][0]
        main_task_child = traces[2][1]
        # check if the context has been correctly propagated
        assert child_1.trace_id == main_task.trace_id
        assert child_1.parent_id == main_task.span_id
        assert child_2.trace_id == main_task.trace_id
        assert child_2.parent_id == main_task.span_id
        assert main_task_child.trace_id == main_task.trace_id
        assert main_task_child.parent_id == main_task.span_id

    @pytest.mark.skipif(
        CONTEXTVARS_IS_AVAILABLE,
        reason='only applicable to legacy asyncio provider'
    )
    @mark_asyncio
    def test_propagation_with_set_call_context(self):
        # ensures that if a new Context is attached to the current
        # running Task via helpers, a previous trace is resumed
        task = asyncio.Task.current_task()
        ctx = Context(trace_id=100, span_id=101)
        set_call_context(task, ctx)

        with self.tracer.trace('async_task'):
            yield from asyncio.sleep(0.01)

        traces = self.tracer.writer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 1
        span = traces[0][0]
        assert span.trace_id == 100
        assert span.parent_id == 101

    @mark_asyncio
    def test_propagation_with_new_context(self):
        # ensures that if a new Context is activated, a trace
        # with the Context arguments is created
        ctx = Context(trace_id=100, span_id=101)
        self.tracer.context_provider.activate(ctx)

        with self.tracer.trace('async_task'):
            yield from asyncio.sleep(0.01)

        traces = self.tracer.writer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 1
        span = traces[0][0]
        assert span.trace_id == 100
        assert span.parent_id == 101

    @mark_asyncio
    def test_event_loop_unpatch(self):
        # ensures that the event loop can be unpatched
        unpatch()
        assert isinstance(self.tracer._context_provider, DefaultContextProvider)
        assert asyncio.BaseEventLoop.create_task == _orig_create_task

    def test_event_loop_double_patch(self):
        # ensures that double patching will not double instrument
        # the event loop
        patch()
        self.test_tasks_chaining()

    @mark_asyncio
    def test_trace_multiple_coroutines_ot_outer(self):
        """OpenTracing version of test_trace_multiple_coroutines."""
        # if multiple coroutines have nested tracing, they must belong
        # to the same trace
        @asyncio.coroutine
        def coro():
            # another traced coroutine
            with self.tracer.trace('coroutine_2'):
                return 42

        ot_tracer = init_tracer('asyncio_svc', self.tracer)
        with ot_tracer.start_active_span('coroutine_1'):
            value = yield from coro()

        # the coroutine has been called correctly
        assert 42 == value
        # a single trace has been properly reported
        traces = self.tracer.writer.pop_traces()
        assert 1 == len(traces)
        assert 2 == len(traces[0])
        assert 'coroutine_1' == traces[0][0].name
        assert 'coroutine_2' == traces[0][1].name
        # the parenting is correct
        assert traces[0][0] == traces[0][1]._parent
        assert traces[0][0].trace_id == traces[0][1].trace_id

    @mark_asyncio
    def test_trace_multiple_coroutines_ot_inner(self):
        """OpenTracing version of test_trace_multiple_coroutines."""
        # if multiple coroutines have nested tracing, they must belong
        # to the same trace
        ot_tracer = init_tracer('asyncio_svc', self.tracer)

        @asyncio.coroutine
        def coro():
            # another traced coroutine
            with ot_tracer.start_active_span('coroutine_2'):
                return 42

        with self.tracer.trace('coroutine_1'):
            value = yield from coro()

        # the coroutine has been called correctly
        assert 42 == value
        # a single trace has been properly reported
        traces = self.tracer.writer.pop_traces()
        assert 1 == len(traces)
        assert 2 == len(traces[0])
        assert 'coroutine_1' == traces[0][0].name
        assert 'coroutine_2' == traces[0][1].name
        # the parenting is correct
        assert traces[0][0] == traces[0][1]._parent
        assert traces[0][0].trace_id == traces[0][1].trace_id
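
The propagation tests above rely on patch() instrumenting BaseEventLoop.create_task. A hedged sketch of the same behaviour in application code follows; the coroutine names are illustrative assumptions, while patch, context_provider, and tracer.wrap are the APIs the tests exercise.

# Illustrative sketch: after patch(), a Task spawned with plain
# asyncio.ensure_future is parented to the span active at creation time,
# mirroring the assertions in test_tasks_chaining.
import asyncio

from ddtrace import tracer
from ddtrace.contrib.asyncio import context_provider
from ddtrace.contrib.asyncio.patch import patch

patch()
tracer.configure(context_provider=context_provider)


@tracer.wrap('background_job')
@asyncio.coroutine
def background_job():
    yield from asyncio.sleep(0.01)


@tracer.wrap('request_handler')
@asyncio.coroutine
def request_handler():
    # the spawned task reports with parent_id == the 'request_handler' span id
    yield from asyncio.ensure_future(background_job())


loop = asyncio.get_event_loop()
loop.run_until_complete(request_handler())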

View File

@@ -0,0 +1,57 @@
import asyncio

from ddtrace.provider import DefaultContextProvider
from .utils import AsyncioTestCase, mark_asyncio


class TestAsyncioSafety(AsyncioTestCase):
    """
    Ensure that if the ``AsyncioTracer`` is not properly configured,
    bad traces are produced but the ``Context`` object will not
    leak memory.
    """
    def setUp(self):
        # Asyncio TestCase with the wrong context provider
        super(TestAsyncioSafety, self).setUp()
        self.tracer.configure(context_provider=DefaultContextProvider())

    @mark_asyncio
    def test_get_call_context(self):
        # it should return a context even if not attached to the Task
        ctx = self.tracer.get_call_context()
        assert ctx is not None
        # test that it behaves the wrong way
        task = asyncio.Task.current_task()
        task_ctx = getattr(task, '__datadog_context', None)
        assert task_ctx is None

    @mark_asyncio
    def test_trace_coroutine(self):
        # it should use the task context when invoked in a coroutine
        with self.tracer.trace('coroutine') as span:
            span.resource = 'base'

        traces = self.tracer.writer.pop_traces()
        assert 1 == len(traces)
        assert 1 == len(traces[0])
        assert 'coroutine' == traces[0][0].name
        assert 'base' == traces[0][0].resource

    @mark_asyncio
    def test_trace_multiple_calls(self):
        @asyncio.coroutine
        def coro():
            # another traced coroutine
            with self.tracer.trace('coroutine'):
                yield from asyncio.sleep(0.01)

        ctx = self.tracer.get_call_context()
        futures = [asyncio.ensure_future(coro()) for x in range(1000)]
        for future in futures:
            yield from future

        # the trace is wrong but the Context is finished
        traces = self.tracer.writer.pop_traces()
        assert 1 == len(traces)
        assert 1000 == len(traces[0])
        assert 0 == len(ctx._trace)
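
The safety suite above deliberately installs the wrong provider; for contrast, a minimal sketch of the configuration the other suites use, and that an application on the legacy integration would need, is:

# Illustrative sketch: activate the asyncio context provider so that each Task
# carries its own Context instead of every coroutine sharing one global Context.
from ddtrace import tracer
from ddtrace.contrib.asyncio import context_provider

tracer.configure(context_provider=context_provider)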

View File

@@ -0,0 +1,48 @@
import asyncio
from functools import wraps

from ddtrace.contrib.asyncio import context_provider
from ...base import BaseTracerTestCase


class AsyncioTestCase(BaseTracerTestCase):
    """
    Base TestCase for the asyncio framework that sets up a new loop
    for each test, preserving the original (not started) main
    loop.
    """
    def setUp(self):
        super(AsyncioTestCase, self).setUp()
        self.tracer.configure(context_provider=context_provider)
        # each test must have its own event loop
        self._main_loop = asyncio.get_event_loop()
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

    def tearDown(self):
        super(AsyncioTestCase, self).tearDown()
        # restore the main loop
        asyncio.set_event_loop(self._main_loop)
        self.loop = None
        self._main_loop = None


def mark_asyncio(f):
    """
    Test decorator that wraps a function so that it can be executed
    as an asynchronous coroutine. This uses the event loop set in the
    ``TestCase`` class, and runs the loop until it's completed.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        coro = asyncio.coroutine(f)
        future = coro(*args, **kwargs)
        loop = asyncio.get_event_loop()
        loop.run_until_complete(future)
        loop.close()
    return wrapper
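
A hypothetical test built on these utilities, showing how AsyncioTestCase and mark_asyncio are meant to be combined: the decorator turns the method into a coroutine and runs it on the per-test loop created in setUp(). The test body below is illustrative and not part of this commit.

# Illustrative test: mark_asyncio runs the generator-based test method on the
# event loop that AsyncioTestCase.setUp() installed for this test.
import asyncio

from .utils import AsyncioTestCase, mark_asyncio


class TestExample(AsyncioTestCase):
    @mark_asyncio
    def test_traced_sleep(self):
        with self.tracer.trace('sleep'):
            yield from asyncio.sleep(0.01)

        traces = self.tracer.writer.pop_traces()
        assert 1 == len(traces)
        assert 'sleep' == traces[0][0].name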