How to use the ddtrace.tracer.trace function in ddtrace

To help you get started, we’ve selected a few ddtrace examples based on popular ways it is used in public projects.

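Before the examples, here is a minimal sketch of the pattern they all share (the operation, service, and tag names below are placeholders, not taken from any of the projects): tracer.trace() starts a span, and using it as a context manager finishes the span automatically and records any exception raised inside the block.

from ddtrace import tracer

def checkout(cart_id):
    # placeholder names for this sketch
    with tracer.trace('checkout.process', service='example-app', resource='cart') as span:
        span.set_tag('cart.id', cart_id)
        # ... the actual work being measured goes here ...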

github DataDog / trace-examples / python / sample_app.py
def rpc_response(request, trace_id=None, parent_id=None):
    """ would live typically on another host"""
    with tracer.trace('rpc.server') as span:
        if trace_id and parent_id:
            span.trace_id = trace_id
            span.parent_id = parent_id

        return "returning result (%s)" % request

github DataDog / trace-examples / python / vertica / vertica_example.py
def main():
    print('querying vertica')
    with tracer.trace('vertica_example', service="vertica-example"):
        query()

    with tracer.trace('vertica_example_error', service="vertica-example"):
        invalid_query()

    with tracer.trace('vertica_example_fetching', service="vertica-example"):
        fetching()

github DataDog / datadog-lambda-layer-python / datadog_lambda / tracing.py
def set_correlation_ids():
    """
    Create a dummy span and override its trace_id and span_id to make
    ddtrace.helpers.get_correlation_ids() return the correct ids for both
    auto and manual log correlations.

    TODO: Remove me when Datadog tracer is natively supported in Lambda.
    """
    if not is_lambda_context():
        logger.debug("set_correlation_ids is only supported in LambdaContext")
        return
    if dd_tracing_enabled:
        logger.debug("using ddtrace implementation for spans")
        return

    context = get_dd_trace_context()

    span = tracer.trace("dummy.span")
    span.trace_id = context[TraceHeader.TRACE_ID]
    span.span_id = context[TraceHeader.PARENT_ID]

    logger.debug("correlation ids set")

github DataDog / trace-examples / python / asyncio / http_async_server.py
async def handle_request(reader, writer):
    # trace something
    with tracer.trace('async.handler', service='asyncio-web') as span:
        # do something
        await asyncio.sleep(0.02)
        # in the meantime do something else
        value = await get_redis_value()
        # do something that will conclude in the future
        future = helpers.ensure_future(delayed_job(tracer.current_span()))

    # response
    start_response(writer)
    writer.write(b'OK\r\n')
    writer.close()
    await future
    print('200: request handled')
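delayed_job itself is not shown in the snippet. As a hedged sketch (the operation name and the sleep are assumptions), it receives the request span captured above and continues the trace after the handler span has already finished:

async def delayed_job(parent_span):
    # hypothetical continuation: start a new span parented to the request span
    with tracer.start_span('async.delayed_job', child_of=parent_span):
        await asyncio.sleep(0.1)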

github DataDog / trace-examples / python / flask / flaskr.py
import os
import logging

from sqlite3 import dbapi2 as sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
     render_template, flash

from ddtrace import tracer
from ddtrace.contrib.sqlite3 import connection_factory
from ddtrace.contrib.flask import TraceMiddleware

# start a dummy trace here to ensure we start tracing
# before we fork.
with tracer.trace("aaaa"):
    pass

tracer.debug_logging = True

logging.basicConfig(level=logging.DEBUG)


# create our little application :)
app = Flask(__name__)

# Load default config and override config from an environment variable
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'flaskr.db'),
    DEBUG=True,
    SECRET_KEY='development key',
    USERNAME='admin',

github DataDog / dd-py-tracing-workshop / step03 / app.py
def pair():
    """A complex endpoint that makes a request to another Python service"""
    name = request.args.get('name')

    with tracer.trace("beer.query", service="beer-database"):
        beer = Beer.query.filter_by(name=name).first()

    # force a query
    with tracer.trace("donuts.query", service="beer-database"):
        Donut.query.all()

    with tracer.trace("donuts.query") as span:
        span.set_tag('beer.name', name)
        match = best_match(beer)
    return jsonify(match=match)

github ziquanmiao / minikube_datadog / flask_app / app.py
def return_results():
    datadog.statsd.increment('counter_metric', tags=['endpoint:query'])
    with tracer.trace("Random wait", service="my-flask-app") as span:
        my_thread = threading.currentThread().getName()
        time.sleep(0.01)
        logger.info('some postgres query has been made',
            extra={
                'job_category': 'query',
                'logger.name': 'my_json',
                'logger.thread_name': my_thread
            }
        )
        time.sleep(random.random()*1.5)
    with tracer.trace("database query", service="my-flask-app") as span:
        span.set_tag('sample','tag')
        conn = con.connect()
        s = select([web_origins])
        result = conn.execute(s)
        row = result.fetchone()
        conn.close()
        Pin.override(con, service='replica-db')
        return str(row) + '\n'
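A note on the last lines above: Pin.override(con, service='replica-db') re-labels the service under which spans produced through con are reported; because it runs after the query here, it only affects later requests. A minimal hedged sketch of the more usual ordering (the engine URL is a placeholder):

from ddtrace import Pin, patch
from sqlalchemy import create_engine

patch(sqlalchemy=True)                                # enable the sqlalchemy integration
con = create_engine('postgresql://localhost/app')     # placeholder DSN for this sketch
Pin.override(con, service='replica-db')               # traced queries on `con` now report as 'replica-db'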

github DataDog / trace-examples / python / rq / app.py
from random import choice, random
from time import sleep

from redis import Redis
from rq import Queue

from ddtrace import tracer

from jobs import jobs


redis_conn = Redis()
q = Queue(connection=redis_conn)

while True:
    job, rargs = choice(jobs)
    args = rargs()

    with tracer.trace('main', service='rq-app') as span:
        print(f'submitting job {job.__name__} with args {args}')
        job = q.enqueue(job, *args, job_id=job.__name__)
        with tracer.trace('meantime', service='rq-app'):
            sleep(0.25)
        # print(job.get_status())
        with tracer.trace('handle results', service='rq-app'):
            q.fetch_job(job.id)
        sleep(random())