alrex ed6dd7f257 Removing support for Python 2.7 (#2)
OpenTelemetry doesn't support Python 2.7. This change removes it from the tests and removes the contrib packages that don't support Python 3:

- removing pylons contrib
- removing mysqldb contrib
- updating minimum versions of flask (>=1.0), gevent (>=1.1)

Signed-off-by: Alex Boten <aboten@lightstep.com>
2020-03-23 09:43:03 -07:00

version: 2
# Common configuration blocks as YAML anchors
# See: https://circleci.com/blog/circleci-hacks-reuse-yaml-in-your-circleci-config-with-yaml/
httpbin_local: &httpbin_local
image: kennethreitz/httpbin@sha256:2c7abc4803080c22928265744410173b6fea3b898872c01c5fd0f0f9df4a59fb
name: httpbin.org
test_runner: &test_runner
image: datadog/docker-library:ddtrace_py
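# As a sketch of how these anchors are consumed below (illustrative only, not part
# of the live config): `- *test_runner` inlines the anchored mapping as-is, while
# `<<: *test_runner` merges it so a job can add keys alongside it, e.g.:
#
#   docker:
#     - image: datadog/docker-library:ddtrace_py  # what `- *test_runner` expands to
#     - <<: *test_runner                          # same image, plus an env override
#       env:
#         TEST_DATADOG_INTEGRATION: 1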
restore_cache_step: &restore_cache_step
restore_cache:
keys:
# In the cache key:
# - .Environment.CIRCLE_JOB: we keep tox environments separate per job, so caching
#   and restoring is much faster.
- tox-cache-{{ .Environment.CIRCLE_JOB }}-{{ checksum "tox.ini" }}
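# For example, in the `flake8` job this template evaluates to something like
# `tox-cache-flake8-<checksum of tox.ini>`, so each job keeps its own .tox cache
# and the cache is invalidated whenever tox.ini changes.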
resource_class: &resource_class small
save_cache_step: &save_cache_step
save_cache:
key: tox-cache-{{ .Environment.CIRCLE_JOB }}-{{ checksum "tox.ini" }}
paths:
- .tox
deploy_docs_filters: &deploy_docs_filters
filters:
tags:
only: /(^docs$)|(^v[0-9]+(\.[0-9]+)*$)/
branches:
ignore: /.*/
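# The tag regex above matches either the literal tag `docs` or version tags such as
# `v1`, `v0.34`, and `v0.34.1`; since every branch is ignored, these jobs run only
# when a matching tag is pushed.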
persist_to_workspace_step: &persist_to_workspace_step
persist_to_workspace:
root: /tmp
paths:
- "*.results"
jobs:
black:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e 'black' --result-json /tmp/black.results
- *persist_to_workspace_step
- *save_cache_step
flake8:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e 'flake8' --result-json /tmp/flake8.results
- *persist_to_workspace_step
- *save_cache_step
# Test that we can build the package properly and that the package long description will render
test_build:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
# Create a Python 3.7 virtualenv (it is never activated; its binaries are invoked directly below)
- run: virtualenv --python python3.7 .venv/build
# Install required dependencies
# DEV: `pyopenssl` needed until the following PR is released
# https://github.com/pypa/twine/pull/447
# DEV: `wheel` is needed to run `bdist_wheel`
- run: .venv/build/bin/pip install twine readme_renderer[md] pyopenssl wheel
# Ensure no stale build artifacts are left over from previous runs
- run: rm -rf build/ dist/
# Manually build any extensions to ensure they succeed
# DEV: `DDTRACE_BUILD_RAISE=TRUE` will ensure we don't swallow any build errors
- run: DDTRACE_BUILD_RAISE=TRUE .venv/build/bin/python setup.py build_ext --force
# Ensure source package will build
- run: .venv/build/bin/python setup.py sdist
# Ensure wheel will build
# DEV: `DDTRACE_BUILD_RAISE=TRUE` will ensure we don't swallow any build errors
- run: DDTRACE_BUILD_RAISE=TRUE .venv/build/bin/python setup.py bdist_wheel
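# Assuming a standard setuptools layout, the two build steps above should leave a
# source archive (presumably dist/ddtrace-<version>.tar.gz) and a wheel in dist/
# for `twine check` to validate next.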
# Ensure package long description is valid and will render
# https://github.com/pypa/twine/tree/6c4d5ecf2596c72b89b969ccc37b82c160645df8#twine-check
- run: .venv/build/bin/twine check dist/*
tracer:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^py..-tracer$'
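# `scripts/run-tox-scenario` is not shown in this file; presumably it runs every
# tox environment whose name matches the given grep-style pattern (so
# '^py..-tracer$' selects py35-tracer, py36-tracer, ...) and writes a .results
# file for `wait_all_tests` to verify.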
- *persist_to_workspace_step
- *save_cache_step
internal:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^py..-internal'
- *persist_to_workspace_step
- *save_cache_step
opentracer:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^py..-opentracer'
- *persist_to_workspace_step
- *save_cache_step
integration:
docker:
- <<: *test_runner
env:
TEST_DATADOG_INTEGRATION: 1
- image: datadog/docker-dd-agent
env:
- DD_APM_ENABLED=true
- DD_BIND_HOST=0.0.0.0
- DD_API_KEY=invalid_key_but_this_is_fine
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^py..-integration$'
- *persist_to_workspace_step
- *save_cache_step
futures:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^futures_contrib-'
- *persist_to_workspace_step
- *save_cache_step
boto:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^boto\(core\)\?_contrib-'
- *persist_to_workspace_step
- *save_cache_step
ddtracerun:
docker:
- *test_runner
- image: redis:4.0-alpine
resource_class: *resource_class
steps:
- checkout
- run: scripts/run-tox-scenario '^py..-ddtracerun$'
- *persist_to_workspace_step
test_utils:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^py..-test_utils$'
- *persist_to_workspace_step
- *save_cache_step
test_logging:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^py..-test_logging$'
- *persist_to_workspace_step
- *save_cache_step
asyncio:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^asyncio_contrib-'
- *persist_to_workspace_step
- *save_cache_step
aiohttp:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^aiohttp_contrib-'
- *persist_to_workspace_step
- *save_cache_step
tornado:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^tornado_contrib-'
- *persist_to_workspace_step
- *save_cache_step
bottle:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^bottle_contrib\(_autopatch\)\?-'
- *persist_to_workspace_step
- *save_cache_step
cassandra:
docker:
- <<: *test_runner
env:
CASS_DRIVER_NO_EXTENSIONS: 1
- image: spotify/cassandra:latest
env:
- MAX_HEAP_SIZE=512M
- HEAP_NEWSIZE=256M
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e wait cassandra
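# `wait` is a helper tox environment defined in tox.ini (not shown); the positional
# argument is presumably the backing service to poll until it accepts connections,
# since the sidecar containers can take a while to boot.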
- run: scripts/run-tox-scenario '^cassandra_contrib-'
- *persist_to_workspace_step
- *save_cache_step
celery:
docker:
- *test_runner
- image: redis:4.0-alpine
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^celery_contrib-'
- *persist_to_workspace_step
- *save_cache_step
consul:
docker:
- *test_runner
- image: consul:1.6.0
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^consul_contrib-'
- *persist_to_workspace_step
- *save_cache_step
dogpile_cache:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^dogpile_contrib-'
- *persist_to_workspace_step
- *save_cache_step
elasticsearch:
docker:
- *test_runner
- image: elasticsearch:2.3
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^elasticsearch_contrib-'
- *persist_to_workspace_step
- *save_cache_step
falcon:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e 'falcon_contrib{,_autopatch}-{py34,py35,py36}-falcon{10,11,12,13,14}' --result-json /tmp/falcon.results
- *persist_to_workspace_step
- *save_cache_step
django:
docker:
- *test_runner
- image: redis:4.0-alpine
- image: memcached:1.5-alpine
- image: datadog/docker-dd-agent
env:
- DD_APM_ENABLED=true
- DD_BIND_HOST=0.0.0.0
- DD_API_KEY=invalid_key_but_this_is_fine
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^django_'
- *persist_to_workspace_step
- *save_cache_step
flask:
docker:
- *test_runner
- image: redis:4.0-alpine
- image: memcached:1.5-alpine
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^flask_\(cache_\)\?contrib\(_autopatch\)\?-'
- *persist_to_workspace_step
- *save_cache_step
gevent:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^gevent_contrib-'
- *persist_to_workspace_step
- *save_cache_step
httplib:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^httplib_contrib-'
- *persist_to_workspace_step
- *save_cache_step
grpc:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^grpc_contrib-'
- *persist_to_workspace_step
- *save_cache_step
molten:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^molten_contrib-'
- *persist_to_workspace_step
- *save_cache_step
mysqlconnector:
docker:
- *test_runner
- image: mysql:5.7
env:
- MYSQL_ROOT_PASSWORD=admin
- MYSQL_PASSWORD=test
- MYSQL_USER=test
- MYSQL_DATABASE=test
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e 'wait' mysql
- run: scripts/run-tox-scenario '^mysql_contrib-.*-mysqlconnector'
- *persist_to_workspace_step
- *save_cache_step
mysqlpython:
docker:
- *test_runner
- image: mysql:5.7
env:
- MYSQL_ROOT_PASSWORD=admin
- MYSQL_PASSWORD=test
- MYSQL_USER=test
- MYSQL_DATABASE=test
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e 'wait' mysql
- run: scripts/run-tox-scenario '^mysqldb_contrib-.*-mysqlclient'
- *persist_to_workspace_step
- *save_cache_step
mysqldb:
docker:
- *test_runner
- image: mysql:5.7
env:
- MYSQL_ROOT_PASSWORD=admin
- MYSQL_PASSWORD=test
- MYSQL_USER=test
- MYSQL_DATABASE=test
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e 'wait' mysql
- run: scripts/run-tox-scenario '^mysqldb_contrib-.*-mysqldb'
- *persist_to_workspace_step
- *save_cache_step
pymysql:
docker:
- *test_runner
- image: mysql:5.7
env:
- MYSQL_ROOT_PASSWORD=admin
- MYSQL_PASSWORD=test
- MYSQL_USER=test
- MYSQL_DATABASE=test
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e 'wait' mysql
- run: scripts/run-tox-scenario '^pymysql_contrib-'
- *persist_to_workspace_step
- *save_cache_step
pylibmc:
docker:
- *test_runner
- image: memcached:1.5-alpine
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^pylibmc_contrib-'
- *persist_to_workspace_step
- *save_cache_step
pymemcache:
docker:
- *test_runner
- image: memcached:1.5-alpine
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^pymemcache_contrib\(_autopatch\)\?-'
- *persist_to_workspace_step
- *save_cache_step
mongoengine:
docker:
- *test_runner
- image: mongo:3.6
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^mongoengine_contrib-'
- *persist_to_workspace_step
- *save_cache_step
pymongo:
docker:
- *test_runner
- image: mongo:3.6
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^pymongo_contrib-'
- *persist_to_workspace_step
- *save_cache_step
pyramid:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^pyramid_contrib\(_autopatch\)\?-'
- *persist_to_workspace_step
- *save_cache_step
requests:
docker:
- *test_runner
- *httpbin_local
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^requests_contrib\(_autopatch\)\?-'
- *persist_to_workspace_step
- *save_cache_step
requestsgevent:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^requests_gevent_contrib-'
- *persist_to_workspace_step
- *save_cache_step
sqlalchemy:
docker:
- *test_runner
- image: postgres:10.5-alpine
env:
- POSTGRES_PASSWORD=postgres
- POSTGRES_USER=postgres
- POSTGRES_DB=postgres
- image: mysql:5.7
env:
- MYSQL_ROOT_PASSWORD=admin
- MYSQL_PASSWORD=test
- MYSQL_USER=test
- MYSQL_DATABASE=test
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e 'wait' postgres mysql
- run: scripts/run-tox-scenario '^sqlalchemy_contrib-'
- *persist_to_workspace_step
- *save_cache_step
dbapi:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^dbapi_contrib-'
- *persist_to_workspace_step
- *save_cache_step
psycopg:
docker:
- *test_runner
- image: postgres:10.5-alpine
env:
- POSTGRES_PASSWORD=postgres
- POSTGRES_USER=postgres
- POSTGRES_DB=postgres
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e 'wait' postgres
- run: scripts/run-tox-scenario '^psycopg_contrib-'
- *persist_to_workspace_step
- *save_cache_step
aiobotocore:
docker:
- *test_runner
- image: palazzem/moto:1.0.1
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^aiobotocore_contrib-'
- *persist_to_workspace_step
- *save_cache_step
aiopg:
docker:
- *test_runner
- image: postgres:10.5-alpine
env:
- POSTGRES_PASSWORD=postgres
- POSTGRES_USER=postgres
- POSTGRES_DB=postgres
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e 'wait' postgres
- run: scripts/run-tox-scenario '^aiopg_contrib-'
- *persist_to_workspace_step
- *save_cache_step
redis:
docker:
- *test_runner
- image: redis:4.0-alpine
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^redis_contrib-'
- *persist_to_workspace_step
- *save_cache_step
rediscluster:
docker:
- *test_runner
- image: grokzen/redis-cluster:4.0.9
env:
- IP=0.0.0.0
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e wait rediscluster
- run: scripts/run-tox-scenario '^rediscluster_contrib-'
- *persist_to_workspace_step
- *save_cache_step
vertica:
docker:
- *test_runner
- image: sumitchawla/vertica
env:
- VP_TEST_USER=dbadmin
- VP_TEST_PASSWORD=abc123
- VP_TEST_DATABASE=docker
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e wait vertica
- run: scripts/run-tox-scenario '^vertica_contrib-'
- *persist_to_workspace_step
- *save_cache_step
kombu:
docker:
- *test_runner
- image: rabbitmq:3.7-alpine
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: tox -e wait rabbitmq
- run: scripts/run-tox-scenario '^kombu_contrib-'
- *persist_to_workspace_step
- *save_cache_step
sqlite3:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^sqlite3_contrib-'
- *persist_to_workspace_step
- *save_cache_step
unit_tests:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^unit_tests-'
- *persist_to_workspace_step
- *save_cache_step
benchmarks:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run:
command: |
mkdir -p /tmp/test-reports
tox -e 'benchmarks-{py34,py35,py36,py37}' --result-json /tmp/benchmarks.results -- --benchmark-storage=file:///tmp/test-reports/ --benchmark-autosave
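# --benchmark-storage and --benchmark-autosave are pytest-benchmark flags: autosave
# writes each run's results as JSON under the storage path, which the
# store_test_results/store_artifacts steps below pick up from /tmp/test-reports.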
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-reports
- *persist_to_workspace_step
- *save_cache_step
deploy_dev:
# build the master branch, releasing development docs and wheels
docker:
- image: circleci/python:3.6
resource_class: *resource_class
steps:
- checkout
- run: sudo apt-get -y install rake
- run: sudo pip install mkwheelhouse sphinx awscli
- run: S3_DIR=trace-dev rake release:docs
- run: S3_DIR=trace-dev rake release:wheel
jinja2:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^jinja2_contrib-'
- *persist_to_workspace_step
- *save_cache_step
mako:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^mako_contrib-'
- *persist_to_workspace_step
- *save_cache_step
algoliasearch:
docker:
- *test_runner
resource_class: *resource_class
steps:
- checkout
- *restore_cache_step
- run: scripts/run-tox-scenario '^algoliasearch_contrib-'
- *persist_to_workspace_step
- *save_cache_step
build_docs:
# deploy official documentation
docker:
- image: circleci/python:3.6
resource_class: *resource_class
steps:
- checkout
- run: sudo apt-get -y install rake
# Sphinx 1.7.5 is required; otherwise the docs are not built properly
- run: sudo pip install mkwheelhouse sphinx==1.7.5 wrapt
- run: rake docs
- run:
command: |
mkdir -p /tmp/docs
cp -r docs/_build/html/* /tmp/docs
- store_artifacts:
path: /tmp/docs
deploy_to_s3:
# deploy official documentation
docker:
- image: circleci/python:3.6
resource_class: *resource_class
steps:
- checkout
- run: sudo apt-get -y install rake
# Sphinx 1.7.5 is required; otherwise the docs are not built properly
- run: sudo pip install mkwheelhouse sphinx==1.7.5 awscli wrapt
- run: S3_DIR=trace rake release:docs
wait_all_tests:
# this job ensures all `tox` environments were properly executed
docker:
- *test_runner
resource_class: *resource_class
steps:
- attach_workspace:
at: /tmp/workspace
- checkout
- run: ls /tmp/workspace/*
# debug: shows how many times each test was executed
- run: jq -s ".[]|.testenvs|keys|.[]" /tmp/workspace/* | grep -v GLOB | sed 's/"//g' | sort | uniq -c | sort -rn
# list all executed tests
- run: jq -s ".[]|.testenvs|keys|.[]" /tmp/workspace/* | grep -v GLOB | sed 's/"//g' | sort | uniq | tee all_executed_tests
# list all tests in tox.ini
- run: tox -l | grep -v "^wait$" | sort > all_tests
# checks that all tests were executed
- run: diff all_tests all_executed_tests
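# If any environment listed in tox.ini never produced results (or vice versa), the
# `diff` above exits non-zero and this job fails, failing the workflow.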
workflows:
version: 2
deploy_docs:
jobs:
- build_docs:
<<: *deploy_docs_filters
- approve_docs_deployment:
<<: *deploy_docs_filters
type: approval
requires:
- build_docs
- deploy_to_s3:
<<: *deploy_docs_filters
requires:
- approve_docs_deployment
test:
jobs:
- build_docs
- black
- flake8
- test_build
- aiobotocore:
requires:
- flake8
- black
- aiohttp:
requires:
- flake8
- black
- aiopg:
requires:
- flake8
- black
- asyncio:
requires:
- flake8
- black
- algoliasearch:
requires:
- flake8
- black
- benchmarks:
requires:
- flake8
- black
- boto:
requires:
- flake8
- black
- bottle:
requires:
- flake8
- black
- cassandra:
requires:
- flake8
- black
- celery:
requires:
- flake8
- black
- consul:
requires:
- flake8
- black
- dbapi:
requires:
- flake8
- black
- ddtracerun:
requires:
- flake8
- black
- django:
requires:
- flake8
- black
- dogpile_cache:
requires:
- flake8
- black
- elasticsearch:
requires:
- flake8
- black
- falcon:
requires:
- flake8
- black
- flask:
requires:
- flake8
- black
- futures:
requires:
- flake8
- black
- gevent:
requires:
- flake8
- black
- grpc:
requires:
- flake8
- black
- httplib:
requires:
- flake8
- black
- integration:
requires:
- flake8
- black
- internal:
requires:
- flake8
- black
- jinja2:
requires:
- flake8
- black
- kombu:
requires:
- flake8
- black
- mako:
requires:
- flake8
- black
- molten:
requires:
- flake8
- black
- mongoengine:
requires:
- flake8
- black
- mysqlconnector:
requires:
- flake8
- black
- mysqldb:
requires:
- flake8
- black
- mysqlpython:
requires:
- flake8
- black
- opentracer:
requires:
- flake8
- black
- psycopg:
requires:
- flake8
- black
- pylibmc:
requires:
- flake8
- black
- pymemcache:
requires:
- flake8
- black
- pymongo:
requires:
- flake8
- black
- pymysql:
requires:
- flake8
- black
- pyramid:
requires:
- flake8
- black
- redis:
requires:
- flake8
- black
- rediscluster:
requires:
- flake8
- black
- requests:
requires:
- flake8
- black
- requestsgevent:
requires:
- flake8
- black
- sqlalchemy:
requires:
- flake8
- black
- sqlite3:
requires:
- flake8
- black
- test_utils:
requires:
- flake8
- black
- test_logging:
requires:
- flake8
- black
- tornado:
requires:
- flake8
- black
- tracer:
requires:
- flake8
- black
- unit_tests:
requires:
- flake8
- black
- vertica:
requires:
- flake8
- black
- wait_all_tests:
requires:
# Initial jobs
- build_docs
- black
- flake8
- test_build
# flake8 dependent jobs
- aiobotocore
- aiohttp
- aiopg
- asyncio
- algoliasearch
- benchmarks
- boto
- bottle
- cassandra
- celery
- consul
- dbapi
- ddtracerun
- dogpile_cache
- django
- elasticsearch
- falcon
- flask
- futures
- gevent
- grpc
- httplib
- integration
- internal
- jinja2
- kombu
- mako
- molten
- mongoengine
- mysqlconnector
- mysqldb
- mysqlpython
- opentracer
- psycopg
- pylibmc
- pymemcache
- pymongo
- pymysql
- pyramid
- redis
- rediscluster
- requests
- requestsgevent
- sqlalchemy
- sqlite3
- test_utils
- test_logging
- tornado
- tracer
- unit_tests
- vertica
- deploy_dev:
requires:
- wait_all_tests
filters:
branches:
only: master