New Upstream Release - python-confluent-kafka

Ready changes

Summary

Merged new upstream version: 2.1.1rc1 (was: 1.7.0).
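
A quick way to confirm which client and librdkafka versions are actually installed after the upgrade (a minimal check, not part of the upstream diff below):

```python
import confluent_kafka

# version() reports the Python client version, libversion() the bundled
# (or system) librdkafka version; both return a (str, int) tuple.
print("confluent-kafka-python:", confluent_kafka.version()[0])
print("librdkafka:", confluent_kafka.libversion()[0])
```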

Diff

diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..17ba260
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,136 @@
+---
+Language:        Cpp
+AccessModifierOffset: -2
+AlignAfterOpenBracket: Align
+AlignConsecutiveMacros: true
+AlignConsecutiveAssignments: true
+AlignConsecutiveDeclarations: false
+AlignEscapedNewlines: Right
+AlignOperands:   true
+AlignTrailingComments: true
+AllowAllArgumentsOnNextLine: true
+AllowAllConstructorInitializersOnNextLine: true
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortBlocksOnASingleLine: Never
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: None
+AllowShortLambdasOnASingleLine: All
+AllowShortIfStatementsOnASingleLine: Never
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: true
+AlwaysBreakTemplateDeclarations: MultiLine
+BinPackArguments: true
+BinPackParameters: false
+BraceWrapping:
+  AfterCaseLabel:  false
+  AfterClass:      false
+  AfterControlStatement: false
+  AfterEnum:       false
+  AfterFunction:   false
+  AfterNamespace:  false
+  AfterObjCDeclaration: false
+  AfterStruct:     false
+  AfterUnion:      false
+  AfterExternBlock: false
+  BeforeCatch:     false
+  BeforeElse:      false
+  IndentBraces:    false
+  SplitEmptyFunction: true
+  SplitEmptyRecord: true
+  SplitEmptyNamespace: true
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Custom
+BreakBeforeInheritanceComma: false
+BreakInheritanceList: BeforeColon
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: false
+BreakConstructorInitializers: AfterColon
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: true
+ColumnLimit:     80
+CommentPragmas:  '^ IWYU pragma:'
+CompactNamespaces: false
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: true
+DeriveLineEnding: true
+DerivePointerAlignment: false
+DisableFormat:   false
+ExperimentalAutoDetectBinPacking: false
+FixNamespaceComments: true
+ForEachMacros:
+  - foreach
+  - Q_FOREACH
+  - BOOST_FOREACH
+IncludeBlocks:   Preserve
+IncludeCategories:
+  - Regex:           '^"(llvm|llvm-c|clang|clang-c)/'
+    Priority:        2
+    SortPriority:    0
+  - Regex:           '^(<|"(gtest|gmock|isl|json)/)'
+    Priority:        3
+    SortPriority:    0
+  - Regex:           '.*'
+    Priority:        1
+    SortPriority:    0
+IncludeIsMainRegex: '(Test)?$'
+IncludeIsMainSourceRegex: ''
+IndentCaseLabels: false
+IndentGotoLabels: true
+IndentPPDirectives: None
+IndentWidth:     8
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: true
+MacroBlockBegin: ''
+MacroBlockEnd:   ''
+MaxEmptyLinesToKeep: 3
+NamespaceIndentation: None
+ObjCBinPackProtocolList: Auto
+ObjCBlockIndentWidth: 2
+ObjCSpaceAfterProperty: false
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakAssignment: 2
+PenaltyBreakBeforeFirstCallParameter: 19
+PenaltyBreakComment: 300
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakString: 1000
+PenaltyBreakTemplateDeclaration: 10
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 60
+PointerAlignment: Right
+ReflowComments:  true
+SortIncludes:    false
+SortUsingDeclarations: true
+SpaceAfterCStyleCast: false
+SpaceAfterLogicalNot: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeCpp11BracedList: true
+SpaceBeforeCtorInitializerColon: true
+SpaceBeforeInheritanceColon: true
+SpaceBeforeParens: ControlStatements
+SpaceBeforeRangeBasedForLoopColon: true
+SpaceInEmptyBlock: false
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 2
+SpacesInAngles:  false
+SpacesInConditionalStatement: false
+SpacesInContainerLiterals: false
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+SpaceBeforeSquareBrackets: false
+Standard:        Latest
+StatementMacros:
+  - Q_UNUSED
+  - QT_REQUIRE_VERSION
+TabWidth:        8
+UseCRLF:         false
+UseTab:          Never
+...
+
diff --git a/.formatignore b/.formatignore
new file mode 100644
index 0000000..0e733ee
--- /dev/null
+++ b/.formatignore
@@ -0,0 +1,14 @@
+# Files to not check/fix coding style for.
+# These files are imported from other sources and we want to maintain
+# them in the original form to make future updates easier.
+docs/conf.py
+examples/protobuf/user_pb2.py
+tests/integration/schema_registry/data/proto/DependencyTestProto_pb2.py
+tests/integration/schema_registry/data/proto/NestedTestProto_pb2.py
+tests/integration/schema_registry/data/proto/PublicTestProto_pb2.py
+tests/integration/schema_registry/data/proto/SInt32Value_pb2.py
+tests/integration/schema_registry/data/proto/SInt64Value_pb2.py
+tests/integration/schema_registry/data/proto/TestProto_pb2.py
+tests/integration/schema_registry/data/proto/common_proto_pb2.py
+tests/integration/schema_registry/data/proto/exampleProtoCriteo_pb2.py
+tests/integration/schema_registry/data/proto/metadata_proto_pb2.py
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000..279caf2
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,2 @@
+# See go/codeowners - automatically generated for confluentinc/confluent-kafka-python:
+*	@confluentinc/clients
diff --git a/.semaphore/project.yml b/.semaphore/project.yml
new file mode 100644
index 0000000..cca9423
--- /dev/null
+++ b/.semaphore/project.yml
@@ -0,0 +1,43 @@
+# This file is managed by ServiceBot plugin - Semaphore. The content in this file is created using a common
+# template and configurations in service.yml.
+# Modifications in this file will be overwritten by generated content in the nightly run.
+# For more information, please refer to the page:
+# https://confluentinc.atlassian.net/wiki/spaces/Foundations/pages/2871296194/Add+SemaphoreCI
+apiVersion: v1alpha
+kind: Project
+metadata:
+  name: confluent-kafka-python
+  description: ""
+spec:
+  visibility: private
+  repository:
+    url: git@github.com:confluentinc/confluent-kafka-python.git
+    run_on:
+    - branches
+    - tags
+    - pull_requests
+    pipeline_file: .semaphore/semaphore.yml
+    integration_type: github_app
+    status:
+      pipeline_files:
+      - path: .semaphore/semaphore.yml
+        level: pipeline
+    whitelist:
+      branches:
+      - master
+      - main
+      - /^v\d+\.\d+\.x$/
+  custom_permissions: true
+  debug_permissions:
+  - empty
+  - default_branch
+  - non_default_branch
+  - pull_request
+  - forked_pull_request
+  - tag
+  attach_permissions:
+  - default_branch
+  - non_default_branch
+  - pull_request
+  - forked_pull_request
+  - tag
diff --git a/.semaphore/semaphore.yml b/.semaphore/semaphore.yml
new file mode 100644
index 0000000..b776480
--- /dev/null
+++ b/.semaphore/semaphore.yml
@@ -0,0 +1,227 @@
+version: v1.0
+name: Test on PR or create and upload wheels on tag.
+agent:
+  machine:
+    type: s1-prod-ubuntu20-04-amd64-1
+global_job_config:
+  env_vars:
+    - name: LIBRDKAFKA_VERSION
+      value: v2.1.1-RC1
+  prologue:
+    commands:
+      - checkout
+      - mkdir artifacts
+blocks:
+  - name: "Wheels: OSX x64"
+    run:
+      when: "tag =~ '.*'"
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-macos
+      env_vars:
+        - name: OS_NAME
+          value: osx
+        - name: ARCH
+          value: x64
+      jobs:
+        - name: Build
+          commands:
+            - PIP_INSTALL_OPTIONS="--user" tools/wheels/build-wheels.sh "${LIBRDKAFKA_VERSION#v}" wheelhouse
+            - tar -czf wheelhouse-macOS-${ARCH}.tgz wheelhouse
+            - artifact push workflow wheelhouse-macOS-${ARCH}.tgz --destination artifacts/wheels-${OS_NAME}-${ARCH}.tgz/
+  - name: "Wheels: OSX arm64"
+    run:
+      when: "tag =~ '.*'"
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-macos-arm64
+      env_vars:
+        - name: OS_NAME
+          value: osx
+        - name: CIBW_ARCHS
+          value: arm64
+        - name: ARCH
+          value: arm64
+      jobs:
+        - name: Build
+          commands:
+            - PIP_INSTALL_OPTIONS="--user" tools/wheels/build-wheels.sh "${LIBRDKAFKA_VERSION#v}" wheelhouse
+            - tar -czf wheelhouse-macOS-${ARCH}.tgz wheelhouse
+            - artifact push workflow wheelhouse-macOS-${ARCH}.tgz --destination artifacts/wheels-${OS_NAME}-${ARCH}.tgz/
+  - name: "Wheels: Linux arm64"
+    run:
+      when: "tag =~ '.*'"
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-ubuntu20-04-arm64-1
+      env_vars:
+        - name: OS_NAME
+          value: linux
+        - name: ARCH
+          value: arm64
+      jobs:
+        - name: Build
+          commands:
+            - ./tools/build-manylinux.sh "${LIBRDKAFKA_VERSION#v}"
+            - tar -czf wheelhouse-linux-${ARCH}.tgz wheelhouse
+            - artifact push workflow wheelhouse-linux-${ARCH}.tgz --destination artifacts/wheels-${OS_NAME}-${ARCH}.tgz/
+  - name: "Wheels: Linux x64"
+    run:
+      when: "tag =~ '.*'"
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-ubuntu20-04-amd64-3
+      env_vars:
+        - name: OS_NAME
+          value: linux
+        - name: ARCH
+          value: x64
+      jobs:
+        - name: Build
+          commands:
+            - ./tools/wheels/build-wheels.sh "${LIBRDKAFKA_VERSION#v}" wheelhouse
+            - tar -czf wheelhouse-linux-${ARCH}.tgz wheelhouse
+            - artifact push workflow wheelhouse-linux-${ARCH}.tgz --destination artifacts/wheels-${OS_NAME}-${ARCH}.tgz/
+  - name: "Wheels: Windows"
+    run:
+      when: "tag =~ '.*'"
+    dependencies: []
+    task:
+      agent:
+          machine:
+            type: s1-prod-windows
+      env_vars:
+        - name: OS_NAME
+          value: windows
+        - name: ARCH
+          value: x64
+      prologue:
+        commands:
+          - cache restore msys2-x64
+          - ".\\tools\\mingw-w64\\setup-msys2.ps1"
+          - $env:PATH = 'C:\msys64\usr\bin;' + $env:PATH
+          - bash -lc './tools/mingw-w64/msys2-dependencies.sh'
+          - cache delete msys2-x64
+          - cache store msys2-x64 c:/msys64
+      jobs:
+        - name: Build
+          env_vars:
+            - name: CHERE_INVOKING
+              value: 'yes'
+            - name: MSYSTEM
+              value: UCRT64
+          commands:
+            - bash tools/mingw-w64/semaphore_commands.sh
+            - bash tools/wheels/install-librdkafka.sh $env:LIBRDKAFKA_VERSION.TrimStart("v") dest
+            - tools/wheels/build-wheels.bat x64 win_amd64 dest wheelhouse
+            - tar -czf wheelhouse-windows-${Env:ARCH}.tgz wheelhouse
+            - artifact push workflow wheelhouse-windows-${Env:ARCH}.tgz --destination artifacts/wheels-${Env:OS_NAME}-${Env:ARCH}.tgz/
+  - name: "Source package verification and Integration tests with Python 3 (Linux x64)"
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-ubuntu20-04-amd64-2
+      env_vars:
+        - name: OS_NAME
+          value: linux
+        - name: ARCH
+          value: x64
+      jobs:
+        - name: Build
+          commands:
+            - sem-version python 3.8
+            # use a virtualenv
+            - python3 -m venv _venv && source _venv/bin/activate
+            - chmod u+r+x tools/source-package-verification.sh
+            - tools/source-package-verification.sh
+  - name: "Source package verification with Python 3 (Linux arm64)"
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-ubuntu20-04-arm64-1
+      env_vars:
+        - name: OS_NAME
+          value: linux
+        - name: ARCH
+          value: arm64
+      jobs:
+        - name: Build
+          commands:
+            - sem-version python 3.8
+            # use a virtualenv
+            - python3 -m venv _venv && source _venv/bin/activate
+            - chmod u+r+x tools/source-package-verification.sh
+            - tools/source-package-verification.sh
+  - name: "Source package verification with Python 3 (OSX x64) +docs"
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-macos
+      env_vars:
+        - name: OS_NAME
+          value: osx
+        - name: ARCH
+          value: x64
+      jobs:
+        - name: Build
+          commands:
+            - sem-version python 3.8
+            # use a virtualenv
+            - python3 -m venv _venv && source _venv/bin/activate
+            - chmod u+r+x tools/source-package-verification.sh
+            - tools/source-package-verification.sh
+  - name: "Source package verification with Python 3 (OSX arm64) +docs"
+    dependencies: []
+    task:
+      agent:
+        machine:
+          type: s1-prod-macos-arm64
+      env_vars:
+        - name: OS_NAME
+          value: osx
+        - name: ARCH
+          value: arm64
+      jobs:
+        - name: Build
+          commands:
+            - sem-version python 3.8
+            # use a virtualenv
+            - python3 -m venv _venv && source _venv/bin/activate
+            - chmod u+r+x tools/source-package-verification.sh
+            - tools/source-package-verification.sh
+  - name: "Packaging"
+    run:
+      when: "tag =~ '.*'"
+    dependencies:
+      - "Wheels: OSX x64"
+      - "Wheels: OSX arm64"
+      - "Wheels: Linux arm64"
+      - "Wheels: Linux x64"
+      - "Wheels: Windows"
+    task:
+      agent:
+        machine:
+          type: s1-prod-ubuntu20-04-amd64-3
+      jobs:
+        - name: "Packaging all artifacts"
+          commands:
+            - artifact pull workflow artifacts
+            - cd artifacts
+            - ls *.tgz |xargs -n1 tar -xvf
+            - tar cvf confluent-kafka-python-wheels-${SEMAPHORE_GIT_TAG_NAME}-${SEMAPHORE_WORKFLOW_ID}.tgz wheelhouse/
+            - ls -la
+            - sha256sum confluent-kafka-python-wheels-${SEMAPHORE_GIT_TAG_NAME}-${SEMAPHORE_WORKFLOW_ID}.tgz
+            - cd ..
+            - artifact push project artifacts/confluent-kafka-python-wheels-${SEMAPHORE_GIT_TAG_NAME}-${SEMAPHORE_WORKFLOW_ID}.tgz --destination confluent-kafka-python-wheels-${SEMAPHORE_GIT_TAG_NAME}-${SEMAPHORE_WORKFLOW_ID}.tgz
+            - echo Thank you
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 998de55..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,122 +0,0 @@
-env:
- global:
-  - LIBRDKAFKA_VERSION=v1.7.0
-
-jobs:
- include:
-  - name: "Source package verification with Python 2.7 (Linux)"
-    os: linux
-    language: python
-    dist: xenial
-    python: "2.7"
-    env: LD_LIBRARY_PATH="$PWD/tmp-build/lib"
-    services: docker
-
-  - name: "Source package verification with Python 3.6 (Linux)"
-    os: linux
-    language: python
-    dist: xenial
-    python: "3.6"
-    env: LD_LIBRARY_PATH="$PWD/tmp-build/lib"
-    services: docker
-
-  - name: "Source package verification with Python 2.7 (OSX)"
-    os: osx
-    python: "2.7"
-    env: DYLD_LIBRARY_PATH="$PWD/tmp-build/lib" INTERPRETER_VERSION="2.7.17"
-
-  - name: "Source package verification with Python 3.6 (OSX) +docs"
-    os: osx
-    python: "3.6"
-    env: DYLD_LIBRARY_PATH="$PWD/tmp-build/lib" MK_DOCS="y" INTERPRETER_VERSION="3.6.5"
-
-  - name: "Wheels: Windows x64"
-    if: tag is present
-    os: windows
-    language: shell
-    env: BUILD_WHEELS=1
-    before_install:
-      - choco install python --version 3.8.0
-      - export PATH="/c/Python38:/c/Python38/Scripts:$PATH"
-      # make sure it's on PATH as 'python3'
-      - ln -s /c/Python38/python.exe /c/Python38/python3.exe
-    install:
-      - bash tools/wheels/install-librdkafka.sh ${LIBRDKAFKA_VERSION#v} dest
-    script:
-      - tools/wheels/build-wheels.bat x64 win_amd64 dest wheelhouse
-
-  - name: "Wheels: Windows x86"
-    if: tag is present
-    os: windows
-    language: shell
-    env: BUILD_WHEELS=1
-    before_install:
-      - choco install python --version 3.8.0
-      - export PATH="/c/Python38:/c/Python38/Scripts:$PATH"
-      # make sure it's on PATH as 'python3'
-      - ln -s /c/Python38/python.exe /c/Python38/python3.exe
-    install:
-      - bash tools/wheels/install-librdkafka.sh ${LIBRDKAFKA_VERSION#v} dest
-    script:
-      - tools/wheels/build-wheels.bat x86 win32 dest wheelhouse
-
-  - name: "Wheels: Linux x64"
-    if: tag is present
-    language: python
-    python: "3.8"
-    services: docker
-    env: BUILD_WHEELS=1
-    script: tools/wheels/build-wheels.sh ${LIBRDKAFKA_VERSION#v} wheelhouse
-
-  - name: "Wheels: MacOSX x64"
-    if: tag is present
-    os: osx
-    language: shell
-    env: BUILD_WHEELS=1
-    script: tools/wheels/build-wheels.sh ${LIBRDKAFKA_VERSION#v} wheelhouse
-
-
-# Install test dependencies unconditionally
-# Travis OSX envs requires some setup; see tools/prepare-osx.sh
-# Install cibuildwheel if this is a tagged PR
-before_install:
-  - if [[ $TRAVIS_OS_NAME == "osx" && $BUILD_WHEELS != 1 ]]; then tools/prepare-osx.sh ${INTERPRETER_VERSION} /tmp/venv && source /tmp/venv/bin/activate; fi
-
-install:
- # Install interceptors
- - tools/install-interceptors.sh
- - if [[ $BUILD_WHEELS != 1 ]]; then pip install -r tests/requirements.txt ; fi
- - if [[ $MK_DOCS == y ]]; then pip install -r docs/requirements.txt; fi
- # Install librdkafka and confluent_kafka[avro] if not building wheels
- - if [[ $BUILD_WHEELS != 1 ]]; then pip install -U protobuf && tools/bootstrap-librdkafka.sh --require-ssl ${LIBRDKAFKA_VERSION} tmp-build ; fi
-
-
-
-# Note: Will not be run for wheel builds.
-script:
- - flake8
- # Build package
- -  pip install --global-option=build_ext --global-option="-Itmp-build/include/" --global-option="-Ltmp-build/lib" . .[avro] .[schema-registry] .[json] .[protobuf]
- - ldd staging/libs/* || otool -L staging/libs/* || true
- # Run tests
- - if [[ $TRAVIS_OS_NAME == "linux" ]]; then LD_LIBRARY_PATH=$LD_LIBRARY_PATH:staging/libs DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH:staging/libs python -m pytest --timeout 600 --ignore=tmp-build || travis_terminate 1; fi
- # Build docs
- - if [[ $MK_DOCS == y ]]; then make docs; fi
-
-deploy:
- provider: s3
- edge: true
- access_key_id:
-  secure: "Mcp7TRPNt3Eilct5UXRu6dI1NelOtxG0a+y2riKGDCEoWivTDbkcFdYFFnV8xOZPZzgTr2W4vNpXNa6SoJar1zs7moVFi0nUweb7EWNuAJZo1JRmzAvoCf3Wvsn2solEHo7jYNsIwm8LHaR9XCypPcFIEyzKEjiPd7wGZucogQEdPIaCC7sCkUcG1y69CtfY+55/68w93JBAQs2uTltF/sue4nQOPAs/urlR6Qy4Sek0BtkzTj3YN7gIB3/7zz/0lNAhx1mkPUJTnCdTWLQKj0BnQmFL4eP1jHbo0BLlHTFrHaiFGkF37wnjw532eXLgEGfgtIFpcx+GmNWZZd7MPEf8wLEBetmdoxLIHaat6uaMYEwgBeP1aTfUDR4loTZf0Mrd2ulzBWL5nbxPD+haGzutLuksMReGJTgAB8bZJKC0KRFPTKUEVdQBr4Vobqd7EKj0+VOoPGEPX7PmsT1dQLGonwnrQ47w/RwpkuriGb0zLhJK1uwUAqCV7IjWncPY6uoKfSClpgEwcknJgcIoLy0n6fyYCPfcQQOBQh+oyb/D8dkFvPmXbNwBH9r6rNJFrA0lx9WhJpIvd4j+amGk5rmGrMEJMSB71QgmujR315qvJE1xasL+RaLJwJLn9Q0fImX8LLROdx5Eb+/N7hyxZkFZ9F43WKkcBLLBEBP/mEY="
- secret_access_key:
-  secure: "Bl7hB0J56ZBt9gJBiiVYiVW0mVwK4Y3oOEqARyVXt5M0OcKb176NI6u5d02LdS575ITdvWAxqarQuV1sr31KNjr4MKt85xz4F+Fzf7Yavz1+eG32G+3mxqrvw82T+j3rojVEqjUqNuEQ9st0RAae/z7lJxAsnWUQ27xLFTVZriayojWX8uzvgB77SQut3qkrgODAMPJGv109TpMeOGZROgvi8LY4EXoQIULk8fJV0C67qiTGFVRwarspdznDrTs9WzKYo84UcErg4cWpmxD2U479EfBmI/7hNC06NU0sxhfnkqVKbb1CdhWXx1b0tmn+cM4GwcFL0MHo54jnYunOlL7/ZB7ckmughN+a5wQm6PiHj64aR6gkIyKBtLXUNo+qW3bD43gCgYfILLcKAV+Oag7fDWDyUsQJJvCGkN3KsRHDCWNk7KYS2FQSoOY3Nq1blESiCHC6DXmzzcLi1eTBp/9Eg5QRNielEb7fJkOJOi0XuygRHzgIf2i3c2acP1i2k1drU6Y/pZSNKO3rkXpUKE4nRf2U0n/HxNB7+G2KWn4ZFYH2o6yZdT4JU3oMEQdFIGuBE3AErkaLscZ6d67r2rwIUTGyFnH5UxRNKjDSsrU+a8A2Psyq6a7JsMqTT/V5b2I2/aLuSWmilGRd9x0CrCtyCkNn+WD+/FXCXfmYQ7g="
- bucket: librdkafka-ci-packages
- region: us-west-1
- local-dir: wheelhouse
- upload_dir: confluent-kafka-python/p-confluent-kafka-python__bld-travis__plat-${TRAVIS_OS_NAME}__tag-${TRAVIS_TAG}__sha-${TRAVIS_COMMIT}__bid-${TRAVIS_BUILD_ID}__
- acl: public_read
- cleanup: false
- on:
-  repo: confluentinc/confluent-kafka-python
-  tags: true
-  condition: "$BUILD_WHEELS == 1"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 75ffb8a..7d51cdf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,201 @@
 # Confluent's Python client for Apache Kafka
 
+## v2.1.1
+
+v2.1.1 is a maintenance release with the following fixes and enhancements:
+
+
+### Fixes
+
+- Added a new ConsumerGroupState UNKNOWN. The misspelled state UNKOWN is deprecated and will be removed in the next major version.
+- Fixed some Admin API documentation that incorrectly stated -1 means an infinite timeout.
+  The request timeout can't be infinite.
+
+
+## v2.1.0
+
+v2.1.0 is a feature release with the following features, fixes and enhancements:
+
+- Added `set_sasl_credentials`. This new method (on the Producer, Consumer, and AdminClient) allows modifying the stored
+  SASL PLAIN/SCRAM credentials that will be used for subsequent (new) connections to a broker (#1511).
+- Wheels for Linux / arm64 (#1496).
+- Added support for Default num_partitions in CreateTopics Admin API.
+- Added support for password protected private key in CachedSchemaRegistryClient.
+- Add reference support in Schema Registry client. (@RickTalken, #1304)
+- Migrated Travis jobs to Semaphore CI (#1503).
+- Added support for schema references. (#1514 and @slominskir #1088)
+- [KIP-320](https://cwiki.apache.org/confluence/display/KAFKA/KIP-320%3A+Allow+fetchers+to+detect+and+handle+log+truncation):
+  add offset leader epoch methods to the TopicPartition and Message classes (#1540).
+
+confluent-kafka-python is based on librdkafka v2.1.0, see the
+[librdkafka release notes](https://github.com/edenhill/librdkafka/releases/tag/v2.1.0)
+for a complete list of changes, enhancements, fixes and upgrade considerations.
+
+
+## v2.0.2
+
+v2.0.2 is a feature release with the following features, fixes and enhancements:
+
+ - Added Python 3.11 wheels.
+ - [KIP-222](https://cwiki.apache.org/confluence/display/KAFKA/KIP-222+-+Add+Consumer+Group+operations+to+Admin+API)
+   Add Consumer Group operations to Admin API.
+ - [KIP-518](https://cwiki.apache.org/confluence/display/KAFKA/KIP-518%3A+Allow+listing+consumer+groups+per+state)
+   Allow listing consumer groups per state.
+ - [KIP-396](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=97551484)
+   Partially implemented: support for AlterConsumerGroupOffsets.
+ - As a result of the above KIPs, added (#1449)
+   - `list_consumer_groups` Admin operation. Supports listing by state.
+   - `describe_consumer_groups` Admin operation. Supports multiple groups.
+   - `delete_consumer_groups` Admin operation. Supports multiple groups.
+   - `list_consumer_group_offsets` Admin operation. Currently, only supports 1 group with multiple partitions. Supports require_stable option.
+   - `alter_consumer_group_offsets` Admin operation. Currently, only supports 1 group with multiple offsets.
+ - Added `normalize.schemas` configuration property to Schema Registry client (@rayokota, #1406)
+ - Added metadata to `TopicPartition` type and `commit()` (#1410).
+ - Added `consumer.memberid()` for getting member id assigned to
+   the consumer in a consumer group (#1154).
+ - Implemented `nb_bool` method for the Producer, so that the default (which uses len)
+   will not be used. This avoids situations where producers with no enqueued items would
+   evaluate to False (@vladz-sternum, #1445).
+ - Deprecated `AvroProducer` and `AvroConsumer`. Use `AvroSerializer` and `AvroDeserializer` instead.
+ - Deprecated `list_groups`. Use `list_consumer_groups` and `describe_consumer_groups` instead.
+ - Improved Consumer Example to show at-least-once semantics.
+ - Improved Serialization and Deserialization Examples.
+ - Documentation Improvements.
+
+## Upgrade considerations
+
+OpenSSL 3.0.x upgrade in librdkafka requires a major version bump, as some
+ legacy ciphers need to be explicitly configured to continue working,
+ but it is highly recommended NOT to use them. The rest of the API remains
+ backward compatible.
+
+confluent-kafka-python is based on librdkafka 2.0.2, see the
+[librdkafka v2.0.0 release notes](https://github.com/edenhill/librdkafka/releases/tag/v2.0.0)
+and later ones for a complete list of changes, enhancements, fixes and upgrade considerations.
+
+**Note: There were no v2.0.0 and v2.0.1 releases.**
+
+## v1.9.2
+
+v1.9.2 is a maintenance release with the following fixes and enhancements:
+
+ - Support for setting principal and SASL extensions in oauth_cb
+   and handle failures (@Manicben, #1402)
+ - Wheel for macOS M1/arm64
+ - KIP-140 Admin API ACL fix:
+   When requesting multiple create_acls or delete_acls operations,
+   if the provided ACL bindings or ACL binding filters are not
+   unique, an exception will be thrown immediately rather than later
+   when the responses are read. (#1370).
+ - KIP-140 Admin API ACL fix:
+   Better documentation of the describe and delete ACLs behavior
+   when using the MATCH resource pattern type in a filter. (#1373).
+ - Avro serialization examples:
+   added a parameter for using a generic or specific Avro schema. (#1381).
+
+confluent-kafka-python is based on librdkafka v1.9.2, see the
+[librdkafka release notes](https://github.com/edenhill/librdkafka/releases/tag/v1.9.2)
+for a complete list of changes, enhancements, fixes and upgrade considerations.
+
+
+## v1.9.1
+
+There was no 1.9.1 release of the Python Client.
+
+
+## v1.9.0
+
+This is a feature release:
+
+ - OAUTHBEARER OIDC support
+ - KIP-140 Admin API ACL support
+
+### Fixes
+
+ - The warning for `use.deprecated.format` (introduced in v1.8.2)
+   had its logic reversed, which resulted in warning logs being emitted when
+   the property was correctly configured, and the log message itself also
+   had the condition backwards.
+   The warning is now only emitted when `use.deprecated.format` is set
+   to the old legacy encoding (`True`). #1265
+ - Use `str(Schema)` rather than `Schema.to_json` to prevent fastavro
+   from raising exception `TypeError: unhashable type: 'mappingproxy'`.
+   (@ffissore, #1156, #1197)
+ - Fix the argument order in the constructor signature for
+   AvroDeserializer/Serializer: the argument order in the constructor
+   signature for AvroDeserializer/Serializer was altered in v1.6.1, but
+   the example is not changed yet. (@DLT1412, #1263)
+ - Fix the json deserialization errors from `_schema_loads` for
+   valid primitive declarations. (@dylrich, #989)
+
+confluent-kafka-python is based on librdkafka v1.9.0, see the
+[librdkafka release notes](https://github.com/edenhill/librdkafka/releases/tag/v1.9.0)
+for a complete list of changes, enhancements, fixes and upgrade considerations.
+
+
+## v1.8.2
+
+v1.8.2 is a maintenance release with the following fixes and enhancements:
+
+ - **IMPORTANT**: Added mandatory `use.deprecated.format` to
+   `ProtobufSerializer` and `ProtobufDeserializer`.
+   See **Upgrade considerations** below for more information.
+ - **Python 2.7 binary wheels are no longer provided.**
+   Users still on Python 2.7 will need to build confluent-kafka from source
+   and install librdkafka separately, see [README.md](README.md#Prerequisites)
+   for build instructions.
+ - Added `use.latest.version` and `skip.known.types` (Protobuf) to
+   the Serializer classes. (Robert Yokota, #1133).
+ - `list_topics()` and `list_groups()` added to AdminClient.
+ - Added support for headers in the SerializationContext (Laurent Domenech-Cabaud)
+ - Fix crash in header parsing (Armin Ronacher, #1165)
+ - Added long package description in setuptools (Bowrna, #1172).
+ - Documentation fixes by Aviram Hassan and Ryan Slominski.
+ - Don't raise AttributeError exception when CachedSchemaRegistryClient
+   constructor raises a valid exception.
+
+confluent-kafka-python is based on librdkafka v1.8.2, see the
+[librdkafka release notes](https://github.com/edenhill/librdkafka/releases/tag/v1.8.2)
+for a complete list of changes, enhancements, fixes and upgrade considerations.
+
+**Note**: There were no v1.8.0 and v1.8.1 releases.
+
+
+## Upgrade considerations
+
+### Protobuf serialization format changes
+
+Prior to this version, the confluent-kafka-python client had a bug where
+nested protobuf schema indexes were serialized incorrectly, causing
+incompatibility with other Schema Registry protobuf consumers and producers.
+
+This has now been fixed, but since the old defective serialization and the
+new correct serialization are mutually incompatible, users of
+confluent-kafka-python need to make an explicit choice of which
+serialization format to use during a transitional phase while old producers
+and consumers are upgraded.
+
+The `ProtobufSerializer` and `ProtobufDeserializer` constructors now
+both take a configuration dictionary (for the time being) that requires
+the `use.deprecated.format` configuration property to be explicitly set.
+
+Producers should be upgraded first and as long as there are old (<=v1.7.0)
+Python consumers reading from topics being produced to, the new (>=v1.8.2)
+Python producer must be configured with `use.deprecated.format` set to `True`.
+
+When all existing messages in the topic have been consumed by older consumers
+the consumers should be upgraded and both new producers and the new consumers
+must set `use.deprecated.format` to `False`.
+
+
+The requirement to explicitly set `use.deprecated.format` will be removed
+in a future version and the setting will then default to `False` (new format).
+
+
+
+
+
+
 ## v1.7.0
 
 v1.7.0 is a maintenance release with the following fixes and enhancements:
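
As an illustration of the `set_sasl_credentials` method added in v2.1.0 (see the changelog entry above), a minimal sketch; the broker address, SASL mechanism and credential values are placeholders:

```python
from confluent_kafka import Producer

# Placeholder broker/credentials; SCRAM is one of the mechanisms the new
# method applies to (SASL PLAIN/SCRAM, per the changelog entry above).
p = Producer({
    'bootstrap.servers': 'mybroker:9092',
    'security.protocol': 'SASL_SSL',
    'sasl.mechanism': 'SCRAM-SHA-256',
    'sasl.username': 'initial-user',
    'sasl.password': 'initial-secret',
})

# After rotating credentials: update the stored SASL credentials.
# Only subsequent (new) broker connections use the new values.
p.set_sasl_credentials('rotated-user', 'rotated-secret')
```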
diff --git a/INSTALL.md b/INSTALL.md
new file mode 100644
index 0000000..6b64246
--- /dev/null
+++ b/INSTALL.md
@@ -0,0 +1,121 @@
+# confluent-kafka-python installation instructions
+
+## Install pre-built wheels (recommended)
+
+Confluent provides pre-built Python wheels of confluent-kafka-python with
+all dependencies included.
+
+To install, simply do:
+
+```bash
+python3 -m pip install confluent-kafka
+```
+
+If you get a build error or require Kerberos/GSSAPI support, please read the next section: *Install from source*.
+
+
+## Install from source
+
+It is sometimes necessary to install confluent-kafka from source, rather
+than from prebuilt binary wheels, such as when:
+ - You need GSSAPI/Kerberos authentication.
+ - You're on a Python version we do not provide prebuilt wheels for.
+ - You're on an architecture or platform we do not provide prebuilt wheels for.
+ - You want to build confluent-kafka-python from the master branch.
+
+
+### Install from source on RedHat, CentOS, Fedora, etc
+
+```bash
+#
+# Perform these steps as the root user (e.g., in a 'sudo bash' shell)
+#
+
+# Install build tools and Kerberos support.
+
+yum install -y python3 python3-pip python3-devel gcc make cyrus-sasl-gssapi krb5-workstation
+
+# Install the latest version of librdkafka:
+
+rpm --import https://packages.confluent.io/rpm/7.0/archive.key
+
+echo '
+[Confluent-Clients]
+name=Confluent Clients repository
+baseurl=https://packages.confluent.io/clients/rpm/centos/$releasever/$basearch
+gpgcheck=1
+gpgkey=https://packages.confluent.io/clients/rpm/archive.key
+enabled=1' > /etc/yum.repos.d/confluent.repo
+
+yum install -y librdkafka-devel
+
+
+#
+# Now build and install confluent-kafka-python as your standard user
+# (e.g., exit the root shell first).
+#
+
+python3 -m pip install --no-binary confluent-kafka confluent-kafka
+
+
+# Verify that confluent_kafka is installed:
+
+python3 -c 'import confluent_kafka; print(confluent_kafka.version())'
+```
+
+### Install from source on Debian or Ubuntu
+
+```bash
+#
+# Perform these steps as the root user (e.g., in a 'sudo bash' shell)
+#
+
+# Install build tools and Kerberos support.
+
+apt install -y wget software-properties-common lsb-release gcc make python3 python3-pip python3-dev libsasl2-modules-gssapi-mit krb5-user
+
+
+# Install the latest version of librdkafka:
+
+wget -qO - https://packages.confluent.io/deb/7.0/archive.key | apt-key add -
+
+add-apt-repository "deb https://packages.confluent.io/clients/deb $(lsb_release -cs) main"
+
+apt update
+
+apt install -y librdkafka-dev
+
+
+#
+# Now build and install confluent-kafka-python as your standard user
+# (e.g., exit the root shell first).
+#
+
+python3 -m pip install --no-binary confluent-kafka confluent-kafka
+
+
+# Verify that confluent_kafka is installed:
+
+python3 -c 'import confluent_kafka; print(confluent_kafka.version())'
+```
+
+
+### Install from source on Mac OS X
+
+```bash
+
+# Install librdkafka from homebrew
+
+brew install librdkafka
+
+
+# Build and install confluent-kafka-python
+
+python3 -m pip install --no-binary confluent-kafka confluent-kafka
+
+
+# Verify that confluent_kafka is installed:
+
+python3 -c 'import confluent_kafka; print(confluent_kafka.version())'
+
+```
diff --git a/Makefile b/Makefile
index 8773569..83fdd30 100644
--- a/Makefile
+++ b/Makefile
@@ -12,3 +12,19 @@ clean:
 
 docs:
 	$(MAKE) -C docs html
+
+style-check:
+	@(tools/style-format.sh \
+		$$(git ls-tree -r --name-only HEAD | egrep '\.(c|h|py)$$') )
+
+style-check-changed:
+	@(tools/style-format.sh \
+		$$( (git diff --name-only ; git diff --name-only --staged) | egrep '\.(c|h|py)$$'))
+
+style-fix:
+	@(tools/style-format.sh --fix \
+		$$(git ls-tree -r --name-only HEAD | egrep '\.(c|h|py)$$'))
+
+style-fix-changed:
+	@(tools/style-format.sh --fix \
+		$$( (git diff --name-only ; git diff --name-only --staged) | egrep '\.(c|h|py)$$'))
diff --git a/README.md b/README.md
index bff24d1..c4aa53b 100644
--- a/README.md
+++ b/README.md
@@ -3,9 +3,9 @@ Confluent's Python Client for Apache Kafka<sup>TM</sup>
 
 **confluent-kafka-python** provides a high-level Producer, Consumer and AdminClient compatible with all
 [Apache Kafka<sup>TM<sup>](http://kafka.apache.org/) brokers >= v0.8, [Confluent Cloud](https://www.confluent.io/confluent-cloud/)
-and the [Confluent Platform](https://www.confluent.io/product/compare/). The client is:
+and [Confluent Platform](https://www.confluent.io/product/compare/). The client is:
 
-- **Reliable** - It's a wrapper around [librdkafka](https://github.com/edenhill/librdkafka) (provided automatically via binary wheels) which is widely deployed in a diverse set of production scenarios. It's tested using [the same set of system tests](https://github.com/confluentinc/confluent-kafka-python/tree/master/confluent_kafka/kafkatest) as the Java client [and more](https://github.com/confluentinc/confluent-kafka-python/tree/master/tests). It's supported by [Confluent](https://confluent.io).
+- **Reliable** - It's a wrapper around [librdkafka](https://github.com/edenhill/librdkafka) (provided automatically via binary wheels) which is widely deployed in a diverse set of production scenarios. It's tested using [the same set of system tests](https://github.com/confluentinc/confluent-kafka-python/tree/master/src/confluent_kafka/kafkatest) as the Java client [and more](https://github.com/confluentinc/confluent-kafka-python/tree/master/tests). It's supported by [Confluent](https://confluent.io).
 
 - **Performant** - Performance is a key design consideration. Maximum throughput is on par with the Java client for larger message sizes (where the overhead of the Python interpreter has less impact). Latency is on par with the Java client.
 
@@ -15,21 +15,25 @@ with Apache Kafka at its core. It's high priority for us that client features ke
 pace with core Apache Kafka and components of the [Confluent Platform](https://www.confluent.io/product/compare/).
 
 
-See the [API documentation](http://docs.confluent.io/current/clients/confluent-kafka-python/index.html) for more info.
+## Usage
 
+For a step-by-step guide on using the client see [Getting Started with Apache Kafka and Python](https://developer.confluent.io/get-started/python/).
 
-Usage
-=====
+Additional examples can be found in the [examples](examples) directory or the [confluentinc/examples](https://github.com/confluentinc/examples/tree/master/clients/cloud/python) GitHub repo, which include demonstrations of:
+- Exactly once data processing using the transactional API.
+- Integration with asyncio.
+- (De)serializing Protobuf, JSON, and Avro data with Confluent Schema Registry integration.
+- [Confluent Cloud](https://www.confluent.io/confluent-cloud/) configuration.
 
-Below are some examples of typical usage. For more examples, see the [examples](examples) directory or the [confluentinc/examples](https://github.com/confluentinc/examples/tree/master/clients/cloud/python) github repo for a [Confluent Cloud](https://www.confluent.io/confluent-cloud/) example.
+Also refer to the [API documentation](http://docs.confluent.io/current/clients/confluent-kafka-python/index.html).
 
+Finally, the [tests](tests) are useful as a reference for example usage.
 
-**Producer**
+### Basic Producer Example
 
 ```python
 from confluent_kafka import Producer
 
-
 p = Producer({'bootstrap.servers': 'mybroker1,mybroker2'})
 
 def delivery_report(err, msg):
@@ -44,9 +48,9 @@ for data in some_data_source:
     # Trigger any available delivery report callbacks from previous produce() calls
     p.poll(0)
 
-    # Asynchronously produce a message, the delivery report callback
-    # will be triggered from poll() above, or flush() below, when the message has
-    # been successfully delivered or failed permanently.
+    # Asynchronously produce a message. The delivery report callback will
+    # be triggered from the call to poll() above, or flush() below, when the
+    # message has been successfully delivered or failed permanently.
     p.produce('mytopic', data.encode('utf-8'), callback=delivery_report)
 
 # Wait for any outstanding messages to be delivered and delivery report
@@ -54,13 +58,16 @@ for data in some_data_source:
 p.flush()
 ```
 
+For a discussion on the poll based producer API, refer to the
+[Integrating Apache Kafka With Python Asyncio Web Applications](https://www.confluent.io/blog/kafka-python-asyncio-integration/)
+blog post.
+
 
-**High-level Consumer**
+### Basic Consumer Example
 
 ```python
 from confluent_kafka import Consumer
 
-
 c = Consumer({
     'bootstrap.servers': 'mybroker',
     'group.id': 'mygroup',
@@ -83,101 +90,8 @@ while True:
 c.close()
 ```
 
-**AvroProducer**
-
-```python
-from confluent_kafka import avro
-from confluent_kafka.avro import AvroProducer
-
-
-value_schema_str = """
-{
-   "namespace": "my.test",
-   "name": "value",
-   "type": "record",
-   "fields" : [
-     {
-       "name" : "name",
-       "type" : "string"
-     }
-   ]
-}
-"""
-
-key_schema_str = """
-{
-   "namespace": "my.test",
-   "name": "key",
-   "type": "record",
-   "fields" : [
-     {
-       "name" : "name",
-       "type" : "string"
-     }
-   ]
-}
-"""
-
-value_schema = avro.loads(value_schema_str)
-key_schema = avro.loads(key_schema_str)
-value = {"name": "Value"}
-key = {"name": "Key"}
-
-
-def delivery_report(err, msg):
-    """ Called once for each message produced to indicate delivery result.
-        Triggered by poll() or flush(). """
-    if err is not None:
-        print('Message delivery failed: {}'.format(err))
-    else:
-        print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
-
-
-avroProducer = AvroProducer({
-    'bootstrap.servers': 'mybroker,mybroker2',
-    'on_delivery': delivery_report,
-    'schema.registry.url': 'http://schema_registry_host:port'
-    }, default_key_schema=key_schema, default_value_schema=value_schema)
-
-avroProducer.produce(topic='my_topic', value=value, key=key)
-avroProducer.flush()
-```
-
-**AvroConsumer**
-
-```python
-from confluent_kafka.avro import AvroConsumer
-from confluent_kafka.avro.serializer import SerializerError
-
-
-c = AvroConsumer({
-    'bootstrap.servers': 'mybroker,mybroker2',
-    'group.id': 'groupid',
-    'schema.registry.url': 'http://127.0.0.1:8081'})
-
-c.subscribe(['my_topic'])
-
-while True:
-    try:
-        msg = c.poll(10)
-
-    except SerializerError as e:
-        print("Message deserialization failed for {}: {}".format(msg, e))
-        break
-
-    if msg is None:
-        continue
-
-    if msg.error():
-        print("AvroConsumer error: {}".format(msg.error()))
-        continue
-
-    print(msg.value())
-
-c.close()
-```
 
-**AdminClient**
+### Basic AdminClient Example
 
 Create topics:
 
@@ -203,15 +117,12 @@ for topic, f in fs.items():
 ```
 
 
-
-Thread Safety
--------------
+## Thread Safety
 
 The `Producer`, `Consumer` and `AdminClient` are all thread safe.
 
 
-Install
-=======
+## Install
 
 **Install self-contained binary wheels**
 
@@ -220,24 +131,16 @@ Install
 **NOTE:** The pre-built Linux wheels do NOT contain SASL Kerberos/GSSAPI support.
           If you need SASL Kerberos/GSSAPI support you must install librdkafka and
           its dependencies using the repositories below and then build
-          confluent-kafka  using the command in the "Install from
-          source from PyPi" section below.
-
-**Install AvroProducer and AvroConsumer**
-
-    $ pip install "confluent-kafka[avro]"
+          confluent-kafka using the instructions in the
+          "Install from source" section below.
 
-**Install from source from PyPi**
-*(requires librdkafka + dependencies to be installed separately)*:
+**Install from source**
 
-    $ pip install --no-binary :all: confluent-kafka
+For source install, see the *Install from source* section in [INSTALL.md](INSTALL.md).
 
 
-For source install, see *Prerequisites* below.
+## Broker Compatibility
 
-
-Broker Compatibility
-====================
 The Python client (as well as the underlying C library librdkafka) supports
 all broker versions &gt;= 0.8.
 But due to the nature of the Kafka protocol in broker versions 0.8 and 0.9 it
@@ -259,8 +162,8 @@ More info here:
 https://github.com/edenhill/librdkafka/wiki/Broker-version-compatibility
 
 
-SSL certificates
-================
+## SSL certificates
+
 If you're connecting to a Kafka cluster through SSL you will need to configure
 the client with `'security.protocol': 'SSL'` (or `'SASL_SSL'` if SASL
 authentication is used).
@@ -272,33 +175,14 @@ Linux distribution's `ca-certificates` package which needs to be installed
 through `apt`, `yum`, et.al.
 
 If your system stores CA certificates in another location you will need to
-configure the client with `'ssl.ca.location': '/path/to/cacert.pem'`. 
+configure the client with `'ssl.ca.location': '/path/to/cacert.pem'`.
 
 Alternatively, the CA certificates can be provided by the [certifi](https://pypi.org/project/certifi/)
 Python package. To use certifi, add an `import certifi` line and configure the
 client's CA location with `'ssl.ca.location': certifi.where()`.
 
 
-Prerequisites
-=============
-
- * Python >= 2.7 or Python 3.x
- * [librdkafka](https://github.com/edenhill/librdkafka) >= 1.6.0 (latest release is embedded in wheels)
-
-librdkafka is embedded in the macosx manylinux wheels, for other platforms, SASL Kerberos/GSSAPI support or
-when a specific version of librdkafka is desired, following these guidelines:
-
-  * For **Debian/Ubuntu** based systems, add this APT repo and then do `sudo apt-get install librdkafka-dev python-dev`:
-http://docs.confluent.io/current/installation.html#installation-apt
-
- * For **RedHat** and **RPM**-based distros, add this YUM repo and then do `sudo yum install librdkafka-devel python-devel`:
-http://docs.confluent.io/current/installation.html#rpm-packages-via-yum
-
- * On **OSX**, use **homebrew** and do `brew install librdkafka`
-
-
-License
-=======
+## License
 
 [Apache License v2.0](http://www.apache.org/licenses/LICENSE-2.0)
 
@@ -306,7 +190,12 @@ KAFKA is a registered trademark of The Apache Software Foundation and has been l
 by confluent-kafka-python. confluent-kafka-python has no affiliation with and is not endorsed by
 The Apache Software Foundation.
 
-Developer Notes
-===============
+
+## Developer Notes
 
 Instructions on building and testing confluent-kafka-python can be found [here](DEVELOPER.md).
+
+
+## Confluent Cloud
+
+For a step-by-step guide on using the Python client with Confluent Cloud see [Getting Started with Apache Kafka and Python](https://developer.confluent.io/get-started/python/) on [Confluent Developer](https://developer.confluent.io/). 
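
The README hunk above only shows the tail of the AdminClient topic-creation example (the `for topic, f in fs.items():` loop appears as diff context). For orientation, a self-contained sketch of that pattern, with placeholder broker and topic names:

```python
from confluent_kafka.admin import AdminClient, NewTopic

a = AdminClient({'bootstrap.servers': 'mybroker'})

# create_topics() is asynchronous and returns a dict of <topic, future>,
# which is what the `for topic, f in fs.items():` loop iterates over.
fs = a.create_topics([NewTopic(t, num_partitions=3, replication_factor=1)
                      for t in ("mytopic1", "mytopic2")])

for topic, f in fs.items():
    try:
        f.result()  # the result itself is None on success
        print("Topic {} created".format(topic))
    except Exception as e:
        print("Failed to create topic {}: {}".format(topic, e))
```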
diff --git a/debian/changelog b/debian/changelog
index 3785f67..e8ecef4 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,11 @@
+python-confluent-kafka (2.1.1rc1-1) UNRELEASED; urgency=low
+
+  * New upstream release.
+  * Drop patch 0002-Fix-test-failure-appearing-with-recent-librdkafka.patch,
+    present upstream.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Sat, 10 Jun 2023 11:07:22 -0000
+
 python-confluent-kafka (1.7.0-4) unstable; urgency=medium
 
   * Rename patch 0002 to fix inaccurate description.
diff --git a/debian/patches/0001-Switch-to-trove-classifiers.patch b/debian/patches/0001-Switch-to-trove-classifiers.patch
index e0524f2..1c91699 100644
--- a/debian/patches/0001-Switch-to-trove-classifiers.patch
+++ b/debian/patches/0001-Switch-to-trove-classifiers.patch
@@ -7,11 +7,11 @@ This avoids shipping a license file in the top directory.
  setup.py | 1 -
  1 file changed, 1 deletion(-)
 
-diff --git a/setup.py b/setup.py
-index 9e1d337..8a0bc17 100755
---- a/setup.py
-+++ b/setup.py
-@@ -84,7 +84,6 @@ setup(name='confluent-kafka',
+Index: python-confluent-kafka.git/setup.py
+===================================================================
+--- python-confluent-kafka.git.orig/setup.py
++++ python-confluent-kafka.git/setup.py
+@@ -83,7 +83,6 @@ setup(name='confluent-kafka',
        ext_modules=[module],
        packages=find_packages('src'),
        package_dir={'': 'src'},
diff --git a/debian/patches/0002-Fix-test-failure-appearing-with-recent-librdkafka.patch b/debian/patches/0002-Fix-test-failure-appearing-with-recent-librdkafka.patch
deleted file mode 100644
index efeafcd..0000000
--- a/debian/patches/0002-Fix-test-failure-appearing-with-recent-librdkafka.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From: =?utf-8?q?=C3=89tienne_Mollier?= <emollier@debian.org>
-Date: Sun, 26 Feb 2023 11:38:48 +0100
-Subject: Fix test failure appearing with recent librdkafka
-
-Bug: https://github.com/confluentinc/confluent-kafka-python/pull/1467
-Bug-Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1031483
----
- tests/test_Producer.py | 16 ++++++++--------
- 1 file changed, 8 insertions(+), 8 deletions(-)
-
-diff --git a/tests/test_Producer.py b/tests/test_Producer.py
-index ef82b41..41eac1f 100644
---- a/tests/test_Producer.py
-+++ b/tests/test_Producer.py
-@@ -206,8 +206,8 @@ def test_transaction_api():
-     # Any subsequent APIs will fail since init did not succeed.
-     with pytest.raises(KafkaException) as ex:
-         p.begin_transaction()
--    assert ex.value.args[0].code() == KafkaError._STATE
--    assert ex.value.args[0].retriable() is False
-+    assert ex.value.args[0].code() == KafkaError._CONFLICT
-+    assert ex.value.args[0].retriable() is True
-     assert ex.value.args[0].fatal() is False
-     assert ex.value.args[0].txn_requires_abort() is False
- 
-@@ -218,22 +218,22 @@ def test_transaction_api():
-     with pytest.raises(KafkaException) as ex:
-         p.send_offsets_to_transaction([TopicPartition("topic", 0, 123)],
-                                       group_metadata)
--    assert ex.value.args[0].code() == KafkaError._STATE
--    assert ex.value.args[0].retriable() is False
-+    assert ex.value.args[0].code() == KafkaError._CONFLICT
-+    assert ex.value.args[0].retriable() is True
-     assert ex.value.args[0].fatal() is False
-     assert ex.value.args[0].txn_requires_abort() is False
- 
-     with pytest.raises(KafkaException) as ex:
-         p.commit_transaction(0.5)
--    assert ex.value.args[0].code() == KafkaError._STATE
--    assert ex.value.args[0].retriable() is False
-+    assert ex.value.args[0].code() == KafkaError._CONFLICT
-+    assert ex.value.args[0].retriable() is True
-     assert ex.value.args[0].fatal() is False
-     assert ex.value.args[0].txn_requires_abort() is False
- 
-     with pytest.raises(KafkaException) as ex:
-         p.abort_transaction(0.5)
--    assert ex.value.args[0].code() == KafkaError._STATE
--    assert ex.value.args[0].retriable() is False
-+    assert ex.value.args[0].code() == KafkaError._CONFLICT
-+    assert ex.value.args[0].retriable() is True
-     assert ex.value.args[0].fatal() is False
-     assert ex.value.args[0].txn_requires_abort() is False
- 
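
The dropped patch above adjusted test expectations for the transactional producer when `init_transactions()` has not succeeded. For context, a minimal sketch of the transactional flow those tests exercise; the broker address and `transactional.id` are placeholders:

```python
from confluent_kafka import Producer, KafkaException

# Placeholder broker and transactional.id values.
p = Producer({
    'bootstrap.servers': 'mybroker:9092',
    'transactional.id': 'example-txn-producer',
})

try:
    p.init_transactions()   # must succeed before any other transactional call
    p.begin_transaction()
    p.produce('mytopic', b'value')
    p.commit_transaction()
except KafkaException as e:
    err = e.args[0]
    # The assertions in the dropped patch checked exactly this error metadata.
    print(err.code(), err.retriable(), err.fatal(), err.txn_requires_abort())
```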
diff --git a/debian/patches/series b/debian/patches/series
index d01ce8e..63467fc 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,2 +1 @@
 0001-Switch-to-trove-classifiers.patch
-0002-Fix-test-failure-appearing-with-recent-librdkafka.patch
diff --git a/docs/conf.py b/docs/conf.py
index f4e750e..0f15acc 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -27,14 +27,14 @@ sys.path[:0] = [os.path.abspath(x) for x in glob('../build/lib.*')]
 ######################################################################
 # General information about the project.
 project = u'confluent-kafka'
-copyright = u'2016-2021, Confluent Inc.'
+copyright = u'2016-2023, Confluent Inc.'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-version = '1.7.0'
+version = '2.1.1rc1'
 # The full version, including alpha/beta/rc tags.
 release = version
 ######################################################################
diff --git a/docs/index.rst b/docs/index.rst
index e741415..df5a620 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -3,59 +3,67 @@ confluent_kafka API
 
 A reliable, performant and feature-rich Python client for Apache Kafka v0.8 and above.
 
-Configuration
+Guides
    - :ref:`Configuration Guide <pythonclient_configuration>`
+   - :ref:`Transactional API <pythonclient_transactional>`
 
 Client API
-   - :ref:`AdminClient <pythonclient_adminclient>`
-   - :ref:`Consumer <pythonclient_consumer>`
-   - :ref:`DeserializingConsumer <serde_consumer>` (new API subject to change)
-   - :ref:`AvroConsumer <avro_consumer>` (legacy)
    - :ref:`Producer <pythonclient_producer>`
-   - :ref:`SerializingProducer <serde_producer>` (new API subject to change)
-   - :ref:`AvroProducer <avro_producer>` (legacy)
-   - :ref:`SchemaRegistry <schemaregistry_client>`
-
+   - :ref:`Consumer <pythonclient_consumer>`
+   - :ref:`AdminClient <pythonclient_adminclient>`
+   - :ref:`SchemaRegistryClient <schemaregistry_client>`
 
 Serialization API
-   - Serializer
-      - :ref:`AvroDeserializer <schemaregistry_avro_deserializer>`
-      - :ref:`DoubleDeserializer <serde_deserializer_double>`
-      - :ref:`JSONDeserializer <schemaregistry_json_deserializer>`
-      - :ref:`IntegerDeserializer <serde_deserializer_integer>`
-      - :ref:`ProtobufDeserializer <schemaregistry_protobuf_deserializer>`
-      - :ref:`StringDeserializer <serde_deserializer_string>`
-
-   - Deserializer
-      - :ref:`AvroSerializer <schemaregistry_avro_serializer>`
-      - :ref:`DoubleSerializer <serde_serializer_double>`
-      - :ref:`JSONSerializer <schemaregistry_json_serializer>`
-      - :ref:`IntegerSerializer <serde_serializer_integer>`
-      - :ref:`ProtobufSerializer <schemaregistry_protobuf_serializer>`
-      - :ref:`StringSerializer <serde_serializer_string>`
-
+   - Avro :ref:`serializer <schemaregistry_avro_serializer>` / :ref:`deserializer <schemaregistry_avro_deserializer>`
+   - JSON Schema :ref:`serializer <schemaregistry_json_serializer>` / :ref:`deserializer <schemaregistry_json_deserializer>`
+   - Protobuf :ref:`serializer <schemaregistry_protobuf_serializer>` / :ref:`deserializer <schemaregistry_protobuf_deserializer>`
+   - String :ref:`serializer <serde_serializer_string>` / :ref:`deserializer <serde_deserializer_string>`
+   - Integer :ref:`serializer <serde_serializer_integer>` / :ref:`deserializer <serde_deserializer_integer>`
+   - Double :ref:`serializer <serde_serializer_double>` / :ref:`deserializer <serde_deserializer_double>`
 
 Supporting classes
     - :ref:`Message <pythonclient_message>`
     - :ref:`TopicPartition <pythonclient_topicpartition>`
     - :ref:`ThrottleEvent <pythonclient_throttleevent>`
 
-    - Errors
-        - :ref:`KafkaError <pythonclient_kafkaerror>`
-        - :ref:`KafkaException <pythonclient_kafkaexception>`
-        - :ref:`ConsumeError <pyclient_error_consumer>`
-        - :ref:`ProduceError <pyclient_error_producer>`
-        - :ref:`SerializationError <serde_error>`
-            - :ref:`KeySerializationError <serde_error_serializer_key>`
-            - :ref:`ValueSerializationError <serde_error_serializer_value>`
-            - :ref:`KeyDeserializationError <serde_error_deserializer_key>`
-            - :ref:`ValueDeserializationError <serde_error_deserializer_value>`
+    - Errors:
+       - :ref:`KafkaError <pythonclient_kafkaerror>`
+       - :ref:`KafkaException <pythonclient_kafkaexception>`
+       - :ref:`ConsumeError <pyclient_error_consumer>`
+       - :ref:`ProduceError <pyclient_error_producer>`
+       - :ref:`SerializationError <serde_error>`
+       - :ref:`KeySerializationError <serde_error_serializer_key>`
+       - :ref:`ValueSerializationError <serde_error_serializer_value>`
+       - :ref:`KeyDeserializationError <serde_error_deserializer_key>`
+       - :ref:`ValueDeserializationError <serde_error_deserializer_value>`
 
     - Admin API
-        - :ref:`NewTopic <pyclient_admin_newtopic>`
-        - :ref:`NewPartitions <pyclient_admin_newpartitions>`
+       - :ref:`NewTopic <pyclient_admin_newtopic>`
+       - :ref:`NewPartitions <pyclient_admin_newpartitions>`
+       - :ref:`ConfigSource <pythonclient_config_source>`
+       - :ref:`ConfigEntry <pythonclient_config_entry>`
+       - :ref:`ConfigResource <pythonclient_config_resource>`
+       - :ref:`ResourceType <pythonclient_resource_type>`
+       - :ref:`ResourcePatternType <pythonclient_resource_pattern_type>`
+       - :ref:`AclOperation <pythonclient_acl_operation>`
+       - :ref:`AclPermissionType <pythonclient_acl_permission_type>`
+       - :ref:`AclBinding <pythonclient_acl_binding>`
+       - :ref:`AclBindingFilter <pythonclient_acl_binding_filter>`
+
+Experimental
+   These classes are experimental and are likely to be removed, or subject to incompatible
+   API changes in future versions of the library. To avoid breaking changes on upgrading,
+   we recommend using (de)serializers directly, as per the example applications in the
+   GitHub repo.
 
-Guide to the :ref:`Transactional Producer API <pythonclient_transactional>`
+   - :ref:`SerializingProducer <serde_producer>`
+   - :ref:`DeserializingConsumer <serde_consumer>`
+
+Legacy
+   These classes are deprecated and will be removed in a future version of the library.
+
+   - :ref:`AvroConsumer <avro_consumer>`
+   - :ref:`AvroProducer <avro_producer>`
 
 
 
@@ -89,6 +97,87 @@ NewPartitions
 .. autoclass:: confluent_kafka.admin.NewPartitions
    :members:
 
+.. _pythonclient_config_source:
+
+**************
+ConfigSource
+**************
+
+.. autoclass:: confluent_kafka.admin.ConfigSource
+   :members:
+
+.. _pythonclient_config_entry:
+
+**************
+ConfigEntry
+**************
+
+.. autoclass:: confluent_kafka.admin.ConfigEntry
+   :members:
+
+.. _pythonclient_config_resource:
+
+**************
+ConfigResource
+**************
+
+.. autoclass:: confluent_kafka.admin.ConfigResource
+   :members:
+
+.. _pythonclient_resource_type:
+
+**************
+ResourceType
+**************
+
+.. autoclass:: confluent_kafka.admin.ResourceType
+   :members:
+
+.. _pythonclient_resource_pattern_type:
+
+*******************
+ResourcePatternType
+*******************
+
+.. autoclass:: confluent_kafka.admin.ResourcePatternType
+   :members:
+
+.. _pythonclient_acl_operation:
+
+**************
+AclOperation
+**************
+
+.. autoclass:: confluent_kafka.admin.AclOperation
+   :members:
+
+.. _pythonclient_acl_permission_type:
+
+*****************
+AclPermissionType
+*****************
+
+.. autoclass:: confluent_kafka.admin.AclPermissionType
+   :members:
+
+.. _pythonclient_acl_binding:
+
+**************
+AclBinding
+**************
+
+.. autoclass:: confluent_kafka.admin.AclBinding
+   :members:
+
+.. _pythonclient_acl_binding_filter:
+
+****************
+AclBindingFilter
+****************
+
+.. autoclass:: confluent_kafka.admin.AclBindingFilter
+   :members:
+
 .. _pythonclient_consumer:
 
 ********
@@ -100,9 +189,9 @@ Consumer
 
 .. _serde_consumer:
 
-*********************
-DeserializingConsumer
-*********************
+************************************
+DeserializingConsumer (experimental)
+************************************
 
 .. autoclass:: confluent_kafka.DeserializingConsumer
    :members:
@@ -120,9 +209,9 @@ Producer
 
 .. _serde_producer:
 
-*******************
-SerializingProducer
-*******************
+**********************************
+SerializingProducer (experimental)
+**********************************
 
 .. autoclass:: confluent_kafka.SerializingProducer
    :members:
@@ -138,24 +227,6 @@ SchemaRegistryClient
 .. autoclass:: confluent_kafka.schema_registry.SchemaRegistryClient
    :members:
 
-.. _avro_producer:
-
-********************
-AvroProducer(Legacy)
-********************
-
-.. autoclass:: confluent_kafka.avro.AvroProducer
-   :members:
-
-.. _avro_consumer:
-
-********************
-AvroConsumer(Legacy)
-********************
-
-.. autoclass:: confluent_kafka.avro.AvroConsumer
-   :members:
-
 Serialization API
 =================
 
@@ -315,10 +386,198 @@ StringSerializer
 
    .. automethod:: __call__
 
+
+Supporting Classes
+==================
+
+.. _pythonclient_message:
+
+*******
+Message
+*******
+
+.. autoclass:: confluent_kafka.Message
+   :members:
+
+.. _pythonclient_topicpartition:
+
+**************
+TopicPartition
+**************
+
+.. autoclass:: confluent_kafka.TopicPartition
+   :members:
+
+.. _serde_field:
+
+************
+MessageField
+************
+
+.. autoclass:: confluent_kafka.serialization.MessageField
+   :members:
+
+.. _serde_ctx:
+
+********************
+SerializationContext
+********************
+
+.. autoclass:: confluent_kafka.serialization.SerializationContext
+   :members:
+
+.. _schemaregistry_schema:
+
+******
+Schema
+******
+
+.. autoclass:: confluent_kafka.schema_registry.Schema
+   :members:
+
+.. _schemaregistry_registered_schema:
+
+****************
+RegisteredSchema
+****************
+
+.. autoclass:: confluent_kafka.schema_registry.RegisteredSchema
+   :members:
+
+.. _schemaregistry_error:
+
+*******************
+SchemaRegistryError
+*******************
+
+.. autoclass:: confluent_kafka.schema_registry.error.SchemaRegistryError
+   :members:
+
+.. _pythonclient_kafkaerror:
+
+**********
+KafkaError
+**********
+
+.. autoclass:: confluent_kafka.KafkaError
+   :members:
+
+.. _pythonclient_kafkaexception:
+
+**************
+KafkaException
+**************
+
+.. autoclass:: confluent_kafka.KafkaException
+   :members:
+
+.. _pyclient_error_consumer:
+
+************
+ConsumeError
+************
+
+.. autoclass:: confluent_kafka.error.ConsumeError
+   :members:
+
+.. _pyclient_error_producer:
+
+************
+ProduceError
+************
+
+.. autoclass:: confluent_kafka.error.ProduceError
+   :members:
+
+.. _serde_error:
+
+*******************
+SerializationError
+*******************
+
+.. autoclass:: confluent_kafka.error.SerializationError
+   :members:
+
+.. _serde_error_serializer_key:
+
+*********************
+KeySerializationError
+*********************
+
+.. autoclass:: confluent_kafka.error.KeySerializationError
+   :members:
+
+.. _serde_error_serializer_value:
+
+***********************
+ValueSerializationError
+***********************
+
+.. autoclass:: confluent_kafka.error.ValueSerializationError
+   :members:
+
+.. _serde_error_deserializer_key:
+
+***********************
+KeyDeserializationError
+***********************
+
+.. autoclass:: confluent_kafka.error.KeyDeserializationError
+   :members:
+
+.. _serde_error_deserializer_value:
+
+*************************
+ValueDeserializationError
+*************************
+
+.. autoclass:: confluent_kafka.error.ValueDeserializationError
+   :members:
+
+******
+Offset
+******
+
+Logical offset constants (see the usage sketch after this list):
+
+ * :py:const:`OFFSET_BEGINNING` - Beginning of partition (oldest offset)
+ * :py:const:`OFFSET_END` - End of partition (next offset)
+ * :py:const:`OFFSET_STORED` - Use stored/committed offset
+ * :py:const:`OFFSET_INVALID` - Invalid/Default offset
+
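
A minimal sketch of how these constants are typically used together with TopicPartition (broker, group and topic names are placeholder assumptions):

```
from confluent_kafka import Consumer, TopicPartition, OFFSET_BEGINNING

# Placeholders: broker address and group id are assumptions for this sketch.
consumer = Consumer({'bootstrap.servers': 'localhost:9092',
                     'group.id': 'example_group'})

# Read partition 0 of "mytopic" from the oldest available message.
consumer.assign([TopicPartition('mytopic', 0, OFFSET_BEGINNING)])

msg = consumer.poll(1.0)
```
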
+.. _pythonclient_throttleevent:
+
+*************
+ThrottleEvent
+*************
+
+.. autoclass:: confluent_kafka.ThrottleEvent
+   :members:
+
+
+.. _avro_producer:
+
+*********************
+AvroProducer (Legacy)
+*********************
+
+.. autoclass:: confluent_kafka.avro.AvroProducer
+   :members:
+
+.. _avro_consumer:
+
+*********************
+AvroConsumer (Legacy)
+*********************
+
+.. autoclass:: confluent_kafka.avro.AvroConsumer
+   :members:
+
+
 .. _pythonclient_transactional:
 
-Transactional Producer API
-==========================
+Transactional API
+=================
 
 The transactional producer operates on top of the idempotent producer,
 and provides full exactly-once semantics (EOS) for Apache Kafka when used
@@ -373,6 +632,7 @@ After the current transaction has been committed or aborted a new
 transaction may be started by calling
 :py:meth:`confluent_kafka.Producer.begin_transaction()` again.
 
+
 **Retriable errors**
 
 Some error cases allow the attempted operation to be retried, this is
@@ -441,6 +701,7 @@ neither the retriable or abortable flags set, as fatal.
                # treat all other errors as fatal
                raise
 
+
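
A condensed sketch of the overall flow, including the retry/abort handling described above (broker address, transactional.id and topic name are placeholder assumptions; consumer offsets and polling are omitted for brevity):

```
from confluent_kafka import Producer, KafkaException

# Placeholders: broker address and transactional.id are assumptions for this sketch.
producer = Producer({'bootstrap.servers': 'localhost:9092',
                     'transactional.id': 'example-txn'})
producer.init_transactions()

producer.begin_transaction()
producer.produce('output-topic', key=b'key', value=b'value')
try:
    producer.commit_transaction()
except KafkaException as e:
    if e.args[0].txn_requires_abort():
        # The transaction is no longer usable; abort it and start a new one.
        producer.abort_transaction()
    elif e.args[0].retriable():
        # Retriable: the operation may simply be attempted again.
        producer.commit_transaction()
    else:
        # Treat all other errors as fatal.
        raise
```
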
 .. _pythonclient_configuration:
 
 Kafka Client Configuration
@@ -459,12 +720,8 @@ providing a dict of configuration properties to the instance constructor, e.g.
   consumer = confluent_kafka.Consumer(conf)
 
 
-The supported configuration values are dictated by the underlying
-librdkafka C library. For the full range of configuration properties
-please consult librdkafka's documentation:
-https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
-
-The Python bindings also provide some additional configuration properties:
+The Python client provides the following configuration properties in 
+addition to the properties dictated by the underlying librdkafka C library:
 
 * ``default.topic.config``: value is a dict of client topic-level configuration
   properties that are applied to all used topics for the instance. **DEPRECATED:**
@@ -517,169 +774,8 @@ The Python bindings also provide some additional configuration properties:
     mylogger.addHandler(logging.StreamHandler())
     producer = confluent_kafka.Producer({'bootstrap.servers': 'mybroker.com'}, logger=mylogger)
 
-Supporting Classes
-==================
-
-.. _pythonclient_message:
-
-*******
-Message
-*******
-
-.. autoclass:: confluent_kafka.Message
-   :members:
-
-.. _pythonclient_topicpartition:
-
-**************
-TopicPartition
-**************
-
-.. autoclass:: confluent_kafka.TopicPartition
-   :members:
-
-.. _serde_field:
-
-************
-MessageField
-************
-
-.. autoclass:: confluent_kafka.serialization.MessageField
-   :members:
-
-.. _serde_ctx:
-
-********************
-SerializationContext
-********************
-
-.. autoclass:: confluent_kafka.serialization.SerializationContext
-   :members:
-
-.. _schemaregistry_schema:
-
-******
-Schema
-******
-
-.. autoclass:: confluent_kafka.schema_registry.Schema
-   :members:
-
-.. _schemaregistry_registered_schema:
-
-****************
-RegisteredSchema
-****************
-
-.. autoclass:: confluent_kafka.schema_registry.RegisteredSchema
-   :members:
+.. note::
+   In the Python client, the ``logger`` configuration property is used to specify the log handler, rather than librdkafka's ``log_cb``.
 
-.. _schemaregistry_error:
-
-*******************
-SchemaRegistryError
-*******************
-
-.. autoclass:: confluent_kafka.schema_registry.error.SchemaRegistryError
-   :members:
-
-.. _pythonclient_kafkaerror:
-
-**********
-KafkaError
-**********
-
-.. autoclass:: confluent_kafka.KafkaError
-   :members:
-
-.. _pythonclient_kafkaexception:
-
-**************
-KafkaException
-**************
-
-.. autoclass:: confluent_kafka.KafkaException
-   :members:
-
-.. _pyclient_error_consumer:
-
-************
-ConsumeError
-************
-
-.. autoclass:: confluent_kafka.error.ConsumeError
-   :members:
-
-.. _pyclient_error_producer:
-
-************
-ProduceError
-************
-
-.. autoclass:: confluent_kafka.error.ProduceError
-   :members:
-
-.. _serde_error:
-
-*******************
-SerializationError
-*******************
-
-.. autoclass:: confluent_kafka.error.SerializationError
-   :members:
-
-.. _serde_error_serializer_key:
-
-*********************
-KeySerializationError
-*********************
-
-.. autoclass:: confluent_kafka.error.KeySerializationError
-   :members:
-
-.. _serde_error_serializer_value:
-
-***********************
-ValueSerializationError
-***********************
-
-.. autoclass:: confluent_kafka.error.ValueSerializationError
-   :members:
-
-.. _serde_error_deserializer_key:
-
-***********************
-KeyDeserializationError
-***********************
-
-.. autoclass:: confluent_kafka.error.KeyDeserializationError
-   :members:
-
-.. _serde_error_deserializer_value:
-
-*************************
-ValueDeserializationError
-*************************
-
-.. autoclass:: confluent_kafka.error.ValueDeserializationError
-   :members:
-
-******
-Offset
-******
-
-Logical offset constants:
-
- * :py:const:`OFFSET_BEGINNING` - Beginning of partition (oldest offset)
- * :py:const:`OFFSET_END` - End of partition (next offset)
- * :py:const:`OFFSET_STORED` - Use stored/committed offset
- * :py:const:`OFFSET_INVALID` - Invalid/Default offset
-
-.. _pythonclient_throttleevent:
-
-*************
-ThrottleEvent
-*************
-
-.. autoclass:: confluent_kafka.ThrottleEvent
-   :members:
+For the full range of configuration properties, please consult librdkafka's documentation:
+https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
\ No newline at end of file
diff --git a/examples/Makefile b/examples/Makefile
index 8d4b204..6a70238 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -1,5 +1,5 @@
-user_pb2.py: user.proto
-	protoc -I=. --python_out=. ./user.proto;
+user_pb2.py: protobuf/user.proto
+	cd protobuf && protoc -I=. --python_out=. ./user.proto;
 
 clean:
-	rm -f $(TARGET_DIR)/*_pb2.py
+	rm -f $(TARGET_DIR)/protobuf/*_pb2.py
diff --git a/examples/README.md b/examples/README.md
index d37dbc1..9f7b3f4 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1,25 +1,24 @@
-The scripts in this directory provide code examples using Confluent's Python client:
-
-* [adminapi.py](adminapi.py): Collection of Kafka Admin API operations
-* [asyncio_example.py](asyncio_example.py): AsyncIO webserver with Kafka producer
-* [avro-cli.py](avro-cli.py): Produces Avro messages with Confluent Schema Registry and then reads them back again
-* [consumer.py](consumer.py): Reads messages from a Kafka topic
-* [producer.py](producer.py): Reads lines from stdin and sends them to Kafka
-* [eos-transactions.py](eos-transactions.py): Transactional producer with exactly once semantics (EOS)
-* [avro_producer.py](avro_producer.py): SerializingProducer with AvroSerializer
-* [avro_consumer.py](avro_consumer.py): DeserializingConsumer with AvroDeserializer
-* [json_producer.py](json_producer.py): SerializingProducer with JsonSerializer
-* [json_consumer.py](json_consumer.py): DeserializingConsumer with JsonDeserializer
-* [protobuf_producer.py](protobuf_producer.py): SerializingProducer with ProtobufSerializer
-* [protobuf_consumer.py](protobuf_consumer.py): DeserializingConsumer with ProtobufDeserializer
-* [sasl_producer.py](sasl_producer.py): SerializingProducer with SASL Authentication
-* [list_offsets.py](list_offsets.py): List committed offsets and consumer lag for group and topics
-* [oauth_producer.py](oauth_producer.py): SerializingProducer with OAuth Authentication (client credentials)
+The scripts in this directory provide various examples of using Confluent's Python client for Kafka:
+
+* [adminapi.py](adminapi.py): Various AdminClient operations.
+* [asyncio_example.py](asyncio_example.py): AsyncIO webserver with Kafka producer.
+* [consumer.py](consumer.py): Read messages from a Kafka topic.
+* [producer.py](producer.py): Read lines from stdin and send them to a Kafka topic.
+* [eos-transactions.py](eos-transactions.py): Transactional producer with exactly once semantics (EOS).
+* [avro_producer.py](avro_producer.py): Produce Avro serialized data using AvroSerializer.
+* [avro_consumer.py](avro_consumer.py): Read Avro serialized data using AvroDeserializer.
+* [json_producer.py](json_producer.py): Produce JSON serialized data using JSONSerializer.
+* [json_consumer.py](json_consumer.py): Read JSON serialized data using JSONDeserializer.
+* [protobuf_producer.py](protobuf_producer.py): Produce Protobuf serialized data using ProtobufSerializer.
+* [protobuf_consumer.py](protobuf_consumer.py): Read Protobuf serialized data using ProtobufDeserializer.
+* [sasl_producer.py](sasl_producer.py): Demonstrates SASL Authentication.
+* [list_offsets.py](list_offsets.py): List committed offsets and consumer lag for group and topics.
+* [oauth_producer.py](oauth_producer.py): Demonstrates OAuth Authentication (client credentials).
 
 Additional examples for [Confluent Cloud](https://www.confluent.io/confluent-cloud/):
 
-* [confluent_cloud.py](confluent_cloud.py): produces messages to Confluent Cloud and then reads them back again
-* [confluentinc/examples](https://github.com/confluentinc/examples/tree/master/clients/cloud/python): integrates Confluent Cloud and Confluent Cloud Schema Registry
+* [confluent_cloud.py](confluent_cloud.py): Produce messages to Confluent Cloud and then read them back again.
+* [confluentinc/examples](https://github.com/confluentinc/examples/tree/master/clients/cloud/python): Integration with Confluent Cloud and Confluent Cloud Schema Registry.
 
 ## venv setup
 
@@ -52,4 +51,4 @@ When you're finished with the venv:
 
 ```
 $ deactivate
-```
\ No newline at end of file
+```
diff --git a/examples/adminapi.py b/examples/adminapi.py
index 1ac3f74..2f95469 100755
--- a/examples/adminapi.py
+++ b/examples/adminapi.py
@@ -13,14 +13,15 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
 
-#
-# Example Admin clients.
-#
 
-from confluent_kafka.admin import AdminClient, NewTopic, NewPartitions, ConfigResource, ConfigSource
-from confluent_kafka import KafkaException
+# Example use of AdminClient operations.
+
+from confluent_kafka import (KafkaException, ConsumerGroupTopicPartitions,
+                             TopicPartition, ConsumerGroupState)
+from confluent_kafka.admin import (AdminClient, NewTopic, NewPartitions, ConfigResource, ConfigSource,
+                                   AclBinding, AclBindingFilter, ResourceType, ResourcePatternType, AclOperation,
+                                   AclPermissionType)
 import sys
 import threading
 import logging
@@ -28,6 +29,13 @@ import logging
 logging.basicConfig()
 
 
+def parse_nullable_string(s):
+    if s == "None":
+        return None
+    else:
+        return s
+
+
 def example_create_topics(a, topics):
     """ Create topics """
 
@@ -117,6 +125,141 @@ def example_describe_configs(a, args):
             raise
 
 
+def example_create_acls(a, args):
+    """ create acls """
+
+    acl_bindings = [
+        AclBinding(
+            ResourceType[restype],
+            parse_nullable_string(resname),
+            ResourcePatternType[resource_pattern_type],
+            parse_nullable_string(principal),
+            parse_nullable_string(host),
+            AclOperation[operation],
+            AclPermissionType[permission_type]
+        )
+        for restype, resname, resource_pattern_type,
+        principal, host, operation, permission_type
+        in zip(
+            args[0::7],
+            args[1::7],
+            args[2::7],
+            args[3::7],
+            args[4::7],
+            args[5::7],
+            args[6::7],
+        )
+    ]
+
+    try:
+        fs = a.create_acls(acl_bindings, request_timeout=10)
+    except ValueError as e:
+        print(f"create_acls() failed: {e}")
+        return
+
+    # Wait for operation to finish.
+    for res, f in fs.items():
+        try:
+            result = f.result()
+            if result is None:
+                print("Created {}".format(res))
+
+        except KafkaException as e:
+            print("Failed to create ACL {}: {}".format(res, e))
+        except Exception:
+            raise
+
+
+def example_describe_acls(a, args):
+    """ describe acls """
+
+    acl_binding_filters = [
+        AclBindingFilter(
+            ResourceType[restype],
+            parse_nullable_string(resname),
+            ResourcePatternType[resource_pattern_type],
+            parse_nullable_string(principal),
+            parse_nullable_string(host),
+            AclOperation[operation],
+            AclPermissionType[permission_type]
+        )
+        for restype, resname, resource_pattern_type,
+        principal, host, operation, permission_type
+        in zip(
+            args[0::7],
+            args[1::7],
+            args[2::7],
+            args[3::7],
+            args[4::7],
+            args[5::7],
+            args[6::7],
+        )
+    ]
+
+    fs = [
+        a.describe_acls(acl_binding_filter, request_timeout=10)
+        for acl_binding_filter in acl_binding_filters
+    ]
+    # Wait for operations to finish.
+    for acl_binding_filter, f in zip(acl_binding_filters, fs):
+        try:
+            print("Acls matching filter: {}".format(acl_binding_filter))
+            acl_bindings = f.result()
+            for acl_binding in acl_bindings:
+                print(acl_binding)
+
+        except KafkaException as e:
+            print("Failed to describe {}: {}".format(acl_binding_filter, e))
+        except Exception:
+            raise
+
+
+def example_delete_acls(a, args):
+    """ delete acls """
+
+    acl_binding_filters = [
+        AclBindingFilter(
+            ResourceType[restype],
+            parse_nullable_string(resname),
+            ResourcePatternType[resource_pattern_type],
+            parse_nullable_string(principal),
+            parse_nullable_string(host),
+            AclOperation[operation],
+            AclPermissionType[permission_type]
+        )
+        for restype, resname, resource_pattern_type,
+        principal, host, operation, permission_type
+        in zip(
+            args[0::7],
+            args[1::7],
+            args[2::7],
+            args[3::7],
+            args[4::7],
+            args[5::7],
+            args[6::7],
+        )
+    ]
+
+    try:
+        fs = a.delete_acls(acl_binding_filters, request_timeout=10)
+    except ValueError as e:
+        print(f"delete_acls() failed: {e}")
+        return
+
+    # Wait for operation to finish.
+    for res, f in fs.items():
+        try:
+            acl_bindings = f.result()
+            print("Deleted acls matching filter: {}".format(res))
+            for acl_binding in acl_bindings:
+                print(" ", acl_binding)
+
+        except KafkaException as e:
+            print("Failed to delete {}: {}".format(res, e))
+        except Exception:
+            raise
+
+
 def example_alter_configs(a, args):
     """ Alter configs atomically, replacing non-specified
     configuration properties with their default values.
@@ -277,17 +420,146 @@ def example_list(a, args):
         print(" {} consumer groups".format(len(groups)))
         for g in groups:
             if g.error is not None:
-                errstr = ": {}".format(t.error)
+                errstr = ": {}".format(g.error)
             else:
                 errstr = ""
 
             print(" \"{}\" with {} member(s), protocol: {}, protocol_type: {}{}".format(
-                  g, len(g.members), g.protocol, g.protocol_type, errstr))
+                g, len(g.members), g.protocol, g.protocol_type, errstr))
 
             for m in g.members:
                 print("id {} client_id: {} client_host: {}".format(m.id, m.client_id, m.client_host))
 
 
+def example_list_consumer_groups(a, args):
+    """
+    List Consumer Groups
+    """
+    states = {ConsumerGroupState[state] for state in args}
+    future = a.list_consumer_groups(request_timeout=10, states=states)
+    try:
+        list_consumer_groups_result = future.result()
+        print("{} consumer groups".format(len(list_consumer_groups_result.valid)))
+        for valid in list_consumer_groups_result.valid:
+            print("    id: {} is_simple: {} state: {}".format(
+                valid.group_id, valid.is_simple_consumer_group, valid.state))
+        print("{} errors".format(len(list_consumer_groups_result.errors)))
+        for error in list_consumer_groups_result.errors:
+            print("    error: {}".format(error))
+    except Exception:
+        raise
+
+
+def example_describe_consumer_groups(a, args):
+    """
+    Describe Consumer Groups
+    """
+
+    futureMap = a.describe_consumer_groups(args, request_timeout=10)
+
+    for group_id, future in futureMap.items():
+        try:
+            g = future.result()
+            print("Group Id: {}".format(g.group_id))
+            print("  Is Simple          : {}".format(g.is_simple_consumer_group))
+            print("  State              : {}".format(g.state))
+            print("  Partition Assignor : {}".format(g.partition_assignor))
+            print("  Coordinator        : ({}) {}:{}".format(g.coordinator.id, g.coordinator.host, g.coordinator.port))
+            print("  Members: ")
+            for member in g.members:
+                print("    Id                : {}".format(member.member_id))
+                print("    Host              : {}".format(member.host))
+                print("    Client Id         : {}".format(member.client_id))
+                print("    Group Instance Id : {}".format(member.group_instance_id))
+                if member.assignment:
+                    print("    Assignments       :")
+                    for toppar in member.assignment.topic_partitions:
+                        print("      {} [{}]".format(toppar.topic, toppar.partition))
+        except KafkaException as e:
+            print("Error while describing group id '{}': {}".format(group_id, e))
+        except Exception:
+            raise
+
+
+def example_delete_consumer_groups(a, args):
+    """
+    Delete Consumer Groups
+    """
+    groups = a.delete_consumer_groups(args, request_timeout=10)
+    for group_id, future in groups.items():
+        try:
+            future.result()  # The result itself is None
+            print("Deleted group with id '" + group_id + "' successfully")
+        except KafkaException as e:
+            print("Error deleting group id '{}': {}".format(group_id, e))
+        except Exception:
+            raise
+
+
+def example_list_consumer_group_offsets(a, args):
+    """
+    List consumer group offsets
+    """
+
+    topic_partitions = []
+    for topic, partition in zip(args[1::2], args[2::2]):
+        topic_partitions.append(TopicPartition(topic, int(partition)))
+    if len(topic_partitions) == 0:
+        topic_partitions = None
+    groups = [ConsumerGroupTopicPartitions(args[0], topic_partitions)]
+
+    futureMap = a.list_consumer_group_offsets(groups)
+
+    for group_id, future in futureMap.items():
+        try:
+            response_offset_info = future.result()
+            print("Group: " + response_offset_info.group_id)
+            for topic_partition in response_offset_info.topic_partitions:
+                if topic_partition.error:
+                    print("    Error: " + topic_partition.error.str() + " occurred with " +
+                          topic_partition.topic + " [" + str(topic_partition.partition) + "]")
+                else:
+                    print("    " + topic_partition.topic +
+                          " [" + str(topic_partition.partition) + "]: " + str(topic_partition.offset))
+
+        except KafkaException as e:
+            print("Failed to list {}: {}".format(group_id, e))
+        except Exception:
+            raise
+
+
+def example_alter_consumer_group_offsets(a, args):
+    """
+    Alter consumer group offsets
+    """
+
+    topic_partitions = []
+    for topic, partition, offset in zip(args[1::3], args[2::3], args[3::3]):
+        topic_partitions.append(TopicPartition(topic, int(partition), int(offset)))
+    if len(topic_partitions) == 0:
+        topic_partitions = None
+    groups = [ConsumerGroupTopicPartitions(args[0], topic_partitions)]
+
+    futureMap = a.alter_consumer_group_offsets(groups)
+
+    for group_id, future in futureMap.items():
+        try:
+            response_offset_info = future.result()
+            print("Group: " + response_offset_info.group_id)
+            for topic_partition in response_offset_info.topic_partitions:
+                if topic_partition.error:
+                    print("    Error: " + topic_partition.error.str() + " occurred with " +
+                          topic_partition.topic + " [" + str(topic_partition.partition) + "]")
+                else:
+                    print("    " + topic_partition.topic +
+                          " [" + str(topic_partition.partition) + "]: " + str(topic_partition.offset))
+
+        except KafkaException as e:
+            print("Failed to alter {}: {}".format(group_id, e))
+        except Exception:
+            raise
+
+
 if __name__ == '__main__':
     if len(sys.argv) < 3:
         sys.stderr.write('Usage: %s <bootstrap-brokers> <operation> <args..>\n\n' % sys.argv[0])
@@ -300,7 +572,21 @@ if __name__ == '__main__':
                          '<config=val,config2=val2> <resource_type2> <resource_name2> <config..> ..\n')
         sys.stderr.write(' delta_alter_configs <resource_type1> <resource_name1> ' +
                          '<config=val,config2=val2> <resource_type2> <resource_name2> <config..> ..\n')
+        sys.stderr.write(' create_acls <resource_type1> <resource_name1> <resource_pattern_type1> ' +
+                         '<principal1> <host1> <operation1> <permission_type1> ..\n')
+        sys.stderr.write(' describe_acls <resource_type1> <resource_name1> <resource_pattern_type1> ' +
+                         '<principal1> <host1> <operation1> <permission_type1> ..\n')
+        sys.stderr.write(' delete_acls <resource_type1> <resource_name1> <resource_pattern_type1> ' +
+                         '<principal1> <host1> <operation1> <permission_type1> ..\n')
         sys.stderr.write(' list [<all|topics|brokers|groups>]\n')
+        sys.stderr.write(' list_consumer_groups [<state1> <state2> ..]\n')
+        sys.stderr.write(' describe_consumer_groups <group1> <group2> ..\n')
+        sys.stderr.write(' delete_consumer_groups <group1> <group2> ..\n')
+        sys.stderr.write(' list_consumer_group_offsets <group> [<topic1> <partition1> <topic2> <partition2> ..]\n')
+        sys.stderr.write(
+            ' alter_consumer_group_offsets <group> <topic1> <partition1> <offset1> ' +
+            '<topic2> <partition2> <offset2> ..\n')
+
         sys.exit(1)
 
     broker = sys.argv[1]
@@ -316,7 +602,15 @@ if __name__ == '__main__':
               'describe_configs': example_describe_configs,
               'alter_configs': example_alter_configs,
               'delta_alter_configs': example_delta_alter_configs,
-              'list': example_list}
+              'create_acls': example_create_acls,
+              'describe_acls': example_describe_acls,
+              'delete_acls': example_delete_acls,
+              'list': example_list,
+              'list_consumer_groups': example_list_consumer_groups,
+              'describe_consumer_groups': example_describe_consumer_groups,
+              'delete_consumer_groups': example_delete_consumer_groups,
+              'list_consumer_group_offsets': example_list_consumer_group_offsets,
+              'alter_consumer_group_offsets': example_alter_consumer_group_offsets}
 
     if operation not in opsmap:
         sys.stderr.write('Unknown operation: %s\n' % operation)
diff --git a/examples/avro-cli.py b/examples/avro-cli.py
deleted file mode 100755
index 46e0293..0000000
--- a/examples/avro-cli.py
+++ /dev/null
@@ -1,189 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018 Confluent Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import argparse
-from uuid import uuid4
-
-from six.moves import input
-
-from confluent_kafka import avro
-
-# Parse Schema used for serializing User class
-record_schema = avro.loads("""
-    {
-        "namespace": "confluent.io.examples.serialization.avro",
-        "name": "User",
-        "type": "record",
-        "fields": [
-            {"name": "name", "type": "string"},
-            {"name": "favorite_number", "type": "int"},
-            {"name": "favorite_color", "type": "string"}
-        ]
-    }
-""")
-
-
-class User(object):
-    """
-        User stores the deserialized user Avro record.
-    """
-
-    # Use __slots__ to explicitly declare all data members.
-    __slots__ = ["name", "favorite_number", "favorite_color", "id"]
-
-    def __init__(self, name=None, favorite_number=None, favorite_color=None):
-        self.name = name
-        self.favorite_number = favorite_number
-        self.favorite_color = favorite_color
-        # Unique id used to track produce request success/failures.
-        # Do *not* include in the serialized object.
-        self.id = uuid4()
-
-    def to_dict(self):
-        """
-            The Avro Python library does not support code generation.
-            For this reason we must provide a dict representation of our class for serialization.
-        """
-        return {
-            "name": self.name,
-            "favorite_number": self.favorite_number,
-            "favorite_color": self.favorite_color
-        }
-
-
-def on_delivery(err, msg, obj):
-    """
-        Handle delivery reports served from producer.poll.
-        This callback takes an extra argument, obj.
-        This allows the original contents to be included for debugging purposes.
-    """
-    if err is not None:
-        print('Message {} delivery failed for user {} with error {}'.format(
-            obj.id, obj.name, err))
-    else:
-        print('Message {} successfully produced to {} [{}] at offset {}'.format(
-            obj.id, msg.topic(), msg.partition(), msg.offset()))
-
-
-def produce(topic, conf):
-    """
-        Produce User records
-    """
-
-    from confluent_kafka.avro import AvroProducer
-
-    producer = AvroProducer(conf, default_value_schema=record_schema)
-
-    print("Producing user records to topic {}. ^c to exit.".format(topic))
-    while True:
-        # Instantiate new User, populate fields, produce record, execute callbacks.
-        record = User()
-        try:
-            record.name = input("Enter name: ")
-            record.favorite_number = int(input("Enter favorite number: "))
-            record.favorite_color = input("Enter favorite color: ")
-
-            # The message passed to the delivery callback will already be serialized.
-            # To aid in debugging we provide the original object to the delivery callback.
-            producer.produce(topic=topic, value=record.to_dict(),
-                             callback=lambda err, msg, obj=record: on_delivery(err, msg, obj))
-            # Serve on_delivery callbacks from previous asynchronous produce()
-            producer.poll(0)
-        except KeyboardInterrupt:
-            break
-        except ValueError:
-            print("Invalid input, discarding record...")
-            continue
-
-    print("\nFlushing records...")
-    producer.flush()
-
-
-def consume(topic, conf):
-    """
-        Consume User records
-    """
-    from confluent_kafka.avro import AvroConsumer
-    from confluent_kafka.avro.serializer import SerializerError
-
-    print("Consuming user records from topic {} with group {}. ^c to exit.".format(topic, conf["group.id"]))
-
-    c = AvroConsumer(conf, reader_value_schema=record_schema)
-    c.subscribe([topic])
-
-    while True:
-        try:
-            msg = c.poll(1)
-
-            # There were no messages on the queue, continue polling
-            if msg is None:
-                continue
-
-            if msg.error():
-                print("Consumer error: {}".format(msg.error()))
-                continue
-
-            record = User(msg.value())
-            print("name: {}\n\tfavorite_number: {}\n\tfavorite_color: {}\n".format(
-                record.name, record.favorite_number, record.favorite_color))
-        except SerializerError as e:
-            # Report malformed record, discard results, continue polling
-            print("Message deserialization failed {}".format(e))
-            continue
-        except KeyboardInterrupt:
-            break
-
-    print("Shutting down consumer..")
-    c.close()
-
-
-def main(args):
-    # handle common configs
-    conf = {'bootstrap.servers': args.bootstrap_servers,
-            'schema.registry.url': args.schema_registry}
-
-    if args.userinfo:
-        conf['schema.registry.basic.auth.credentials.source'] = 'USER_INFO'
-        conf['schema.registry.basic.auth.user.info'] = args.userinfo
-
-    if args.mode == "produce":
-        produce(args.topic, conf)
-    else:
-        # Fallback to earliest to ensure all messages are consumed
-        conf['group.id'] = args.group
-        conf['auto.offset.reset'] = "earliest"
-        consume(args.topic, conf)
-
-
-if __name__ == '__main__':
-    # To use the provided cluster execute <source root>/tests/docker/bin/cluster_up.sh.
-    # Defaults assume the use of the provided test cluster.
-    parser = argparse.ArgumentParser(description="Example client for handling Avro data")
-    parser.add_argument('-b', dest="bootstrap_servers",
-                        default="localhost:29092", help="Bootstrap broker(s) (host[:port])")
-    parser.add_argument('-s', dest="schema_registry",
-                        default="http://localhost:8083", help="Schema Registry (http(s)://host[:port]")
-    parser.add_argument('-t', dest="topic", default="example_avro",
-                        help="Topic name")
-    parser.add_argument('-u', dest="userinfo", default="ckp_tester:test_secret",
-                        help="Userinfo (username:password); requires Schema Registry with HTTP basic auth enabled")
-    parser.add_argument('mode', choices=['produce', 'consume'],
-                        help="Execution mode (produce | consume)")
-    parser.add_argument('-g', dest="group", default="example_avro",
-                        help="Consumer group; required if running 'consumer' mode")
-
-    main(parser.parse_args())
diff --git a/examples/avro/user_generic.avsc b/examples/avro/user_generic.avsc
new file mode 100644
index 0000000..f7584db
--- /dev/null
+++ b/examples/avro/user_generic.avsc
@@ -0,0 +1,18 @@
+{
+    "name": "User",
+    "type": "record",
+    "fields": [
+        {
+            "name": "name",
+            "type": "string"
+        },
+        {
+            "name": "favorite_number",
+            "type": "long"
+        },
+        {
+            "name": "favorite_color",
+            "type": "string"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/examples/avro/user_specific.avsc b/examples/avro/user_specific.avsc
new file mode 100644
index 0000000..9deb32c
--- /dev/null
+++ b/examples/avro/user_specific.avsc
@@ -0,0 +1,19 @@
+{
+    "namespace": "confluent.io.examples.serialization.avro",
+    "name": "User",
+    "type": "record",
+    "fields": [
+        {
+            "name": "name",
+            "type": "string"
+        },
+        {
+            "name": "favorite_number",
+            "type": "long"
+        },
+        {
+            "name": "favorite_color",
+            "type": "string"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/examples/avro_consumer.py b/examples/avro_consumer.py
index 93ed94d..0fe9255 100644
--- a/examples/avro_consumer.py
+++ b/examples/avro_consumer.py
@@ -14,17 +14,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
 
-#
-# This is a simple example of the SerializingProducer using Avro.
-#
+
+# A simple example demonstrating use of AvroDeserializer.
+
 import argparse
+import os
 
-from confluent_kafka import DeserializingConsumer
+from confluent_kafka import Consumer
+from confluent_kafka.serialization import SerializationContext, MessageField
 from confluent_kafka.schema_registry import SchemaRegistryClient
 from confluent_kafka.schema_registry.avro import AvroDeserializer
-from confluent_kafka.serialization import StringDeserializer
 
 
 class User(object):
@@ -37,8 +37,8 @@ class User(object):
         favorite_number (int): User's favorite number
 
         favorite_color (str): User's favorite color
-
     """
+
     def __init__(self, name=None, favorite_number=None, favorite_color=None):
         self.name = name
         self.favorite_number = favorite_number
@@ -54,8 +54,8 @@ def dict_to_user(obj, ctx):
 
         ctx (SerializationContext): Metadata pertaining to the serialization
             operation.
-
     """
+
     if obj is None:
         return None
 
@@ -66,35 +66,29 @@ def dict_to_user(obj, ctx):
 
 def main(args):
     topic = args.topic
+    is_specific = args.specific == "true"
 
-    schema_str = """
-    {
-        "namespace": "confluent.io.examples.serialization.avro",
-        "name": "User",
-        "type": "record",
-        "fields": [
-            {"name": "name", "type": "string"},
-            {"name": "favorite_number", "type": "int"},
-            {"name": "favorite_color", "type": "string"}
-        ]
-    }
-    """
+    if is_specific:
+        schema = "user_specific.avsc"
+    else:
+        schema = "user_generic.avsc"
+
+    path = os.path.realpath(os.path.dirname(__file__))
+    with open(f"{path}/avro/{schema}") as f:
+        schema_str = f.read()
 
     sr_conf = {'url': args.schema_registry}
     schema_registry_client = SchemaRegistryClient(sr_conf)
 
-    avro_deserializer = AvroDeserializer(schema_str,
-                                         schema_registry_client,
+    avro_deserializer = AvroDeserializer(schema_registry_client,
+                                         schema_str,
                                          dict_to_user)
-    string_deserializer = StringDeserializer('utf_8')
 
     consumer_conf = {'bootstrap.servers': args.bootstrap_servers,
-                     'key.deserializer': string_deserializer,
-                     'value.deserializer': avro_deserializer,
                      'group.id': args.group,
                      'auto.offset.reset': "earliest"}
 
-    consumer = DeserializingConsumer(consumer_conf)
+    consumer = Consumer(consumer_conf)
     consumer.subscribe([topic])
 
     while True:
@@ -104,14 +98,14 @@ def main(args):
             if msg is None:
                 continue
 
-            user = msg.value()
+            user = avro_deserializer(msg.value(), SerializationContext(msg.topic(), MessageField.VALUE))
             if user is not None:
                 print("User record {}: name: {}\n"
                       "\tfavorite_number: {}\n"
                       "\tfavorite_color: {}\n"
                       .format(msg.key(), user.name,
-                              user.favorite_color,
-                              user.favorite_number))
+                              user.favorite_number,
+                              user.favorite_color))
         except KeyboardInterrupt:
             break
 
@@ -119,8 +113,7 @@ def main(args):
 
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description="Consumer Example client with "
-                                                 "serialization capabilities")
+    parser = argparse.ArgumentParser(description="AvroDeserializer example")
     parser.add_argument('-b', dest="bootstrap_servers", required=True,
                         help="Bootstrap broker(s) (host[:port])")
     parser.add_argument('-s', dest="schema_registry", required=True,
@@ -129,5 +122,7 @@ if __name__ == '__main__':
                         help="Topic name")
     parser.add_argument('-g', dest="group", default="example_serde_avro",
                         help="Consumer group")
+    parser.add_argument('-p', dest="specific", default="true",
+                        help="Avro specific record")
 
     main(parser.parse_args())
diff --git a/examples/avro_producer.py b/examples/avro_producer.py
index 289b3fd..12c60a4 100644
--- a/examples/avro_producer.py
+++ b/examples/avro_producer.py
@@ -14,18 +14,18 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
 
-#
-# This is a simple example of the SerializingProducer using Avro.
-#
+
+# A simple example demonstrating use of AvroSerializer.
+
 import argparse
+import os
 from uuid import uuid4
 
 from six.moves import input
 
-from confluent_kafka import SerializingProducer
-from confluent_kafka.serialization import StringSerializer
+from confluent_kafka import Producer
+from confluent_kafka.serialization import StringSerializer, SerializationContext, MessageField
 from confluent_kafka.schema_registry import SchemaRegistryClient
 from confluent_kafka.schema_registry.avro import AvroSerializer
 
@@ -42,8 +42,8 @@ class User(object):
         favorite_color (str): User's favorite color
 
         address(str): User's address; confidential
-
     """
+
     def __init__(self, name, address, favorite_number, favorite_color):
         self.name = name
         self.favorite_number = favorite_number
@@ -64,8 +64,8 @@ def user_to_dict(user, ctx):
 
     Returns:
         dict: Dict populated with user attributes to be serialized.
-
     """
+
     # User._address must not be serialized; omit from dict
     return dict(name=user.name,
                 favorite_number=user.favorite_number,
@@ -88,8 +88,8 @@ def delivery_report(err, msg):
         If you wish to pass the original object(s) for key and value to delivery
         report callback we recommend a bound callback or lambda where you pass
         the objects along.
-
     """
+
     if err is not None:
         print("Delivery failed for User record {}: {}".format(msg.key(), err))
         return
@@ -99,31 +99,29 @@ def delivery_report(err, msg):
 
 def main(args):
     topic = args.topic
+    is_specific = args.specific == "true"
+
+    if is_specific:
+        schema = "user_specific.avsc"
+    else:
+        schema = "user_generic.avsc"
+
+    path = os.path.realpath(os.path.dirname(__file__))
+    with open(f"{path}/avro/{schema}") as f:
+        schema_str = f.read()
 
-    schema_str = """
-    {
-        "namespace": "confluent.io.examples.serialization.avro",
-        "name": "User",
-        "type": "record",
-        "fields": [
-            {"name": "name", "type": "string"},
-            {"name": "favorite_number", "type": "int"},
-            {"name": "favorite_color", "type": "string"}
-        ]
-    }
-    """
     schema_registry_conf = {'url': args.schema_registry}
     schema_registry_client = SchemaRegistryClient(schema_registry_conf)
 
-    avro_serializer = AvroSerializer(schema_str,
-                                     schema_registry_client,
+    avro_serializer = AvroSerializer(schema_registry_client,
+                                     schema_str,
                                      user_to_dict)
 
-    producer_conf = {'bootstrap.servers': args.bootstrap_servers,
-                     'key.serializer': StringSerializer('utf_8'),
-                     'value.serializer': avro_serializer}
+    string_serializer = StringSerializer('utf_8')
+
+    producer_conf = {'bootstrap.servers': args.bootstrap_servers}
 
-    producer = SerializingProducer(producer_conf)
+    producer = Producer(producer_conf)
 
     print("Producing user records to topic {}. ^C to exit.".format(topic))
     while True:
@@ -138,7 +136,9 @@ def main(args):
                         address=user_address,
                         favorite_color=user_favorite_color,
                         favorite_number=user_favorite_number)
-            producer.produce(topic=topic, key=str(uuid4()), value=user,
+            producer.produce(topic=topic,
+                             key=string_serializer(str(uuid4())),
+                             value=avro_serializer(user, SerializationContext(topic, MessageField.VALUE)),
                              on_delivery=delivery_report)
         except KeyboardInterrupt:
             break
@@ -151,12 +151,14 @@ def main(args):
 
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description="SerializingProducer Example")
+    parser = argparse.ArgumentParser(description="AvroSerializer example")
     parser.add_argument('-b', dest="bootstrap_servers", required=True,
                         help="Bootstrap broker(s) (host[:port])")
     parser.add_argument('-s', dest="schema_registry", required=True,
                         help="Schema Registry (http(s)://host[:port]")
     parser.add_argument('-t', dest="topic", default="example_serde_avro",
                         help="Topic name")
+    parser.add_argument('-p', dest="specific", default="true",
+                        help="Avro specific record")
 
     main(parser.parse_args())
diff --git a/examples/consumer.py b/examples/consumer.py
index 986e0f9..8dbee44 100755
--- a/examples/consumer.py
+++ b/examples/consumer.py
@@ -52,7 +52,7 @@ if __name__ == '__main__':
     # Consumer configuration
     # See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
     conf = {'bootstrap.servers': broker, 'group.id': group, 'session.timeout.ms': 6000,
-            'auto.offset.reset': 'earliest'}
+            'auto.offset.reset': 'earliest', 'enable.auto.offset.store': False}
 
     # Check to see if -T option exists
     for opt in optlist:
@@ -102,6 +102,10 @@ if __name__ == '__main__':
                                  (msg.topic(), msg.partition(), msg.offset(),
                                   str(msg.key())))
                 print(msg.value())
+                # Store the offset associated with msg to a local cache.
+                # Stored offsets are committed to Kafka by a background thread every 'auto.commit.interval.ms'.
+                # Explicitly storing offsets after processing gives at-least once semantics.
+                c.store_offsets(msg)
 
     except KeyboardInterrupt:
         sys.stderr.write('%% Aborted by user\n')
diff --git a/examples/docker/Dockerfile.alpine b/examples/docker/Dockerfile.alpine
index 0943c6e..de4b5d6 100644
--- a/examples/docker/Dockerfile.alpine
+++ b/examples/docker/Dockerfile.alpine
@@ -30,7 +30,7 @@ FROM alpine:3.12
 
 COPY . /usr/src/confluent-kafka-python
 
-ENV LIBRDKAFKA_VERSION v1.7.0
+ENV LIBRDKAFKA_VERSION v2.1.1-RC1
 ENV KAFKACAT_VERSION master
 
 
diff --git a/examples/json_consumer.py b/examples/json_consumer.py
index 0e5573e..fbba66a 100644
--- a/examples/json_consumer.py
+++ b/examples/json_consumer.py
@@ -14,16 +14,15 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
 
-#
-# This is a simple example of the SerializingProducer using JSON.
-#
+
+# A simple example demonstrating use of JSONDeserializer.
+
 import argparse
 
-from confluent_kafka import DeserializingConsumer
+from confluent_kafka import Consumer
+from confluent_kafka.serialization import SerializationContext, MessageField
 from confluent_kafka.schema_registry.json_schema import JSONDeserializer
-from confluent_kafka.serialization import StringDeserializer
 
 
 class User(object):
@@ -32,12 +31,10 @@ class User(object):
 
     Args:
         name (str): User's name
-
         favorite_number (int): User's favorite number
-
         favorite_color (str): User's favorite color
-
     """
+
     def __init__(self, name=None, favorite_number=None, favorite_color=None):
         self.name = name
         self.favorite_number = favorite_number
@@ -51,10 +48,9 @@ def dict_to_user(obj, ctx):
     Args:
         ctx (SerializationContext): Metadata pertaining to the serialization
             operation.
-
         obj (dict): Object literal(dict)
-
     """
+
     if obj is None:
         return None
 
@@ -92,15 +88,12 @@ def main(args):
     """
     json_deserializer = JSONDeserializer(schema_str,
                                          from_dict=dict_to_user)
-    string_deserializer = StringDeserializer('utf_8')
 
     consumer_conf = {'bootstrap.servers': args.bootstrap_servers,
-                     'key.deserializer': string_deserializer,
-                     'value.deserializer': json_deserializer,
                      'group.id': args.group,
                      'auto.offset.reset': "earliest"}
 
-    consumer = DeserializingConsumer(consumer_conf)
+    consumer = Consumer(consumer_conf)
     consumer.subscribe([topic])
 
     while True:
@@ -110,14 +103,15 @@ def main(args):
             if msg is None:
                 continue
 
-            user = msg.value()
+            user = json_deserializer(msg.value(), SerializationContext(msg.topic(), MessageField.VALUE))
+
             if user is not None:
                 print("User record {}: name: {}\n"
                       "\tfavorite_number: {}\n"
                       "\tfavorite_color: {}\n"
                       .format(msg.key(), user.name,
-                              user.favorite_color,
-                              user.favorite_number))
+                              user.favorite_number,
+                              user.favorite_color))
         except KeyboardInterrupt:
             break
 
@@ -125,7 +119,7 @@ def main(args):
 
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description="DeserializingConsumer Example")
+    parser = argparse.ArgumentParser(description="JSONDeserializer example")
     parser.add_argument('-b', dest="bootstrap_servers", required=True,
                         help="Bootstrap broker(s) (host[:port])")
     parser.add_argument('-s', dest="schema_registry", required=True,
diff --git a/examples/json_producer.py b/examples/json_producer.py
index 05faf58..4c4ea84 100644
--- a/examples/json_producer.py
+++ b/examples/json_producer.py
@@ -14,18 +14,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
 
-#
-# This is a simple example of the SerializingProducer using JSON.
-#
+
+# A simple example demonstrating use of JSONSerializer.
+
 import argparse
 from uuid import uuid4
 
 from six.moves import input
 
-from confluent_kafka import SerializingProducer
-from confluent_kafka.serialization import StringSerializer
+from confluent_kafka import Producer
+from confluent_kafka.serialization import StringSerializer, SerializationContext, MessageField
 from confluent_kafka.schema_registry import SchemaRegistryClient
 from confluent_kafka.schema_registry.json_schema import JSONSerializer
 
@@ -42,8 +41,8 @@ class User(object):
         favorite_color (str): User's favorite color
 
         address(str): User's address; confidential
-
     """
+
     def __init__(self, name, address, favorite_number, favorite_color):
         self.name = name
         self.favorite_number = favorite_number
@@ -64,8 +63,8 @@ def user_to_dict(user, ctx):
 
     Returns:
         dict: Dict populated with user attributes to be serialized.
-
     """
+
     # User._address must not be serialized; omit from dict
     return dict(name=user.name,
                 favorite_number=user.favorite_number,
@@ -74,22 +73,13 @@ def user_to_dict(user, ctx):
 
 def delivery_report(err, msg):
     """
-    Reports the failure or success of a message delivery.
+    Reports the success or failure of a message delivery.
 
     Args:
         err (KafkaError): The error that occurred on None on success.
-
         msg (Message): The message that was produced or failed.
-
-    Note:
-        In the delivery report callback the Message.key() and Message.value()
-        will be the binary format as encoded by any configured Serializers and
-        not the same object that was passed to produce().
-        If you wish to pass the original object(s) for key and value to delivery
-        report callback we recommend a bound callback or lambda where you pass
-        the objects along.
-
     """
+
     if err is not None:
         print("Delivery failed for User record {}: {}".format(msg.key(), err))
         return
@@ -127,13 +117,10 @@ def main(args):
     schema_registry_conf = {'url': args.schema_registry}
     schema_registry_client = SchemaRegistryClient(schema_registry_conf)
 
+    string_serializer = StringSerializer('utf_8')
     json_serializer = JSONSerializer(schema_str, schema_registry_client, user_to_dict)
 
-    producer_conf = {'bootstrap.servers': args.bootstrap_servers,
-                     'key.serializer': StringSerializer('utf_8'),
-                     'value.serializer': json_serializer}
-
-    producer = SerializingProducer(producer_conf)
+    producer = Producer({'bootstrap.servers': args.bootstrap_servers})
 
     print("Producing user records to topic {}. ^C to exit.".format(topic))
     while True:
@@ -148,7 +135,9 @@ def main(args):
                         address=user_address,
                         favorite_color=user_favorite_color,
                         favorite_number=user_favorite_number)
-            producer.produce(topic=topic, key=str(uuid4()), value=user,
+            producer.produce(topic=topic,
+                             key=string_serializer(str(uuid4())),
+                             value=json_serializer(user, SerializationContext(topic, MessageField.VALUE)),
                              on_delivery=delivery_report)
         except KeyboardInterrupt:
             break
@@ -161,7 +150,7 @@ def main(args):
 
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description="SerializingProducer Example")
+    parser = argparse.ArgumentParser(description="JSONSerializer example")
     parser.add_argument('-b', dest="bootstrap_servers", required=True,
                         help="Bootstrap broker(s) (host[:port])")
     parser.add_argument('-s', dest="schema_registry", required=True,
diff --git a/examples/oauth_producer.py b/examples/oauth_producer.py
index 518feee..8ee4f96 100644
--- a/examples/oauth_producer.py
+++ b/examples/oauth_producer.py
@@ -14,19 +14,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
 
-#
+
 # This uses OAuth client credentials grant:
 # https://www.oauth.com/oauth2-servers/access-tokens/client-credentials/
 # where client_id and client_secret are passed as HTTP Authorization header
-#
 
 import logging
 import functools
 import argparse
 import time
-from confluent_kafka import SerializingProducer
+from confluent_kafka import Producer
 from confluent_kafka.serialization import StringSerializer
 import requests
 
@@ -51,8 +49,6 @@ def producer_config(args):
     logger = logging.getLogger(__name__)
     return {
         'bootstrap.servers': args.bootstrap_servers,
-        'key.serializer': StringSerializer('utf_8'),
-        'value.serializer': StringSerializer('utf_8'),
         'security.protocol': 'sasl_plaintext',
         'sasl.mechanisms': 'OAUTHBEARER',
         # sasl.oauthbearer.config can be used to pass argument to your oauth_cb
@@ -92,10 +88,9 @@ def delivery_report(err, msg):
 def main(args):
     topic = args.topic
     delimiter = args.delimiter
-
     producer_conf = producer_config(args)
-
-    producer = SerializingProducer(producer_conf)
+    producer = Producer(producer_conf)
+    serializer = StringSerializer('utf_8')
 
     print('Producing records to topic {}. ^C to exit.'.format(topic))
     while True:
@@ -105,10 +100,13 @@ def main(args):
             msg_data = input(">")
             msg = msg_data.split(delimiter)
             if len(msg) == 2:
-                producer.produce(topic=topic, key=msg[0], value=msg[1],
+                producer.produce(topic=topic,
+                                 key=serializer(msg[0]),
+                                 value=serializer(msg[1]),
                                  on_delivery=delivery_report)
             else:
-                producer.produce(topic=topic, value=msg[0],
+                producer.produce(topic=topic,
+                                 value=serializer(msg[0]),
                                  on_delivery=delivery_report)
         except KeyboardInterrupt:
             break
@@ -118,8 +116,7 @@ def main(args):
 
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description="SerializingProducer OAUTH Example"
-                                                 " with client credentials grant")
+    parser = argparse.ArgumentParser(description="OAUTH example with client credentials grant")
     parser.add_argument('-b', dest="bootstrap_servers", required=True,
                         help="Bootstrap broker(s) (host[:port])")
     parser.add_argument('-t', dest="topic", default="example_producer_oauth",
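
examples/oauth_producer.py keeps the client-credentials flow but now pairs a plain Producer with an explicit StringSerializer. A rough sketch of the token callback side, with a placeholder token endpoint, credentials and broker; the callback is expected to return the token and its absolute expiry time in seconds:

    import time

    import requests
    from confluent_kafka import Producer

    def _get_token(oauth_config):
        # oauth_config is the 'sasl.oauthbearer.config' string; unused in this sketch.
        resp = requests.post('https://login.example.com/oauth2/token',       # placeholder URL
                             auth=('my-client-id', 'my-client-secret'),      # placeholder credentials
                             data={'grant_type': 'client_credentials', 'scope': 'kafka'})
        token = resp.json()
        return token['access_token'], time.time() + float(token['expires_in'])

    producer = Producer({'bootstrap.servers': 'localhost:9092',
                         'security.protocol': 'sasl_plaintext',
                         'sasl.mechanisms': 'OAUTHBEARER',
                         'oauth_cb': _get_token})
    producer.poll(0)   # serving the callback queue triggers the first token fetch
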
diff --git a/examples/user.proto b/examples/protobuf/user.proto
similarity index 100%
rename from examples/user.proto
rename to examples/protobuf/user.proto
diff --git a/examples/protobuf/user_pb2.py b/examples/protobuf/user_pb2.py
new file mode 100644
index 0000000..138f521
--- /dev/null
+++ b/examples/protobuf/user_pb2.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: user.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import builder as _builder
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\nuser.proto\"E\n\x04User\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0f\x66\x61vorite_number\x18\x02 \x01(\x03\x12\x16\n\x0e\x66\x61vorite_color\x18\x03 \x01(\tb\x06proto3')
+
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'user_pb2', globals())
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+  DESCRIPTOR._options = None
+  _USER._serialized_start=14
+  _USER._serialized_end=83
+# @@protoc_insertion_point(module_scope)
diff --git a/examples/protobuf_consumer.py b/examples/protobuf_consumer.py
index 0253ba2..943bb0d 100644
--- a/examples/protobuf_consumer.py
+++ b/examples/protobuf_consumer.py
@@ -14,10 +14,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
 
-#
-# This is a simple example of the SerializingProducer using protobuf.
+
+# A simple example demonstrating use of ProtobufDeserializer.
 #
 # To regenerate Protobuf classes you must first install the protobuf
 # compiler. Once installed you may call protoc directly or use make.
@@ -28,29 +27,27 @@
 # After installing protoc execute the following command from the examples
 # directory to regenerate the user_pb2 module.
 # `make`
-#
+
 import argparse
 
-# Protobuf generated class; resides at ./user_pb2.py
-import user_pb2
-from confluent_kafka import DeserializingConsumer
+# Protobuf generated class; resides at ./protobuf/user_pb2.py
+import protobuf.user_pb2 as user_pb2
+from confluent_kafka import Consumer
+from confluent_kafka.serialization import SerializationContext, MessageField
 from confluent_kafka.schema_registry.protobuf import ProtobufDeserializer
-from confluent_kafka.serialization import StringDeserializer
 
 
 def main(args):
     topic = args.topic
 
-    protobuf_deserializer = ProtobufDeserializer(user_pb2.User)
-    string_deserializer = StringDeserializer('utf_8')
+    protobuf_deserializer = ProtobufDeserializer(user_pb2.User,
+                                                 {'use.deprecated.format': False})
 
     consumer_conf = {'bootstrap.servers': args.bootstrap_servers,
-                     'key.deserializer': string_deserializer,
-                     'value.deserializer': protobuf_deserializer,
                      'group.id': args.group,
                      'auto.offset.reset': "earliest"}
 
-    consumer = DeserializingConsumer(consumer_conf)
+    consumer = Consumer(consumer_conf)
     consumer.subscribe([topic])
 
     while True:
@@ -60,14 +57,16 @@ def main(args):
             if msg is None:
                 continue
 
-            user = msg.value()
+            user = protobuf_deserializer(msg.value(), SerializationContext(topic, MessageField.VALUE))
+
             if user is not None:
-                print("User record {}: name: {}\n"
+                print("User record {}:\n"
+                      "\tname: {}\n"
                       "\tfavorite_number: {}\n"
                       "\tfavorite_color: {}\n"
                       .format(msg.key(), user.name,
-                              user.favorite_color,
-                              user.favorite_number))
+                              user.favorite_number,
+                              user.favorite_color))
         except KeyboardInterrupt:
             break
 
@@ -75,7 +74,7 @@ def main(args):
 
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description="DeserializingConsumer Example")
+    parser = argparse.ArgumentParser(description="ProtobufDeserializer example")
     parser.add_argument('-b', dest="bootstrap_servers", required=True,
                         help="Bootstrap broker(s) (host[:port])")
     parser.add_argument('-s', dest="schema_registry", required=True,
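
With DeserializingConsumer gone from examples/protobuf_consumer.py, deserialization now happens inside the poll loop. A condensed sketch, assuming a local broker plus made-up group and topic names:

    from confluent_kafka import Consumer
    from confluent_kafka.schema_registry.protobuf import ProtobufDeserializer
    from confluent_kafka.serialization import SerializationContext, MessageField

    import protobuf.user_pb2 as user_pb2   # generated from examples/protobuf/user.proto

    protobuf_deserializer = ProtobufDeserializer(user_pb2.User,
                                                 {'use.deprecated.format': False})

    consumer = Consumer({'bootstrap.servers': 'localhost:9092',   # placeholder broker
                         'group.id': 'protobuf-example',          # placeholder group id
                         'auto.offset.reset': 'earliest'})
    consumer.subscribe(['protobuf-users'])                        # placeholder topic

    msg = consumer.poll(1.0)
    if msg is not None and msg.error() is None:
        user = protobuf_deserializer(msg.value(),
                                     SerializationContext(msg.topic(), MessageField.VALUE))
        print(user.name, user.favorite_number, user.favorite_color)
    consumer.close()
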
diff --git a/examples/protobuf_producer.py b/examples/protobuf_producer.py
index 740eb6c..ac958f4 100644
--- a/examples/protobuf_producer.py
+++ b/examples/protobuf_producer.py
@@ -14,12 +14,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
 
+
+# A simple example demonstrating use of ProtobufSerializer.
 #
-# This is a simple example of the SerializingProducer using protobuf.
-#
-# To regenerate Protobuf classes you must first install the protobuf
+# To create Protobuf classes you must first install the protobuf
 # compiler. Once installed you may call protoc directly or use make.
 #
 # See the protocol buffer docs for instructions on installing and using protoc.
@@ -28,16 +27,16 @@
 # After installing protoc execute the following command from the examples
 # directory to regenerate the user_pb2 module.
 # `make`
-#
+
 import argparse
 from uuid import uuid4
 
 from six.moves import input
 
-# Protobuf generated class; resides at ./user_pb2.py
-import user_pb2
-from confluent_kafka import SerializingProducer
-from confluent_kafka.serialization import StringSerializer
+# Protobuf generated class; resides at ./protobuf/user_pb2.py
+import protobuf.user_pb2 as user_pb2
+from confluent_kafka import Producer
+from confluent_kafka.serialization import StringSerializer, SerializationContext, MessageField
 from confluent_kafka.schema_registry import SchemaRegistryClient
 from confluent_kafka.schema_registry.protobuf import ProtobufSerializer
 
@@ -48,18 +47,9 @@ def delivery_report(err, msg):
 
     Args:
         err (KafkaError): The error that occurred, or None on success.
-
         msg (Message): The message that was produced or failed.
-
-    Note:
-        In the delivery report callback the Message.key() and Message.value()
-        will be the binary format as encoded by any configured Serializers and
-        not the same object that was passed to produce().
-        If you wish to pass the original object(s) for key and value to delivery
-        report callback we recommend a bound callback or lambda where you pass
-        the objects along.
-
     """
+
     if err is not None:
         print("Delivery failed for User record {}: {}".format(msg.key(), err))
         return
@@ -73,14 +63,14 @@ def main(args):
     schema_registry_conf = {'url': args.schema_registry}
     schema_registry_client = SchemaRegistryClient(schema_registry_conf)
 
+    string_serializer = StringSerializer('utf8')
     protobuf_serializer = ProtobufSerializer(user_pb2.User,
-                                             schema_registry_client)
+                                             schema_registry_client,
+                                             {'use.deprecated.format': False})
 
-    producer_conf = {'bootstrap.servers': args.bootstrap_servers,
-                     'key.serializer': StringSerializer('utf_8'),
-                     'value.serializer': protobuf_serializer}
+    producer_conf = {'bootstrap.servers': args.bootstrap_servers}
 
-    producer = SerializingProducer(producer_conf)
+    producer = Producer(producer_conf)
 
     print("Producing user records to topic {}. ^C to exit.".format(topic))
     while True:
@@ -93,9 +83,11 @@ def main(args):
             user = user_pb2.User(name=user_name,
                                  favorite_color=user_favorite_color,
                                  favorite_number=user_favorite_number)
-            producer.produce(topic=topic, key=str(uuid4()), value=user,
+            producer.produce(topic=topic, partition=0,
+                             key=string_serializer(str(uuid4())),
+                             value=protobuf_serializer(user, SerializationContext(topic, MessageField.VALUE)),
                              on_delivery=delivery_report)
-        except KeyboardInterrupt:
+        except (KeyboardInterrupt, EOFError):
             break
         except ValueError:
             print("Invalid input, discarding record...")
@@ -106,7 +98,7 @@ def main(args):
 
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description="SerializingProducer Example")
+    parser = argparse.ArgumentParser(description="ProtobufSerializer example")
     parser.add_argument('-b', dest="bootstrap_servers", required=True,
                         help="Bootstrap broker(s) (host[:port])")
     parser.add_argument('-s', dest="schema_registry", required=True,
diff --git a/examples/sasl_producer.py b/examples/sasl_producer.py
index f50f799..aa5a2bc 100644
--- a/examples/sasl_producer.py
+++ b/examples/sasl_producer.py
@@ -14,16 +14,15 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
 
-#
-# This is a simple example of the SerializingProducer using SASL authentication.
-#
+
+# This is a simple example demonstrating SASL authentication.
+
 import argparse
 
 from six.moves import input
 
-from confluent_kafka import SerializingProducer
+from confluent_kafka import Producer
 from confluent_kafka.serialization import StringSerializer
 
 
@@ -76,13 +75,10 @@ def sasl_conf(args):
 def main(args):
     topic = args.topic
     delimiter = args.delimiter
-    producer_conf = {'bootstrap.servers': args.bootstrap_servers,
-                     'key.serializer': StringSerializer('utf_8'),
-                     'value.serializer': StringSerializer('utf_8')}
-
+    producer_conf = {'bootstrap.servers': args.bootstrap_servers}
     producer_conf.update(sasl_conf(args))
-
-    producer = SerializingProducer(producer_conf)
+    producer = Producer(producer_conf)
+    serializer = StringSerializer('utf_8')
 
     print("Producing records to topic {}. ^C to exit.".format(topic))
     while True:
@@ -92,10 +88,13 @@ def main(args):
             msg_data = input(">")
             msg = msg_data.split(delimiter)
             if len(msg) == 2:
-                producer.produce(topic=topic, key=msg[0], value=msg[1],
+                producer.produce(topic=topic,
+                                 key=serializer(msg[0]),
+                                 value=serializer(msg[1]),
                                  on_delivery=delivery_report)
             else:
-                producer.produce(topic=topic, value=msg[0],
+                producer.produce(topic=topic,
+                                 value=serializer(msg[0]),
                                  on_delivery=delivery_report)
         except KeyboardInterrupt:
             break
@@ -105,8 +104,7 @@ def main(args):
 
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description="SerializingProducer"
-                                                 " SASL Example")
+    parser = argparse.ArgumentParser(description="SASL Example")
     parser.add_argument('-b', dest="bootstrap_servers", required=True,
                         help="Bootstrap broker(s) (host[:port])")
     parser.add_argument('-t', dest="topic", default="example_producer_sasl",
diff --git a/examples/user_pb2.py b/examples/user_pb2.py
deleted file mode 100644
index 3c1a2f1..0000000
--- a/examples/user_pb2.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: user.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='user.proto',
-  package='',
-  syntax='proto3',
-  serialized_pb=_b('\n\nuser.proto\"E\n\x04User\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0f\x66\x61vorite_number\x18\x02 \x01(\x03\x12\x16\n\x0e\x66\x61vorite_color\x18\x03 \x01(\tb\x06proto3')
-)
-
-
-
-
-_USER = _descriptor.Descriptor(
-  name='User',
-  full_name='User',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='name', full_name='User.name', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='favorite_number', full_name='User.favorite_number', index=1,
-      number=2, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='favorite_color', full_name='User.favorite_color', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=14,
-  serialized_end=83,
-)
-
-DESCRIPTOR.message_types_by_name['User'] = _USER
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-User = _reflection.GeneratedProtocolMessageType('User', (_message.Message,), dict(
-  DESCRIPTOR = _USER,
-  __module__ = 'user_pb2'
-  # @@protoc_insertion_point(class_scope:User)
-  ))
-_sym_db.RegisterMessage(User)
-
-
-# @@protoc_insertion_point(module_scope)
diff --git a/service.yml b/service.yml
new file mode 100644
index 0000000..971ef77
--- /dev/null
+++ b/service.yml
@@ -0,0 +1,11 @@
+name: confluent-kafka-python
+lang: python
+lang_version: 3.7
+git:
+  enable: true
+github:
+  enable: true
+  repo_name: confluentinc/confluent-kafka-python
+semaphore:
+  enable: true
+  pipeline_enable: false
diff --git a/setup.py b/setup.py
index 9e1d337..969bff9 100755
--- a/setup.py
+++ b/setup.py
@@ -27,8 +27,7 @@ SCHEMA_REGISTRY_REQUIRES = ['requests']
 
 AVRO_REQUIRES = ['fastavro>=0.23.0,<1.0;python_version<"3.0"',
                  'fastavro>=1.0;python_version>"3.0"',
-                 'avro==1.10.0;python_version<"3.0"',
-                 'avro-python3==1.10.0;python_version>"3.0"'
+                 'avro>=1.11.1,<2',
                  ] + SCHEMA_REGISTRY_REQUIRES
 
 JSON_REQUIRES = ['pyrsistent==0.16.1;python_version<"3.0"',
@@ -76,7 +75,7 @@ trove_classifiers = [
 setup(name='confluent-kafka',
       # Make sure to bump CFL_VERSION* in confluent_kafka/src/confluent_kafka.h
       # and version in docs/conf.py.
-      version='1.7.0',
+      version='2.1.1rc1',
       description='Confluent\'s Python client for Apache Kafka',
       author='Confluent Inc',
       author_email='support@confluent.io',
diff --git a/src/confluent_kafka/__init__.py b/src/confluent_kafka/__init__.py
index e8e5cc3..d477ba1 100644
--- a/src/confluent_kafka/__init__.py
+++ b/src/confluent_kafka/__init__.py
@@ -19,6 +19,7 @@
 from .deserializing_consumer import DeserializingConsumer
 from .serializing_producer import SerializingProducer
 from .error import KafkaException, KafkaError
+from ._model import Node, ConsumerGroupTopicPartitions, ConsumerGroupState
 
 from .cimpl import (Producer,
                     Consumer,
@@ -40,7 +41,8 @@ __all__ = ['admin', 'Consumer',
            'OFFSET_BEGINNING', 'OFFSET_END', 'OFFSET_INVALID', 'OFFSET_STORED',
            'Producer', 'DeserializingConsumer',
            'SerializingProducer', 'TIMESTAMP_CREATE_TIME', 'TIMESTAMP_LOG_APPEND_TIME',
-           'TIMESTAMP_NOT_AVAILABLE', 'TopicPartition']
+           'TIMESTAMP_NOT_AVAILABLE', 'TopicPartition', 'Node',
+           'ConsumerGroupTopicPartitions', 'ConsumerGroupState']
 
 __version__ = version()[0]
 
diff --git a/src/confluent_kafka/_model/__init__.py b/src/confluent_kafka/_model/__init__.py
new file mode 100644
index 0000000..2bab6a1
--- /dev/null
+++ b/src/confluent_kafka/_model/__init__.py
@@ -0,0 +1,91 @@
+# Copyright 2022 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import Enum
+from .. import cimpl
+
+
+class Node:
+    """
+    Represents node information.
+    Used by :class:`ConsumerGroupDescription`
+
+    Parameters
+    ----------
+    id: int
+        The node id of this node.
+    id_string: str
+        String representation of the node id.
+    host: str
+        The host name for this node.
+    port: int
+        The port for this node.
+    rack: str
+        The rack for this node.
+    """
+    def __init__(self, id, host, port, rack=None):
+        self.id = id
+        self.id_string = str(id)
+        self.host = host
+        self.port = port
+        self.rack = rack
+
+
+class ConsumerGroupTopicPartitions:
+    """
+    Represents consumer group and its topic partition information.
+    Used by :meth:`AdminClient.list_consumer_group_offsets` and
+    :meth:`AdminClient.alter_consumer_group_offsets`.
+
+    Parameters
+    ----------
+    group_id: str
+        Id of the consumer group.
+    topic_partitions: list(TopicPartition)
+        List of topic partitions information.
+    """
+    def __init__(self, group_id, topic_partitions=None):
+        self.group_id = group_id
+        self.topic_partitions = topic_partitions
+
+
+class ConsumerGroupState(Enum):
+    """
+    Enumerates the different types of Consumer Group State.
+
+    Note that the state UNKOWN (a misspelling kept for backward compatibility) is
+    deprecated and will be removed in a future major release. Use UNKNOWN instead.
+
+    Values
+    ------
+    UNKNOWN                 : State is not known or not set.
+    UNKOWN                  : State is not known or not set (deprecated misspelled alias of UNKNOWN).
+    PREPARING_REBALANCING   : Preparing rebalance for the consumer group.
+    COMPLETING_REBALANCING  : Consumer Group is completing rebalancing.
+    STABLE                  : Consumer Group is stable.
+    DEAD                    : Consumer Group is Dead.
+    EMPTY                   : Consumer Group is Empty.
+    """
+    UNKNOWN = cimpl.CONSUMER_GROUP_STATE_UNKNOWN
+    UNKOWN = UNKNOWN
+    PREPARING_REBALANCING = cimpl.CONSUMER_GROUP_STATE_PREPARING_REBALANCE
+    COMPLETING_REBALANCING = cimpl.CONSUMER_GROUP_STATE_COMPLETING_REBALANCE
+    STABLE = cimpl.CONSUMER_GROUP_STATE_STABLE
+    DEAD = cimpl.CONSUMER_GROUP_STATE_DEAD
+    EMPTY = cimpl.CONSUMER_GROUP_STATE_EMPTY
+
+    def __lt__(self, other):
+        if self.__class__ != other.__class__:
+            return NotImplemented
+        return self.value < other.value
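
These classes are plain data holders plus an enum. A short sketch of how an application might build a request object and use the state enum; the group, topic and partition below are made up:

    from confluent_kafka import TopicPartition, ConsumerGroupTopicPartitions, ConsumerGroupState

    # Request object for AdminClient.list_consumer_group_offsets(); passing
    # topic_partitions=None instead asks for all committed offsets of the group.
    request = ConsumerGroupTopicPartitions('my-group', [TopicPartition('my-topic', 0)])

    # The enum supports ordering and lookup by name.
    state = ConsumerGroupState.STABLE
    print(state.name, state is ConsumerGroupState['STABLE'])
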
diff --git a/src/confluent_kafka/_util/__init__.py b/src/confluent_kafka/_util/__init__.py
new file mode 100644
index 0000000..315277f
--- /dev/null
+++ b/src/confluent_kafka/_util/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2022 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .validation_util import ValidationUtil  # noqa: F401
+from .conversion_util import ConversionUtil  # noqa: F401
diff --git a/src/confluent_kafka/_util/conversion_util.py b/src/confluent_kafka/_util/conversion_util.py
new file mode 100644
index 0000000..82c9b70
--- /dev/null
+++ b/src/confluent_kafka/_util/conversion_util.py
@@ -0,0 +1,38 @@
+# Copyright 2022 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import Enum
+
+
+class ConversionUtil:
+    @staticmethod
+    def convert_to_enum(val, enum_clazz):
+        if type(enum_clazz) is not type(Enum):
+            raise TypeError("'enum_clazz' must be of type Enum")
+
+        if type(val) == str:
+            # Allow it to be specified as case-insensitive string, for convenience.
+            try:
+                val = enum_clazz[val.upper()]
+            except KeyError:
+                raise ValueError("Unknown value \"%s\": should be a %s" % (val, enum_clazz.__name__))
+
+        elif type(val) == int:
+            # The C-code passes restype as an int, convert to enum.
+            val = enum_clazz(val)
+
+        elif type(val) != enum_clazz:
+            raise TypeError("Unknown value \"%s\": should be a %s" % (val, enum_clazz.__name__))
+
+        return val
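
convert_to_enum accepts an existing member, a case-insensitive member name, or the underlying integer value. An illustrative call against ConsumerGroupState; note this lives in an internal (underscore) package, so the import is shown only to demonstrate the behaviour:

    from confluent_kafka import ConsumerGroupState
    from confluent_kafka._util import ConversionUtil

    ConversionUtil.convert_to_enum('stable', ConsumerGroupState)        # -> ConsumerGroupState.STABLE
    ConversionUtil.convert_to_enum(ConsumerGroupState.EMPTY,
                                   ConsumerGroupState)                  # returned unchanged
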
diff --git a/src/confluent_kafka/_util/validation_util.py b/src/confluent_kafka/_util/validation_util.py
new file mode 100644
index 0000000..ffe5785
--- /dev/null
+++ b/src/confluent_kafka/_util/validation_util.py
@@ -0,0 +1,56 @@
+# Copyright 2022 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ..cimpl import KafkaError
+
+try:
+    string_type = basestring
+except NameError:
+    string_type = str
+
+
+class ValidationUtil:
+    @staticmethod
+    def check_multiple_not_none(obj, vars_to_check):
+        for param in vars_to_check:
+            ValidationUtil.check_not_none(obj, param)
+
+    @staticmethod
+    def check_not_none(obj, param):
+        if getattr(obj, param) is None:
+            raise ValueError("Expected %s to be not None" % (param,))
+
+    @staticmethod
+    def check_multiple_is_string(obj, vars_to_check):
+        for param in vars_to_check:
+            ValidationUtil.check_is_string(obj, param)
+
+    @staticmethod
+    def check_is_string(obj, param):
+        param_value = getattr(obj, param)
+        if param_value is not None and not isinstance(param_value, string_type):
+            raise TypeError("Expected %s to be a string" % (param,))
+
+    @staticmethod
+    def check_kafka_errors(errors):
+        if not isinstance(errors, list):
+            raise TypeError("errors should be None or a list")
+        for error in errors:
+            if not isinstance(error, KafkaError):
+                raise TypeError("Expected list of KafkaError")
+
+    @staticmethod
+    def check_kafka_error(error):
+        if not isinstance(error, KafkaError):
+            raise TypeError("Expected error to be a KafkaError")
diff --git a/src/confluent_kafka/admin/__init__.py b/src/confluent_kafka/admin/__init__.py
index 161b032..39a5e50 100644
--- a/src/confluent_kafka/admin/__init__.py
+++ b/src/confluent_kafka/admin/__init__.py
@@ -1,10 +1,49 @@
+# Copyright 2022 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 """
 Kafka admin client: create, view, alter, and delete topics and resources.
 """
-from ..cimpl import (KafkaException, # noqa
+import concurrent.futures
+
+# Unused imports are kept so they remain accessible through this public module
+from ._config import (ConfigSource,  # noqa: F401
+                      ConfigEntry,
+                      ConfigResource)
+from ._resource import (ResourceType,  # noqa: F401
+                        ResourcePatternType)
+from ._acl import (AclOperation,  # noqa: F401
+                   AclPermissionType,
+                   AclBinding,
+                   AclBindingFilter)
+from ._metadata import (BrokerMetadata,  # noqa: F401
+                        ClusterMetadata,
+                        GroupMember,
+                        GroupMetadata,
+                        PartitionMetadata,
+                        TopicMetadata)
+from ._group import (ConsumerGroupListing,  # noqa: F401
+                     ListConsumerGroupsResult,
+                     ConsumerGroupDescription,
+                     MemberAssignment,
+                     MemberDescription)
+from ..cimpl import (KafkaException,  # noqa: F401
+                     KafkaError,
                      _AdminClientImpl,
                      NewTopic,
                      NewPartitions,
+                     TopicPartition as _TopicPartition,
                      CONFIG_SOURCE_UNKNOWN_CONFIG,
                      CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG,
                      CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG,
@@ -15,181 +54,19 @@ from ..cimpl import (KafkaException, # noqa
                      RESOURCE_ANY,
                      RESOURCE_TOPIC,
                      RESOURCE_GROUP,
-                     RESOURCE_BROKER)
-
-import concurrent.futures
-import functools
-
-from enum import Enum
-
-
-class ConfigSource(Enum):
-    """
-    Enumerates the different sources of configuration properties.
-    Used by ConfigEntry to specify the
-    source of configuration properties returned by `describe_configs()`.
-    """
-    UNKNOWN_CONFIG = CONFIG_SOURCE_UNKNOWN_CONFIG  #: Unknown
-    DYNAMIC_TOPIC_CONFIG = CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG  #: Dynamic Topic
-    DYNAMIC_BROKER_CONFIG = CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG  #: Dynamic Broker
-    DYNAMIC_DEFAULT_BROKER_CONFIG = CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG  #: Dynamic Default Broker
-    STATIC_BROKER_CONFIG = CONFIG_SOURCE_STATIC_BROKER_CONFIG  #: Static Broker
-    DEFAULT_CONFIG = CONFIG_SOURCE_DEFAULT_CONFIG  #: Default
-
-
-class ConfigEntry(object):
-    """
-    Represents a configuration property. Returned by describe_configs() for each configuration
-    entry of the specified resource.
-
-    This class is typically not user instantiated.
-    """
-
-    def __init__(self, name, value,
-                 source=ConfigSource.UNKNOWN_CONFIG,
-                 is_read_only=False,
-                 is_default=False,
-                 is_sensitive=False,
-                 is_synonym=False,
-                 synonyms=[]):
-        """
-        This class is typically not user instantiated.
-        """
-        super(ConfigEntry, self).__init__()
-
-        self.name = name
-        """Configuration property name."""
-        self.value = value
-        """Configuration value (or None if not set or is_sensitive==True)."""
-        self.source = source
-        """Configuration source."""
-        self.is_read_only = bool(is_read_only)
-        """Indicates whether the configuration property is read-only."""
-        self.is_default = bool(is_default)
-        """Indicates whether the configuration property is using its default value."""
-        self.is_sensitive = bool(is_sensitive)
-        """
-        Indicates whether the configuration property value contains
-        sensitive information (such as security settings), in which
-        case .value is None."""
-        self.is_synonym = bool(is_synonym)
-        """Indicates whether the configuration property is a synonym for the parent configuration entry."""
-        self.synonyms = synonyms
-        """A list of synonyms (ConfigEntry) and alternate sources for this configuration property."""
-
-    def __repr__(self):
-        return "ConfigEntry(%s=\"%s\")" % (self.name, self.value)
-
-    def __str__(self):
-        return "%s=\"%s\"" % (self.name, self.value)
-
-
-@functools.total_ordering
-class ConfigResource(object):
-    """
-    Represents a resource that has configuration, and (optionally)
-    a collection of configuration properties for that resource. Used by
-    describe_configs() and alter_configs().
-
-    Parameters
-    ----------
-    restype : `ConfigResource.Type`
-       The resource type.
-    name : `str`
-       The resource name, which depends on the resource type. For RESOURCE_BROKER, the resource name is the broker id.
-    set_config : `dict`
-        The configuration to set/overwrite. Dictionary of str, str.
-    """
-
-    class Type(Enum):
-        """
-        Enumerates the different types of Kafka resources.
-        """
-        UNKNOWN = RESOURCE_UNKNOWN  #: Resource type is not known or not set.
-        ANY = RESOURCE_ANY  #: Match any resource, used for lookups.
-        TOPIC = RESOURCE_TOPIC  #: Topic resource. Resource name is topic name.
-        GROUP = RESOURCE_GROUP  #: Group resource. Resource name is group.id.
-        BROKER = RESOURCE_BROKER  #: Broker resource. Resource name is broker id.
-
-    def __init__(self, restype, name,
-                 set_config=None, described_configs=None, error=None):
-        """
-        :param ConfigResource.Type restype: Resource type.
-        :param str name: The resource name, which depends on restype.
-                         For RESOURCE_BROKER, the resource name is the broker id.
-        :param dict set_config: The configuration to set/overwrite. Dictionary of str, str.
-        :param dict described_configs: For internal use only.
-        :param KafkaError error: For internal use only.
-        """
-        super(ConfigResource, self).__init__()
-
-        if name is None:
-            raise ValueError("Expected resource name to be a string")
-
-        if type(restype) == str:
-            # Allow resource type to be specified as case-insensitive string, for convenience.
-            try:
-                restype = ConfigResource.Type[restype.upper()]
-            except KeyError:
-                raise ValueError("Unknown resource type \"%s\": should be a ConfigResource.Type" % restype)
-
-        elif type(restype) == int:
-            # The C-code passes restype as an int, convert to Type.
-            restype = ConfigResource.Type(restype)
-
-        self.restype = restype
-        self.restype_int = int(self.restype.value)  # for the C code
-        self.name = name
-
-        if set_config is not None:
-            self.set_config_dict = set_config.copy()
-        else:
-            self.set_config_dict = dict()
-
-        self.configs = described_configs
-        self.error = error
+                     RESOURCE_BROKER,
+                     OFFSET_INVALID)
 
-    def __repr__(self):
-        if self.error is not None:
-            return "ConfigResource(%s,%s,%r)" % (self.restype, self.name, self.error)
-        else:
-            return "ConfigResource(%s,%s)" % (self.restype, self.name)
+from confluent_kafka import ConsumerGroupTopicPartitions \
+    as _ConsumerGroupTopicPartitions
 
-    def __hash__(self):
-        return hash((self.restype, self.name))
+from confluent_kafka import ConsumerGroupState \
+    as _ConsumerGroupState
 
-    def __lt__(self, other):
-        if self.restype < other.restype:
-            return True
-        return self.name.__lt__(other.name)
-
-    def __eq__(self, other):
-        return self.restype == other.restype and self.name == other.name
-
-    def __len__(self):
-        """
-        :rtype: int
-        :returns: number of configuration entries/operations
-        """
-        return len(self.set_config_dict)
-
-    def set_config(self, name, value, overwrite=True):
-        """
-        Set/overwrite a configuration value.
-
-        When calling alter_configs, any configuration properties that are not included
-        in the request will be reverted to their default values. As a workaround, use
-        describe_configs() to retrieve the current configuration and overwrite the
-        settings you want to change.
-
-        :param str name: Configuration property name
-        :param str value: Configuration value
-        :param bool overwrite: If True, overwrite entry if it already exists (default).
-                               If False, do nothing if entry already exists.
-        """
-        if not overwrite and name in self.set_config_dict:
-            return
-        self.set_config_dict[name] = value
+try:
+    string_type = basestring
+except NameError:
+    string_type = str
 
 
 class AdminClient (_AdminClientImpl):
@@ -213,6 +90,7 @@ class AdminClient (_AdminClientImpl):
 
     Requires broker version v0.11.0.0 or later.
     """
+
     def __init__(self, conf):
         """
         Create a new AdminClient using the provided configuration dictionary.
@@ -274,6 +152,94 @@ class AdminClient (_AdminClientImpl):
             for resource, fut in futmap.items():
                 fut.set_exception(e)
 
+    @staticmethod
+    def _make_list_consumer_groups_result(f, futmap):
+        pass
+
+    @staticmethod
+    def _make_consumer_groups_result(f, futmap):
+        """
+        Map per-group results to per-group futures in futmap.
+        """
+        try:
+
+            results = f.result()
+            futmap_values = list(futmap.values())
+            len_results = len(results)
+            len_futures = len(futmap_values)
+            if len_results != len_futures:
+                raise RuntimeError(
+                    "Results length {} is different from future-map length {}".format(len_results, len_futures))
+            for i, result in enumerate(results):
+                fut = futmap_values[i]
+                if isinstance(result, KafkaError):
+                    fut.set_exception(KafkaException(result))
+                else:
+                    fut.set_result(result)
+        except Exception as e:
+            # Request-level exception, raise the same for all groups
+            for _, fut in futmap.items():
+                fut.set_exception(e)
+
+    @staticmethod
+    def _make_consumer_group_offsets_result(f, futmap):
+        """
+        Map per-group results to per-group futures in futmap.
+        The result value of each (successful) future is ConsumerGroupTopicPartitions.
+        """
+        try:
+
+            results = f.result()
+            futmap_values = list(futmap.values())
+            len_results = len(results)
+            len_futures = len(futmap_values)
+            if len_results != len_futures:
+                raise RuntimeError(
+                    "Results length {} is different from future-map length {}".format(len_results, len_futures))
+            for i, result in enumerate(results):
+                fut = futmap_values[i]
+                if isinstance(result, KafkaError):
+                    fut.set_exception(KafkaException(result))
+                else:
+                    fut.set_result(result)
+        except Exception as e:
+            # Request-level exception, raise the same for all groups
+            for _, fut in futmap.items():
+                fut.set_exception(e)
+
+    @staticmethod
+    def _make_acls_result(f, futmap):
+        """
+        Map create ACL binding results to corresponding futures in futmap.
+        For create_acls the result value of each (successful) future is None.
+        For delete_acls the result value of each (successful) future is the list of deleted AclBindings.
+        """
+        try:
+            results = f.result()
+            futmap_values = list(futmap.values())
+            len_results = len(results)
+            len_futures = len(futmap_values)
+            if len_results != len_futures:
+                raise RuntimeError(
+                    "Results length {} is different from future-map length {}".format(len_results, len_futures))
+            for i, result in enumerate(results):
+                fut = futmap_values[i]
+                if isinstance(result, KafkaError):
+                    fut.set_exception(KafkaException(result))
+                else:
+                    fut.set_result(result)
+        except Exception as e:
+            # Request-level exception, raise the same for all the AclBindings or AclBindingFilters
+            for resource, fut in futmap.items():
+                fut.set_exception(e)
+
+    @staticmethod
+    def _create_future():
+        f = concurrent.futures.Future()
+        if not f.set_running_or_notify_cancel():
+            raise RuntimeError("Future was cancelled prematurely")
+        return f
+
     @staticmethod
     def _make_futures(futmap_keys, class_check, make_result_fn):
         """
@@ -283,22 +249,99 @@ class AdminClient (_AdminClientImpl):
         futmap = {}
         for key in futmap_keys:
             if class_check is not None and not isinstance(key, class_check):
-                raise ValueError("Expected list of {}".format(type(class_check)))
-            futmap[key] = concurrent.futures.Future()
-            if not futmap[key].set_running_or_notify_cancel():
-                raise RuntimeError("Future was cancelled prematurely")
+                raise ValueError("Expected list of {}".format(repr(class_check)))
+            futmap[key] = AdminClient._create_future()
 
         # Create an internal future for the entire request,
         # this future will trigger _make_..._result() and set result/exception
         # per topic,future in futmap.
-        f = concurrent.futures.Future()
+        f = AdminClient._create_future()
         f.add_done_callback(lambda f: make_result_fn(f, futmap))
 
-        if not f.set_running_or_notify_cancel():
-            raise RuntimeError("Future was cancelled prematurely")
-
         return f, futmap
 
+    @staticmethod
+    def _has_duplicates(items):
+        return len(set(items)) != len(items)
+
+    @staticmethod
+    def _check_list_consumer_group_offsets_request(request):
+        if request is None:
+            raise TypeError("request cannot be None")
+        if not isinstance(request, list):
+            raise TypeError("request must be a list")
+        if len(request) != 1:
+            raise ValueError("Currently we support listing offsets for a single consumer group only")
+        for req in request:
+            if not isinstance(req, _ConsumerGroupTopicPartitions):
+                raise TypeError("Expected list of 'ConsumerGroupTopicPartitions'")
+
+            if req.group_id is None:
+                raise TypeError("'group_id' cannot be None")
+            if not isinstance(req.group_id, string_type):
+                raise TypeError("'group_id' must be a string")
+            if not req.group_id:
+                raise ValueError("'group_id' cannot be empty")
+
+            if req.topic_partitions is not None:
+                if not isinstance(req.topic_partitions, list):
+                    raise TypeError("'topic_partitions' must be a list or None")
+                if len(req.topic_partitions) == 0:
+                    raise ValueError("'topic_partitions' cannot be empty")
+                for topic_partition in req.topic_partitions:
+                    if topic_partition is None:
+                        raise ValueError("Element of 'topic_partitions' cannot be None")
+                    if not isinstance(topic_partition, _TopicPartition):
+                        raise TypeError("Element of 'topic_partitions' must be of type TopicPartition")
+                    if topic_partition.topic is None:
+                        raise TypeError("Element of 'topic_partitions' must not have 'topic' attribute as None")
+                    if not topic_partition.topic:
+                        raise ValueError("Element of 'topic_partitions' must not have 'topic' attribute as Empty")
+                    if topic_partition.partition < 0:
+                        raise ValueError("Element of 'topic_partitions' must not have negative 'partition' value")
+                    if topic_partition.offset != OFFSET_INVALID:
+                        print(topic_partition.offset)
+                        raise ValueError("Element of 'topic_partitions' must not have 'offset' value")
+
+    @staticmethod
+    def _check_alter_consumer_group_offsets_request(request):
+        if request is None:
+            raise TypeError("request cannot be None")
+        if not isinstance(request, list):
+            raise TypeError("request must be a list")
+        if len(request) != 1:
+            raise ValueError("Currently we support altering offsets for a single consumer group only")
+        for req in request:
+            if not isinstance(req, _ConsumerGroupTopicPartitions):
+                raise TypeError("Expected list of 'ConsumerGroupTopicPartitions'")
+            if req.group_id is None:
+                raise TypeError("'group_id' cannot be None")
+            if not isinstance(req.group_id, string_type):
+                raise TypeError("'group_id' must be a string")
+            if not req.group_id:
+                raise ValueError("'group_id' cannot be empty")
+            if req.topic_partitions is None:
+                raise ValueError("'topic_partitions' cannot be null")
+            if not isinstance(req.topic_partitions, list):
+                raise TypeError("'topic_partitions' must be a list")
+            if len(req.topic_partitions) == 0:
+                raise ValueError("'topic_partitions' cannot be empty")
+            for topic_partition in req.topic_partitions:
+                if topic_partition is None:
+                    raise ValueError("Element of 'topic_partitions' cannot be None")
+                if not isinstance(topic_partition, _TopicPartition):
+                    raise TypeError("Element of 'topic_partitions' must be of type TopicPartition")
+                if topic_partition.topic is None:
+                    raise TypeError("Element of 'topic_partitions' must not have 'topic' attribute as None")
+                if not topic_partition.topic:
+                    raise ValueError("Element of 'topic_partitions' must not have 'topic' attribute as Empty")
+                if topic_partition.partition < 0:
+                    raise ValueError(
+                        "Element of 'topic_partitions' must not have negative value for 'partition' field")
+                if topic_partition.offset < 0:
+                    raise ValueError(
+                        "Element of 'topic_partitions' must not have negative value for 'offset' field")
+
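
The two checks above encode the request shapes accepted by list_consumer_group_offsets and alter_consumer_group_offsets (both referenced in the ConsumerGroupTopicPartitions docstring earlier in this diff): when listing, offsets must be left unset; when altering, each partition needs an explicit non-negative offset. A sketch with placeholder names:

    from confluent_kafka import TopicPartition, ConsumerGroupTopicPartitions

    # Valid for list_consumer_group_offsets(): offset stays at OFFSET_INVALID (the default).
    list_request = ConsumerGroupTopicPartitions('my-group',
                                                [TopicPartition('my-topic', 0)])

    # Valid for alter_consumer_group_offsets(): an explicit, non-negative offset per partition.
    alter_request = ConsumerGroupTopicPartitions('my-group',
                                                 [TopicPartition('my-topic', 0, 42)])
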
     def create_topics(self, new_topics, **kwargs):
         """
         Create one or more new topics.
@@ -363,6 +406,14 @@ class AdminClient (_AdminClientImpl):
 
         return futmap
 
+    def list_topics(self, *args, **kwargs):
+
+        return super(AdminClient, self).list_topics(*args, **kwargs)
+
+    def list_groups(self, *args, **kwargs):
+
+        return super(AdminClient, self).list_groups(*args, **kwargs)
+
     def create_partitions(self, new_partitions, **kwargs):
         """
         Create additional partitions for the given topics.
@@ -471,163 +522,290 @@ class AdminClient (_AdminClientImpl):
 
         return futmap
 
+    def create_acls(self, acls, **kwargs):
+        """
+        Create one or more ACL bindings.
 
-class ClusterMetadata (object):
-    """
-    Provides information about the Kafka cluster, brokers, and topics.
-    Returned by list_topics().
+        :param list(AclBinding) acls: A list of unique ACL binding specifications (:class:`.AclBinding`)
+                         to create.
+        :param float request_timeout: The overall request timeout in seconds,
+                  including broker lookup, request transmission, operation time
+                  on broker, and response. Default: `socket.timeout.ms*1000.0`
 
-    This class is typically not user instantiated.
-    """
-    def __init__(self):
-        self.cluster_id = None
-        """Cluster id string, if supported by the broker, else None."""
-        self.controller_id = -1
-        """Current controller broker id, or -1."""
-        self.brokers = {}
-        """Map of brokers indexed by the broker id (int). Value is a BrokerMetadata object."""
-        self.topics = {}
-        """Map of topics indexed by the topic name. Value is a TopicMetadata object."""
-        self.orig_broker_id = -1
-        """The broker this metadata originated from."""
-        self.orig_broker_name = None
-        """The broker name/address this metadata originated from."""
-
-    def __repr__(self):
-        return "ClusterMetadata({})".format(self.cluster_id)
-
-    def __str__(self):
-        return str(self.cluster_id)
-
-
-class BrokerMetadata (object):
-    """
-    Provides information about a Kafka broker.
+        :returns: A dict of futures for each ACL binding, keyed by the :class:`AclBinding` object.
+                  The future result() method returns None on success.
 
-    This class is typically not user instantiated.
-    """
-    def __init__(self):
-        self.id = -1
-        """Broker id"""
-        self.host = None
-        """Broker hostname"""
-        self.port = -1
-        """Broker port"""
+        :rtype: dict[AclBinding, future]
 
-    def __repr__(self):
-        return "BrokerMetadata({}, {}:{})".format(self.id, self.host, self.port)
+        :raises KafkaException: Operation failed locally or on broker.
+        :raises TypeError: Invalid input.
+        :raises ValueError: Invalid input.
+        """
+        if AdminClient._has_duplicates(acls):
+            raise ValueError("duplicate ACL bindings not allowed")
 
-    def __str__(self):
-        return "{}:{}/{}".format(self.host, self.port, self.id)
+        f, futmap = AdminClient._make_futures(acls, AclBinding,
+                                              AdminClient._make_acls_result)
 
+        super(AdminClient, self).create_acls(acls, f, **kwargs)
 
-class TopicMetadata (object):
-    """
-    Provides information about a Kafka topic.
+        return futmap
 
-    This class is typically not user instantiated.
-    """
-    # The dash in "-topic" and "-error" is needed to circumvent a
-    # Sphinx issue where it tries to reference the same instance variable
-    # on other classes which raises a warning/error.
-    def __init__(self):
-        self.topic = None
-        """Topic name"""
-        self.partitions = {}
-        """Map of partitions indexed by partition id. Value is a PartitionMetadata object."""
-        self.error = None
-        """Topic error, or None. Value is a KafkaError object."""
-
-    def __repr__(self):
-        if self.error is not None:
-            return "TopicMetadata({}, {} partitions, {})".format(self.topic, len(self.partitions), self.error)
-        else:
-            return "TopicMetadata({}, {} partitions)".format(self.topic, len(self.partitions))
-
-    def __str__(self):
-        return self.topic
-
-
-class PartitionMetadata (object):
-    """
-    Provides information about a Kafka partition.
+    def describe_acls(self, acl_binding_filter, **kwargs):
+        """
+        Match ACL bindings by filter.
+
+        :param AclBindingFilter acl_binding_filter: a filter with attributes that
+                  must match.
+                  String attributes match exact values or any string if set to None.
+                  Enums attributes match exact values or any value if equal to `ANY`.
+                  If :class:`ResourcePatternType` is set to :attr:`ResourcePatternType.MATCH`
+                  returns ACL bindings with:
+                  :attr:`ResourcePatternType.LITERAL` pattern type with resource name equal
+                  to the given resource name;
+                  :attr:`ResourcePatternType.LITERAL` pattern type with wildcard resource name
+                  that matches the given resource name;
+                  :attr:`ResourcePatternType.PREFIXED` pattern type with resource name
+                  that is a prefix of the given resource name
+        :param float request_timeout: The overall request timeout in seconds,
+                  including broker lookup, request transmission, operation time
+                  on broker, and response. Default: `socket.timeout.ms*1000.0`
 
-    This class is typically not user instantiated.
+        :returns: A future returning a list(:class:`AclBinding`) as result
 
-    :warning: Depending on cluster state the broker ids referenced in
-              leader, replicas and ISRs may temporarily not be reported
-              in ClusterMetadata.brokers. Always check the availability
-              of a broker id in the brokers dict.
-    """
-    def __init__(self):
-        self.id = -1
-        """Partition id."""
-        self.leader = -1
-        """Current leader broker for this partition, or -1."""
-        self.replicas = []
-        """List of replica broker ids for this partition."""
-        self.isrs = []
-        """List of in-sync-replica broker ids for this partition."""
-        self.error = None
-        """Partition error, or None. Value is a KafkaError object."""
-
-    def __repr__(self):
-        if self.error is not None:
-            return "PartitionMetadata({}, {})".format(self.id, self.error)
-        else:
-            return "PartitionMetadata({})".format(self.id)
-
-    def __str__(self):
-        return "{}".format(self.id)
-
-
-class GroupMember(object):
-    """Provides information about a group member.
-
-    For more information on the metadata format, refer to:
-    `A Guide To The Kafka Protocol <https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI>`_.
-
-    This class is typically not user instantiated.
-    """  # noqa: E501
-    def __init__(self,):
-        self.id = None
-        """Member id (generated by broker)."""
-        self.client_id = None
-        """Client id."""
-        self.client_host = None
-        """Client hostname."""
-        self.metadata = None
-        """Member metadata(binary), format depends on protocol type."""
-        self.assignment = None
-        """Member assignment(binary), format depends on protocol type."""
-
-
-class GroupMetadata(object):
-    """GroupMetadata provides information about a Kafka consumer group
-
-    This class is typically not user instantiated.
-    """
-    def __init__(self):
-        self.broker = None
-        """Originating broker metadata."""
-        self.id = None
-        """Group name."""
-        self.error = None
-        """Broker-originated error, or None. Value is a KafkaError object."""
-        self.state = None
-        """Group state."""
-        self.protocol_type = None
-        """Group protocol type."""
-        self.protocol = None
-        """Group protocol."""
-        self.members = []
-        """Group members."""
-
-    def __repr__(self):
-        if self.error is not None:
-            return "GroupMetadata({}, {})".format(self.id, self.error)
-        else:
-            return "GroupMetadata({})".format(self.id)
-
-    def __str__(self):
-        return self.id
+        :rtype: future
+
+        :raises KafkaException: Operation failed locally or on broker.
+        :raises TypeError: Invalid input.
+        :raises ValueError: Invalid input.
+        """
+
+        f = AdminClient._create_future()
+
+        super(AdminClient, self).describe_acls(acl_binding_filter, f, **kwargs)
+
+        return f
+
+    def delete_acls(self, acl_binding_filters, **kwargs):
+        """
+        Delete ACL bindings matching one or more ACL binding filters.
+
+        :param list(AclBindingFilter) acl_binding_filters: a list of unique ACL binding filters
+                  to match ACLs to delete.
+                  String attributes match exact values or any string if set to None.
+                  Enums attributes match exact values or any value if equal to `ANY`.
+                  If :class:`ResourcePatternType` is set to :attr:`ResourcePatternType.MATCH`
+                  deletes ACL bindings with:
+                  :attr:`ResourcePatternType.LITERAL` pattern type with resource name
+                  equal to the given resource name;
+                  :attr:`ResourcePatternType.LITERAL` pattern type with wildcard resource name
+                  that matches the given resource name;
+                  :attr:`ResourcePatternType.PREFIXED` pattern type with resource name
+                  that is a prefix of the given resource name
+        :param float request_timeout: The overall request timeout in seconds,
+                  including broker lookup, request transmission, operation time
+                  on broker, and response. Default: `socket.timeout.ms*1000.0`
+
+        :returns: A dict of futures for each ACL binding filter, keyed by the :class:`AclBindingFilter` object.
+                  The future result() method returns a list of :class:`AclBinding`.
+
+        :rtype: dict[AclBindingFilter, future]
+
+        :raises KafkaException: Operation failed locally or on broker.
+        :raises TypeError: Invalid input.
+        :raises ValueError: Invalid input.
+        """
+        if AdminClient._has_duplicates(acl_binding_filters):
+            raise ValueError("duplicate ACL binding filters not allowed")
+
+        f, futmap = AdminClient._make_futures(acl_binding_filters, AclBindingFilter,
+                                              AdminClient._make_acls_result)
+
+        super(AdminClient, self).delete_acls(acl_binding_filters, f, **kwargs)
+
+        return futmap
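
Putting the ACL methods above together, a minimal sketch against a placeholder broker; the argument order for AclBinding and AclBindingFilter (type, name, pattern type, principal, host, operation, permission) is an assumption based on the classes imported from ._acl at the top of this module:

    from confluent_kafka.admin import (AdminClient, AclBinding, AclBindingFilter,
                                       AclOperation, AclPermissionType,
                                       ResourceType, ResourcePatternType)

    admin = AdminClient({'bootstrap.servers': 'localhost:9092'})   # placeholder broker

    acl = AclBinding(ResourceType.TOPIC, 'my-topic', ResourcePatternType.LITERAL,
                     'User:alice', '*', AclOperation.READ, AclPermissionType.ALLOW)

    # create_acls() returns a dict of futures keyed by the AclBinding; result() is None on success.
    for binding, fut in admin.create_acls([acl]).items():
        fut.result()

    # delete_acls() takes filters and resolves each future to the list of deleted bindings.
    match_all = AclBindingFilter(ResourceType.ANY, None, ResourcePatternType.ANY,
                                 None, None, AclOperation.ANY, AclPermissionType.ANY)
    for flt, fut in admin.delete_acls([match_all]).items():
        print(fut.result())
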
+
+    def list_consumer_groups(self, **kwargs):
+        """
+        List consumer groups.
+
+        :param float request_timeout: The overall request timeout in seconds,
+                  including broker lookup, request transmission, operation time
+                  on broker, and response. Default: `socket.timeout.ms*1000.0`
+        :param set(ConsumerGroupState) states: only list consumer groups which are currently in
+                  these states.
+
+        :returns: a future. Result method of the future returns :class:`ListConsumerGroupsResult`.
+
+        :rtype: future
+
+        :raises KafkaException: Operation failed locally or on broker.
+        :raises TypeError: Invalid input.
+        :raises ValueError: Invalid input.
+        """
+        if "states" in kwargs:
+            states = kwargs["states"]
+            if states is not None:
+                if not isinstance(states, set):
+                    raise TypeError("'states' must be a set")
+                for state in states:
+                    if not isinstance(state, _ConsumerGroupState):
+                        raise TypeError("All elements of states must be of type ConsumerGroupState")
+                kwargs["states_int"] = [state.value for state in states]
+            kwargs.pop("states")
+
+        f, _ = AdminClient._make_futures([], None, AdminClient._make_list_consumer_groups_result)
+
+        super(AdminClient, self).list_consumer_groups(f, **kwargs)
+
+        return f
+
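A rough sketch of listing only stable groups, assuming ConsumerGroupState is importable from the top-level confluent_kafka package; the broker address is a placeholder:

    from confluent_kafka import ConsumerGroupState
    from confluent_kafka.admin import AdminClient

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

    future = admin.list_consumer_groups(request_timeout=10,
                                        states={ConsumerGroupState.STABLE})
    result = future.result()              # ListConsumerGroupsResult
    for group in result.valid or []:      # ConsumerGroupListing objects
        print(group.group_id, group.is_simple_consumer_group, group.state)
    for error in result.errors or []:
        print("listing error:", error)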
+    def describe_consumer_groups(self, group_ids, **kwargs):
+        """
+        Describe consumer groups.
+
+        :param list(str) group_ids: List of group_ids which need to be described.
+        :param float request_timeout: The overall request timeout in seconds,
+                  including broker lookup, request transmission, operation time
+                  on broker, and response. Default: `socket.timeout.ms*1000.0`
+
+        :returns: A dict of futures for each group, keyed by the group_id.
+                  The future result() method returns :class:`ConsumerGroupDescription`.
+
+        :rtype: dict[str, future]
+
+        :raises KafkaException: Operation failed locally or on broker.
+        :raises TypeError: Invalid input.
+        :raises ValueError: Invalid input.
+        """
+
+        if not isinstance(group_ids, list):
+            raise TypeError("Expected input to be list of group ids to be described")
+
+        if len(group_ids) == 0:
+            raise ValueError("Expected at least one group to be described")
+
+        f, futmap = AdminClient._make_futures(group_ids, None,
+                                              AdminClient._make_consumer_groups_result)
+
+        super(AdminClient, self).describe_consumer_groups(group_ids, f, **kwargs)
+
+        return futmap
+
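A short sketch of describing a group; the group id and broker address are placeholders:

    from confluent_kafka.admin import AdminClient

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

    futures = admin.describe_consumer_groups(["my-group"], request_timeout=10)
    for group_id, future in futures.items():
        desc = future.result()            # ConsumerGroupDescription
        print(group_id, desc.state, desc.partition_assignor,
              [m.member_id for m in desc.members])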
+    def delete_consumer_groups(self, group_ids, **kwargs):
+        """
+        Delete the given consumer groups.
+
+        :param list(str) group_ids: List of group_ids which need to be deleted.
+        :param float request_timeout: The overall request timeout in seconds,
+                  including broker lookup, request transmission, operation time
+                  on broker, and response. Default: `socket.timeout.ms*1000.0`
+
+        :returns: A dict of futures for each group, keyed by the group_id.
+                  The future result() method returns None.
+
+        :rtype: dict[str, future]
+
+        :raises KafkaException: Operation failed locally or on broker.
+        :raises TypeError: Invalid input.
+        :raises ValueError: Invalid input.
+        """
+        if not isinstance(group_ids, list):
+            raise TypeError("Expected input to be list of group ids to be deleted")
+
+        if len(group_ids) == 0:
+            raise ValueError("Expected at least one group to be deleted")
+
+        f, futmap = AdminClient._make_futures(group_ids, string_type, AdminClient._make_consumer_groups_result)
+
+        super(AdminClient, self).delete_consumer_groups(group_ids, f, **kwargs)
+
+        return futmap
+
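A short sketch of deleting a group; the group id and broker address are placeholders, and the group typically must have no active members for the broker to accept the deletion:

    from confluent_kafka.admin import AdminClient

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

    futures = admin.delete_consumer_groups(["obsolete-group"], request_timeout=10)
    for group_id, future in futures.items():
        try:
            future.result()               # None on success
            print(group_id, "deleted")
        except Exception as e:            # KafkaException on failure
            print(group_id, "not deleted:", e)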
+    def list_consumer_group_offsets(self, list_consumer_group_offsets_request, **kwargs):
+        """
+        List offset information for the consumer group and (optional) topic partition provided in the request.
+
+        :note: Currently, the API supports only a single group.
+
+        :param list(ConsumerGroupTopicPartitions) list_consumer_group_offsets_request: List of
+                    :class:`ConsumerGroupTopicPartitions` which consist of group name and topic
+                    partition information for which offset details are expected. If only the group name is
+                    provided, offset information for all topics and partitions associated with
+                    that group is returned.
+        :param bool require_stable: If True, fetches stable offsets. Default: False
+        :param float request_timeout: The overall request timeout in seconds,
+                  including broker lookup, request transmission, operation time
+                  on broker, and response. Default: `socket.timeout.ms*1000.0`
+
+        :returns: A dict of futures for each group, keyed by the group id.
+                  The future result() method returns :class:`ConsumerGroupTopicPartitions`.
+
+        :rtype: dict[str, future]
+
+        :raises KafkaException: Operation failed locally or on broker.
+        :raises TypeError: Invalid input.
+        :raises ValueError: Invalid input.
+        """
+
+        AdminClient._check_list_consumer_group_offsets_request(list_consumer_group_offsets_request)
+
+        f, futmap = AdminClient._make_futures([request.group_id for request in list_consumer_group_offsets_request],
+                                              string_type,
+                                              AdminClient._make_consumer_group_offsets_result)
+
+        super(AdminClient, self).list_consumer_group_offsets(list_consumer_group_offsets_request, f, **kwargs)
+
+        return futmap
+
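A sketch of fetching the committed offsets for a whole group, assuming ConsumerGroupTopicPartitions is exported from the top-level package; the group name and broker address are placeholders:

    from confluent_kafka import ConsumerGroupTopicPartitions
    from confluent_kafka.admin import AdminClient

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

    # Only the group id is given, so offsets for all of its topic partitions are returned.
    request = ConsumerGroupTopicPartitions("my-group")
    futures = admin.list_consumer_group_offsets([request], require_stable=True)

    group_offsets = futures["my-group"].result()   # ConsumerGroupTopicPartitions
    for tp in group_offsets.topic_partitions:
        print(tp.topic, tp.partition, tp.offset)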
+    def alter_consumer_group_offsets(self, alter_consumer_group_offsets_request, **kwargs):
+        """
+        Alter offset for the consumer group and topic partition provided in the request.
+
+        :note: Currently, the API supports only a single group.
+
+        :param list(ConsumerGroupTopicPartitions) alter_consumer_group_offsets_request: List of
+                    :class:`ConsumerGroupTopicPartitions` which consist of group name and topic
+                    partition; and corresponding offset to be updated.
+        :param float request_timeout: The overall request timeout in seconds,
+                  including broker lookup, request transmission, operation time
+                  on broker, and response. Default: `socket.timeout.ms*1000.0`
+
+        :returns: A dict of futures for each group, keyed by the group id.
+                  The future result() method returns :class:`ConsumerGroupTopicPartitions`.
+
+        :rtype: dict[str, future]
+
+        :raises KafkaException: Operation failed locally or on broker.
+        :raises TypeError: Invalid input.
+        :raises ValueError: Invalid input.
+        """
+
+        AdminClient._check_alter_consumer_group_offsets_request(alter_consumer_group_offsets_request)
+
+        f, futmap = AdminClient._make_futures([request.group_id for request in alter_consumer_group_offsets_request],
+                                              string_type,
+                                              AdminClient._make_consumer_group_offsets_result)
+
+        super(AdminClient, self).alter_consumer_group_offsets(alter_consumer_group_offsets_request, f, **kwargs)
+
+        return futmap
+
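A sketch of resetting one committed offset; topic, partition, offset and group id are placeholders:

    from confluent_kafka import TopicPartition, ConsumerGroupTopicPartitions
    from confluent_kafka.admin import AdminClient

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

    # Set the committed offset of my-topic[0] to 42 for group "my-group".
    request = ConsumerGroupTopicPartitions("my-group",
                                           [TopicPartition("my-topic", 0, 42)])
    futures = admin.alter_consumer_group_offsets([request])
    result = futures["my-group"].result()          # ConsumerGroupTopicPartitions
    for tp in result.topic_partitions:
        print(tp.topic, tp.partition, tp.offset, tp.error)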
+    def set_sasl_credentials(self, username, password):
+        """
+        Sets the SASL credentials used for this client.
+        These credentials will overwrite the old ones, and will be used the
+        next time the client needs to authenticate.
+        This method will not disconnect existing broker connections that
+        have been established with the old credentials.
+        This method is applicable only to SASL PLAIN and SCRAM mechanisms.
+
+        :param str username: The username to set.
+        :param str password: The password to set.
+
+        :rtype: None
+
+        :raises KafkaException: Operation failed locally or on broker.
+        :raises TypeError: Invalid input.
+        """
+        super(AdminClient, self).set_sasl_credentials(username, password)
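A small sketch of rotating credentials on a SASL/SCRAM client; all configuration values below are placeholders:

    from confluent_kafka.admin import AdminClient

    admin = AdminClient({
        "bootstrap.servers": "localhost:9092",      # placeholder broker
        "security.protocol": "SASL_SSL",
        "sasl.mechanism": "SCRAM-SHA-256",
        "sasl.username": "old-user",
        "sasl.password": "old-secret",
    })

    # New connections authenticate with the new credentials; existing
    # connections established with the old ones are left intact.
    admin.set_sasl_credentials("new-user", "new-secret")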
diff --git a/src/confluent_kafka/admin/_acl.py b/src/confluent_kafka/admin/_acl.py
new file mode 100644
index 0000000..3512a74
--- /dev/null
+++ b/src/confluent_kafka/admin/_acl.py
@@ -0,0 +1,207 @@
+# Copyright 2022 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import Enum
+import functools
+from .. import cimpl as _cimpl
+from ._resource import ResourceType, ResourcePatternType
+from .._util import ValidationUtil, ConversionUtil
+
+try:
+    string_type = basestring
+except NameError:
+    string_type = str
+
+
+class AclOperation(Enum):
+    """
+    Enumerates the different types of ACL operation.
+    """
+    UNKNOWN = _cimpl.ACL_OPERATION_UNKNOWN  #: Unknown
+    ANY = _cimpl.ACL_OPERATION_ANY  #: In a filter, matches any AclOperation
+    ALL = _cimpl.ACL_OPERATION_ALL  #: ALL the operations
+    READ = _cimpl.ACL_OPERATION_READ  #: READ operation
+    WRITE = _cimpl.ACL_OPERATION_WRITE  #: WRITE operation
+    CREATE = _cimpl.ACL_OPERATION_CREATE  #: CREATE operation
+    DELETE = _cimpl.ACL_OPERATION_DELETE  #: DELETE operation
+    ALTER = _cimpl.ACL_OPERATION_ALTER  #: ALTER operation
+    DESCRIBE = _cimpl.ACL_OPERATION_DESCRIBE  #: DESCRIBE operation
+    CLUSTER_ACTION = _cimpl.ACL_OPERATION_CLUSTER_ACTION  #: CLUSTER_ACTION operation
+    DESCRIBE_CONFIGS = _cimpl.ACL_OPERATION_DESCRIBE_CONFIGS  #: DESCRIBE_CONFIGS operation
+    ALTER_CONFIGS = _cimpl.ACL_OPERATION_ALTER_CONFIGS  #: ALTER_CONFIGS operation
+    IDEMPOTENT_WRITE = _cimpl.ACL_OPERATION_IDEMPOTENT_WRITE  #: IDEMPOTENT_WRITE operation
+
+    def __lt__(self, other):
+        if self.__class__ != other.__class__:
+            return NotImplemented
+        return self.value < other.value
+
+
+class AclPermissionType(Enum):
+    """
+    Enumerates the different types of ACL permission types.
+    """
+    UNKNOWN = _cimpl.ACL_PERMISSION_TYPE_UNKNOWN  #: Unknown
+    ANY = _cimpl.ACL_PERMISSION_TYPE_ANY  #: In a filter, matches any AclPermissionType
+    DENY = _cimpl.ACL_PERMISSION_TYPE_DENY  #: Disallows access
+    ALLOW = _cimpl.ACL_PERMISSION_TYPE_ALLOW  #: Grants access
+
+    def __lt__(self, other):
+        if self.__class__ != other.__class__:
+            return NotImplemented
+        return self.value < other.value
+
+
+@functools.total_ordering
+class AclBinding(object):
+    """
+    Represents an ACL binding that specifies the operation and permission type for a specific principal
+    over one or more resources of the same type. Used by :meth:`AdminClient.create_acls`,
+    returned by :meth:`AdminClient.describe_acls` and :meth:`AdminClient.delete_acls`.
+
+    Parameters
+    ----------
+    restype : ResourceType
+        The resource type.
+    name : str
+        The resource name, which depends on the resource type. For :attr:`ResourceType.BROKER`,
+        the resource name is the broker id.
+    resource_pattern_type : ResourcePatternType
+        The resource pattern, relative to the name.
+    principal : str
+        The principal this AclBinding refers to.
+    host : str
+        The host that the call is allowed to come from.
+    operation: AclOperation
+        The operation(s) specified by this binding.
+    permission_type: AclPermissionType
+        The permission type for the specified operation.
+    """
+
+    def __init__(self, restype, name,
+                 resource_pattern_type, principal, host,
+                 operation, permission_type):
+        self.restype = restype
+        self.name = name
+        self.resource_pattern_type = resource_pattern_type
+        self.principal = principal
+        self.host = host
+        self.operation = operation
+        self.permission_type = permission_type
+        self._convert_args()
+        # for the C code
+        self.restype_int = int(self.restype.value)
+        self.resource_pattern_type_int = int(self.resource_pattern_type.value)
+        self.operation_int = int(self.operation.value)
+        self.permission_type_int = int(self.permission_type.value)
+
+    def _convert_enums(self):
+        self.restype = ConversionUtil.convert_to_enum(self.restype, ResourceType)
+        self.resource_pattern_type = ConversionUtil.convert_to_enum(
+            self.resource_pattern_type, ResourcePatternType)
+        self.operation = ConversionUtil.convert_to_enum(
+            self.operation, AclOperation)
+        self.permission_type = ConversionUtil.convert_to_enum(
+            self.permission_type, AclPermissionType)
+
+    def _check_forbidden_enums(self, forbidden_enums):
+        for k, v in forbidden_enums.items():
+            enum_value = getattr(self, k)
+            if enum_value in v:
+                raise ValueError("Cannot use enum %s, value %s in this class" % (k, enum_value.name))
+
+    def _not_none_args(self):
+        return ["restype", "name", "resource_pattern_type",
+                "principal", "host", "operation", "permission_type"]
+
+    def _string_args(self):
+        return ["name", "principal", "host"]
+
+    def _forbidden_enums(self):
+        return {
+            "restype": [ResourceType.ANY],
+            "resource_pattern_type": [ResourcePatternType.ANY,
+                                      ResourcePatternType.MATCH],
+            "operation": [AclOperation.ANY],
+            "permission_type": [AclPermissionType.ANY]
+        }
+
+    def _convert_args(self):
+        not_none_args = self._not_none_args()
+        string_args = self._string_args()
+        forbidden_enums = self._forbidden_enums()
+        ValidationUtil.check_multiple_not_none(self, not_none_args)
+        ValidationUtil.check_multiple_is_string(self, string_args)
+        self._convert_enums()
+        self._check_forbidden_enums(forbidden_enums)
+
+    def __repr__(self):
+        type_name = type(self).__name__
+        return "%s(%s,%s,%s,%s,%s,%s,%s)" % ((type_name,) + self._to_tuple())
+
+    def _to_tuple(self):
+        return (self.restype, self.name, self.resource_pattern_type,
+                self.principal, self.host, self.operation,
+                self.permission_type)
+
+    def __hash__(self):
+        return hash(self._to_tuple())
+
+    def __lt__(self, other):
+        if self.__class__ != other.__class__:
+            return NotImplemented
+        return self._to_tuple() < other._to_tuple()
+
+    def __eq__(self, other):
+        if self.__class__ != other.__class__:
+            return NotImplemented
+        return self._to_tuple() == other._to_tuple()
+
+
+class AclBindingFilter(AclBinding):
+    """
+    Represents an ACL binding filter used to return a list of ACL bindings matching some or all of its attributes.
+    Used by :meth:`AdminClient.describe_acls` and :meth:`AdminClient.delete_acls`.
+
+    Parameters
+    ----------
+    restype : ResourceType
+        The resource type, or :attr:`ResourceType.ANY` to match any value.
+    name : str
+        The resource name to match.
+        None matches any value.
+    resource_pattern_type : ResourcePatternType
+        The resource pattern, :attr:`ResourcePatternType.ANY` to match any value or
+        :attr:`ResourcePatternType.MATCH` to perform pattern matching.
+    principal : str
+        The principal to match, or None to match any value.
+    host : str
+        The host to match, or None to match any value.
+    operation: AclOperation
+        The operation to match or :attr:`AclOperation.ANY` to match any value.
+    permission_type: AclPermissionType
+        The permission type to match or :attr:`AclPermissionType.ANY` to match any value.
+    """
+
+    def _not_none_args(self):
+        return ["restype", "resource_pattern_type",
+                "operation", "permission_type"]
+
+    def _forbidden_enums(self):
+        return {
+            "restype": [ResourceType.UNKNOWN],
+            "resource_pattern_type": [ResourcePatternType.UNKNOWN],
+            "operation": [AclOperation.UNKNOWN],
+            "permission_type": [AclPermissionType.UNKNOWN]
+        }
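To illustrate the difference between a concrete binding and a filter, a hedged sketch; broker address, topic and principal are placeholders:

    from confluent_kafka.admin import (AdminClient, AclBinding, AclBindingFilter,
                                       ResourceType, ResourcePatternType,
                                       AclOperation, AclPermissionType)

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

    # Concrete binding: allow User:alice to read topic "orders" from any host.
    binding = AclBinding(ResourceType.TOPIC, "orders", ResourcePatternType.LITERAL,
                         "User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW)
    for _, future in admin.create_acls([binding]).items():
        future.result()                              # None on success

    # Filter: None and ANY act as wildcards, matching any name, principal or host.
    flt = AclBindingFilter(ResourceType.TOPIC, None, ResourcePatternType.ANY,
                           None, None, AclOperation.ANY, AclPermissionType.ANY)
    print(admin.describe_acls(flt).result())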
diff --git a/src/confluent_kafka/admin/_config.py b/src/confluent_kafka/admin/_config.py
new file mode 100644
index 0000000..678ffa8
--- /dev/null
+++ b/src/confluent_kafka/admin/_config.py
@@ -0,0 +1,179 @@
+# Copyright 2022 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import Enum
+import functools
+from .. import cimpl as _cimpl
+from ._resource import ResourceType
+
+
+class ConfigSource(Enum):
+    """
+    Enumerates the different sources of configuration properties.
+    Used by ConfigEntry to specify the
+    source of configuration properties returned by `describe_configs()`.
+    """
+    UNKNOWN_CONFIG = _cimpl.CONFIG_SOURCE_UNKNOWN_CONFIG  #: Unknown
+    DYNAMIC_TOPIC_CONFIG = _cimpl.CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG  #: Dynamic Topic
+    DYNAMIC_BROKER_CONFIG = _cimpl.CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG  #: Dynamic Broker
+    DYNAMIC_DEFAULT_BROKER_CONFIG = _cimpl.CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG  #: Dynamic Default Broker
+    STATIC_BROKER_CONFIG = _cimpl.CONFIG_SOURCE_STATIC_BROKER_CONFIG  #: Static Broker
+    DEFAULT_CONFIG = _cimpl.CONFIG_SOURCE_DEFAULT_CONFIG  #: Default
+
+
+class ConfigEntry(object):
+    """
+    Represents a configuration property. Returned by describe_configs() for each configuration
+    entry of the specified resource.
+
+    This class is typically not user instantiated.
+    """
+
+    def __init__(self, name, value,
+                 source=ConfigSource.UNKNOWN_CONFIG,
+                 is_read_only=False,
+                 is_default=False,
+                 is_sensitive=False,
+                 is_synonym=False,
+                 synonyms=[]):
+        """
+        This class is typically not user instantiated.
+        """
+        super(ConfigEntry, self).__init__()
+
+        self.name = name
+        """Configuration property name."""
+        self.value = value
+        """Configuration value (or None if not set or is_sensitive==True)."""
+        self.source = source
+        """Configuration source."""
+        self.is_read_only = bool(is_read_only)
+        """Indicates whether the configuration property is read-only."""
+        self.is_default = bool(is_default)
+        """Indicates whether the configuration property is using its default value."""
+        self.is_sensitive = bool(is_sensitive)
+        """
+        Indicates whether the configuration property value contains
+        sensitive information (such as security settings), in which
+        case .value is None."""
+        self.is_synonym = bool(is_synonym)
+        """Indicates whether the configuration property is a synonym for the parent configuration entry."""
+        self.synonyms = synonyms
+        """A list of synonyms (ConfigEntry) and alternate sources for this configuration property."""
+
+    def __repr__(self):
+        return "ConfigEntry(%s=\"%s\")" % (self.name, self.value)
+
+    def __str__(self):
+        return "%s=\"%s\"" % (self.name, self.value)
+
+
+@functools.total_ordering
+class ConfigResource(object):
+    """
+    Represents a resource that has configuration, and (optionally)
+    a collection of configuration properties for that resource. Used by
+    describe_configs() and alter_configs().
+
+    Parameters
+    ----------
+    restype : `ConfigResource.Type`
+       The resource type.
+    name : `str`
+       The resource name, which depends on the resource type. For RESOURCE_BROKER, the resource name is the broker id.
+    set_config : `dict`
+        The configuration to set/overwrite. Dictionary of str, str.
+    """
+
+    Type = ResourceType
+
+    def __init__(self, restype, name,
+                 set_config=None, described_configs=None, error=None):
+        """
+        :param ConfigResource.Type restype: Resource type.
+        :param str name: The resource name, which depends on restype.
+                         For RESOURCE_BROKER, the resource name is the broker id.
+        :param dict set_config: The configuration to set/overwrite. Dictionary of str, str.
+        :param dict described_configs: For internal use only.
+        :param KafkaError error: For internal use only.
+        """
+        super(ConfigResource, self).__init__()
+
+        if name is None:
+            raise ValueError("Expected resource name to be a string")
+
+        if type(restype) == str:
+            # Allow resource type to be specified as case-insensitive string, for convenience.
+            try:
+                restype = ConfigResource.Type[restype.upper()]
+            except KeyError:
+                raise ValueError("Unknown resource type \"%s\": should be a ConfigResource.Type" % restype)
+
+        elif type(restype) == int:
+            # The C-code passes restype as an int, convert to Type.
+            restype = ConfigResource.Type(restype)
+
+        self.restype = restype
+        self.restype_int = int(self.restype.value)  # for the C code
+        self.name = name
+
+        if set_config is not None:
+            self.set_config_dict = set_config.copy()
+        else:
+            self.set_config_dict = dict()
+
+        self.configs = described_configs
+        self.error = error
+
+    def __repr__(self):
+        if self.error is not None:
+            return "ConfigResource(%s,%s,%r)" % (self.restype, self.name, self.error)
+        else:
+            return "ConfigResource(%s,%s)" % (self.restype, self.name)
+
+    def __hash__(self):
+        return hash((self.restype, self.name))
+
+    def __lt__(self, other):
+        if self.restype < other.restype:
+            return True
+        return self.name.__lt__(other.name)
+
+    def __eq__(self, other):
+        return self.restype == other.restype and self.name == other.name
+
+    def __len__(self):
+        """
+        :rtype: int
+        :returns: number of configuration entries/operations
+        """
+        return len(self.set_config_dict)
+
+    def set_config(self, name, value, overwrite=True):
+        """
+        Set/overwrite a configuration value.
+
+        When calling alter_configs, any configuration properties that are not included
+        in the request will be reverted to their default values. As a workaround, use
+        describe_configs() to retrieve the current configuration and overwrite the
+        settings you want to change.
+
+        :param str name: Configuration property name
+        :param str value: Configuration value
+        :param bool overwrite: If True, overwrite entry if it already exists (default).
+                               If False, do nothing if entry already exists.
+        """
+        if not overwrite and name in self.set_config_dict:
+            return
+        self.set_config_dict[name] = value
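A sketch of the describe-then-alter workaround mentioned in set_config(); the topic name and property values are placeholders:

    from confluent_kafka.admin import AdminClient, ConfigResource

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker
    resource = ConfigResource(ConfigResource.Type.TOPIC, "orders")

    # Copy the current non-default, writable settings so alter_configs does not
    # reset them, then overwrite only the property we care about.
    current = admin.describe_configs([resource])[resource].result()  # name -> ConfigEntry
    for entry in current.values():
        if not entry.is_default and not entry.is_read_only and entry.value is not None:
            resource.set_config(entry.name, entry.value, overwrite=False)

    resource.set_config("retention.ms", "86400000")
    admin.alter_configs([resource])[resource].result()               # None on success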
diff --git a/src/confluent_kafka/admin/_group.py b/src/confluent_kafka/admin/_group.py
new file mode 100644
index 0000000..1c8d5e6
--- /dev/null
+++ b/src/confluent_kafka/admin/_group.py
@@ -0,0 +1,128 @@
+# Copyright 2022 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .._util import ConversionUtil
+from .._model import ConsumerGroupState
+
+
+class ConsumerGroupListing:
+    """
+    Represents the listing information for a consumer group, as returned by the list consumer groups operation.
+    Used by :class:`ListConsumerGroupsResult`.
+
+    Parameters
+    ----------
+    group_id : str
+        The consumer group id.
+    is_simple_consumer_group : bool
+        Whether a consumer group is simple or not.
+    state : ConsumerGroupState
+        Current state of the consumer group.
+    """
+    def __init__(self, group_id, is_simple_consumer_group, state=None):
+        self.group_id = group_id
+        self.is_simple_consumer_group = is_simple_consumer_group
+        if state is not None:
+            self.state = ConversionUtil.convert_to_enum(state, ConsumerGroupState)
+
+
+class ListConsumerGroupsResult:
+    """
+    Represents the result of a list consumer groups operation.
+    Used by :meth:`AdminClient.list_consumer_groups`.
+
+    Parameters
+    ----------
+    valid : list(ConsumerGroupListing)
+        List of successful consumer group listing responses.
+    errors : list(KafkaException)
+        List of errors encountered during the operation, if any.
+    """
+    def __init__(self, valid=None, errors=None):
+        self.valid = valid
+        self.errors = errors
+
+
+class MemberAssignment:
+    """
+    Represents member assignment information.
+    Used by :class:`MemberDescription`.
+
+    Parameters
+    ----------
+    topic_partitions : list(TopicPartition)
+        The topic partitions assigned to a group member.
+    """
+    def __init__(self, topic_partitions=[]):
+        self.topic_partitions = topic_partitions
+        if self.topic_partitions is None:
+            self.topic_partitions = []
+
+
+class MemberDescription:
+    """
+    Represents member information.
+    Used by :class:`ConsumerGroupDescription`.
+
+    Parameters
+    ----------
+    member_id : str
+        The consumer id of the group member.
+    client_id : str
+        The client id of the group member.
+    host: str
+        The host where the group member is running.
+    assignment: MemberAssignment
+        The assignment of the group member.
+    group_instance_id : str
+        The instance id of the group member.
+    """
+    def __init__(self, member_id, client_id, host, assignment, group_instance_id=None):
+        self.member_id = member_id
+        self.client_id = client_id
+        self.host = host
+        self.assignment = assignment
+        self.group_instance_id = group_instance_id
+
+
+class ConsumerGroupDescription:
+    """
+    Represents the description of a consumer group, as returned by the describe consumer groups operation.
+    Used by :meth:`AdminClient.describe_consumer_groups`.
+
+    Parameters
+    ----------
+    group_id : str
+        The consumer group id.
+    is_simple_consumer_group : bool
+        Whether a consumer group is simple or not.
+    members: list(MemberDescription)
+        Description of the members of the consumer group.
+    partition_assignor: str
+        Partition assignor.
+    state : ConsumerGroupState
+        Current state of the consumer group.
+    coordinator: Node
+        Consumer group coordinator.
+    """
+    def __init__(self, group_id, is_simple_consumer_group, members, partition_assignor, state,
+                 coordinator):
+        self.group_id = group_id
+        self.is_simple_consumer_group = is_simple_consumer_group
+        self.members = members
+        self.partition_assignor = partition_assignor
+        if state is not None:
+            self.state = ConversionUtil.convert_to_enum(state, ConsumerGroupState)
+        self.coordinator = coordinator
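A sketch tying these classes together: list the groups, then drill into each description; the broker address is a placeholder and coordinator is assumed to expose a host attribute:

    from confluent_kafka.admin import AdminClient

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

    listing = admin.list_consumer_groups().result()      # ListConsumerGroupsResult
    group_ids = [g.group_id for g in listing.valid or []]

    if group_ids:
        for group_id, future in admin.describe_consumer_groups(group_ids).items():
            desc = future.result()                        # ConsumerGroupDescription
            print(group_id, desc.state, desc.coordinator.host)
            for member in desc.members:                   # MemberDescription
                assigned = member.assignment.topic_partitions  # MemberAssignment
                print("  ", member.member_id, member.host, len(assigned), "partitions")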
diff --git a/src/confluent_kafka/admin/_metadata.py b/src/confluent_kafka/admin/_metadata.py
new file mode 100644
index 0000000..201e453
--- /dev/null
+++ b/src/confluent_kafka/admin/_metadata.py
@@ -0,0 +1,179 @@
+# Copyright 2022 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+class ClusterMetadata (object):
+    """
+    Provides information about the Kafka cluster, brokers, and topics.
+    Returned by list_topics().
+
+    This class is typically not user instantiated.
+    """
+
+    def __init__(self):
+        self.cluster_id = None
+        """Cluster id string, if supported by the broker, else None."""
+        self.controller_id = -1
+        """Current controller broker id, or -1."""
+        self.brokers = {}
+        """Map of brokers indexed by the broker id (int). Value is a BrokerMetadata object."""
+        self.topics = {}
+        """Map of topics indexed by the topic name. Value is a TopicMetadata object."""
+        self.orig_broker_id = -1
+        """The broker this metadata originated from."""
+        self.orig_broker_name = None
+        """The broker name/address this metadata originated from."""
+
+    def __repr__(self):
+        return "ClusterMetadata({})".format(self.cluster_id)
+
+    def __str__(self):
+        return str(self.cluster_id)
+
+
+class BrokerMetadata (object):
+    """
+    Provides information about a Kafka broker.
+
+    This class is typically not user instantiated.
+    """
+
+    def __init__(self):
+        self.id = -1
+        """Broker id"""
+        self.host = None
+        """Broker hostname"""
+        self.port = -1
+        """Broker port"""
+
+    def __repr__(self):
+        return "BrokerMetadata({}, {}:{})".format(self.id, self.host, self.port)
+
+    def __str__(self):
+        return "{}:{}/{}".format(self.host, self.port, self.id)
+
+
+class TopicMetadata (object):
+    """
+    Provides information about a Kafka topic.
+
+    This class is typically not user instantiated.
+    """
+    # The dash in "-topic" and "-error" is needed to circumvent a
+    # Sphinx issue where it tries to reference the same instance variable
+    # on other classes which raises a warning/error.
+
+    def __init__(self):
+        self.topic = None
+        """Topic name"""
+        self.partitions = {}
+        """Map of partitions indexed by partition id. Value is a PartitionMetadata object."""
+        self.error = None
+        """Topic error, or None. Value is a KafkaError object."""
+
+    def __repr__(self):
+        if self.error is not None:
+            return "TopicMetadata({}, {} partitions, {})".format(self.topic, len(self.partitions), self.error)
+        else:
+            return "TopicMetadata({}, {} partitions)".format(self.topic, len(self.partitions))
+
+    def __str__(self):
+        return self.topic
+
+
+class PartitionMetadata (object):
+    """
+    Provides information about a Kafka partition.
+
+    This class is typically not user instantiated.
+
+    :warning: Depending on cluster state the broker ids referenced in
+              leader, replicas and ISRs may temporarily not be reported
+              in ClusterMetadata.brokers. Always check the availability
+              of a broker id in the brokers dict.
+    """
+
+    def __init__(self):
+        self.id = -1
+        """Partition id."""
+        self.leader = -1
+        """Current leader broker for this partition, or -1."""
+        self.replicas = []
+        """List of replica broker ids for this partition."""
+        self.isrs = []
+        """List of in-sync-replica broker ids for this partition."""
+        self.error = None
+        """Partition error, or None. Value is a KafkaError object."""
+
+    def __repr__(self):
+        if self.error is not None:
+            return "PartitionMetadata({}, {})".format(self.id, self.error)
+        else:
+            return "PartitionMetadata({})".format(self.id)
+
+    def __str__(self):
+        return "{}".format(self.id)
+
+
+class GroupMember(object):
+    """Provides information about a group member.
+
+    For more information on the metadata format, refer to:
+    `A Guide To The Kafka Protocol <https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI>`_.
+
+    This class is typically not user instantiated.
+    """  # noqa: E501
+
+    def __init__(self,):
+        self.id = None
+        """Member id (generated by broker)."""
+        self.client_id = None
+        """Client id."""
+        self.client_host = None
+        """Client hostname."""
+        self.metadata = None
+        """Member metadata(binary), format depends on protocol type."""
+        self.assignment = None
+        """Member assignment(binary), format depends on protocol type."""
+
+
+class GroupMetadata(object):
+    """GroupMetadata provides information about a Kafka consumer group
+
+    This class is typically not user instantiated.
+    """
+
+    def __init__(self):
+        self.broker = None
+        """Originating broker metadata."""
+        self.id = None
+        """Group name."""
+        self.error = None
+        """Broker-originated error, or None. Value is a KafkaError object."""
+        self.state = None
+        """Group state."""
+        self.protocol_type = None
+        """Group protocol type."""
+        self.protocol = None
+        """Group protocol."""
+        self.members = []
+        """Group members."""
+
+    def __repr__(self):
+        if self.error is not None:
+            return "GroupMetadata({}, {})".format(self.id, self.error)
+        else:
+            return "GroupMetadata({})".format(self.id)
+
+    def __str__(self):
+        return self.id
diff --git a/src/confluent_kafka/admin/_resource.py b/src/confluent_kafka/admin/_resource.py
new file mode 100644
index 0000000..b786f3a
--- /dev/null
+++ b/src/confluent_kafka/admin/_resource.py
@@ -0,0 +1,48 @@
+# Copyright 2022 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import Enum
+from .. import cimpl as _cimpl
+
+
+class ResourceType(Enum):
+    """
+    Enumerates the different types of Kafka resources.
+    """
+    UNKNOWN = _cimpl.RESOURCE_UNKNOWN  #: Resource type is not known or not set.
+    ANY = _cimpl.RESOURCE_ANY  #: Match any resource, used for lookups.
+    TOPIC = _cimpl.RESOURCE_TOPIC  #: Topic resource. Resource name is topic name.
+    GROUP = _cimpl.RESOURCE_GROUP  #: Group resource. Resource name is group.id.
+    BROKER = _cimpl.RESOURCE_BROKER  #: Broker resource. Resource name is broker id.
+
+    def __lt__(self, other):
+        if self.__class__ != other.__class__:
+            return NotImplemented
+        return self.value < other.value
+
+
+class ResourcePatternType(Enum):
+    """
+    Enumerates the different types of Kafka resource patterns.
+    """
+    UNKNOWN = _cimpl.RESOURCE_PATTERN_UNKNOWN  #: Resource pattern type is not known or not set.
+    ANY = _cimpl.RESOURCE_PATTERN_ANY  #: Match any resource, used for lookups.
+    MATCH = _cimpl.RESOURCE_PATTERN_MATCH  #: Match: will perform pattern matching
+    LITERAL = _cimpl.RESOURCE_PATTERN_LITERAL  #: Literal: A literal resource name
+    PREFIXED = _cimpl.RESOURCE_PATTERN_PREFIXED  #: Prefixed: A prefixed resource name
+
+    def __lt__(self, other):
+        if self.__class__ != other.__class__:
+            return NotImplemented
+        return self.value < other.value
diff --git a/src/confluent_kafka/avro/README.md b/src/confluent_kafka/avro/README.md
new file mode 100644
index 0000000..a7769dd
--- /dev/null
+++ b/src/confluent_kafka/avro/README.md
@@ -0,0 +1,4 @@
+# Warning: Deprecated
+
+CachedSchemaRegistryClient, AvroProducer and AvroConsumer have been
+deprecated. Use AvroSerializer and AvroDeserializer instead.
diff --git a/src/confluent_kafka/avro/__init__.py b/src/confluent_kafka/avro/__init__.py
index 70391ec..c5475c9 100644
--- a/src/confluent_kafka/avro/__init__.py
+++ b/src/confluent_kafka/avro/__init__.py
@@ -17,9 +17,10 @@
 
 """
     Avro schema registry module: Deals with encoding and decoding of messages with avro schemas
-
 """
 
+import warnings
+
 from confluent_kafka import Producer, Consumer
 from confluent_kafka.avro.error import ClientError
 from confluent_kafka.avro.load import load, loads  # noqa
@@ -32,10 +33,14 @@ from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
 
 class AvroProducer(Producer):
     """
+        .. deprecated:: 2.0.2
+
+        This class will be removed in a future version of the library.
+
         Kafka Producer client which does avro schema encoding to messages.
         Handles schema registration, Message serialization.
 
-        Constructor takes below parameters.
+        Constructor arguments:
 
         :param dict config: Config parameters containing url for schema registry (``schema.registry.url``)
                             and the standard Kafka client configuration (``bootstrap.servers`` et.al).
@@ -45,6 +50,9 @@ class AvroProducer(Producer):
 
     def __init__(self, config, default_key_schema=None,
                  default_value_schema=None, schema_registry=None, **kwargs):
+        warnings.warn(
+            "AvroProducer has been deprecated. Use AvroSerializer instead.",
+            category=DeprecationWarning, stacklevel=2)
 
         sr_conf = {key.replace("schema.registry.", ""): value
                    for key, value in config.items() if key.startswith("schema.registry")}
@@ -111,10 +119,14 @@ class AvroProducer(Producer):
 
 class AvroConsumer(Consumer):
     """
+    .. deprecated:: 2.0.2
+
+    This class will be removed in a future version of the library.
+
     Kafka Consumer client which does avro schema decoding of messages.
     Handles message deserialization.
 
-    Constructor takes below parameters
+    Constructor arguments:
 
     :param dict config: Config parameters containing url for schema registry (``schema.registry.url``)
                         and the standard Kafka client configuration (``bootstrap.servers`` et.al)
@@ -124,6 +136,9 @@ class AvroConsumer(Consumer):
     """
 
     def __init__(self, config, schema_registry=None, reader_key_schema=None, reader_value_schema=None, **kwargs):
+        warnings.warn(
+            "AvroConsumer has been deprecated. Use AvroDeserializer instead.",
+            category=DeprecationWarning, stacklevel=2)
 
         sr_conf = {key.replace("schema.registry.", ""): value
                    for key, value in config.items() if key.startswith("schema.registry")}
diff --git a/src/confluent_kafka/avro/cached_schema_registry_client.py b/src/confluent_kafka/avro/cached_schema_registry_client.py
index a4ed890..b0ea6c3 100644
--- a/src/confluent_kafka/avro/cached_schema_registry_client.py
+++ b/src/confluent_kafka/avro/cached_schema_registry_client.py
@@ -21,6 +21,8 @@
 #
 import logging
 import warnings
+import urllib3
+import json
 from collections import defaultdict
 
 from requests import Session, utils
@@ -54,6 +56,7 @@ class CachedSchemaRegistryClient(object):
     Use CachedSchemaRegistryClient(dict: config) instead.
     Existing params ca_location, cert_location and key_location will be replaced with their librdkafka equivalents:
     `ssl.ca.location`, `ssl.certificate.location` and `ssl.key.location` respectively.
+    Password-protected private keys are supported only via the config dict, using the 'ssl.key.password' field.
 
     Errors communicating to the server will result in a ClientError being raised.
 
@@ -109,6 +112,9 @@ class CachedSchemaRegistryClient(object):
         self.url = utils.urldefragauth(self.url)
 
         self._session = s
+        key_password = conf.pop('ssl.key.password', None)
+        self._is_key_password_provided = not key_password
+        self._https_session = self._make_https_session(s.cert[0], s.cert[1], ca_path, s.auth, key_password)
 
         self.auto_register_schemas = conf.pop("auto.register.schemas", True)
 
@@ -125,7 +131,32 @@ class CachedSchemaRegistryClient(object):
         self.close()
 
     def close(self):
-        self._session.close()
+        # Constructor exceptions may occur prior to _session being set.
+        if hasattr(self, '_session'):
+            self._session.close()
+        if hasattr(self, '_https_session'):
+            self._https_session.clear()
+
+    @staticmethod
+    def _make_https_session(cert_location, key_location, ca_certs_path, auth, key_password):
+        https_session = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=ca_certs_path,
+                                            cert_file=cert_location, key_file=key_location, key_password=key_password)
+        https_session.auth = auth
+        return https_session
+
+    def _send_https_session_request(self, url, method, headers, body):
+        request_headers = {'Accept': ACCEPT_HDR}
+        auth = self._https_session.auth
+        if body:
+            body = json.dumps(body).encode('UTF-8')
+            request_headers["Content-Length"] = str(len(body))
+            request_headers["Content-Type"] = "application/vnd.schemaregistry.v1+json"
+        if auth[0] != '' and auth[1] != '':
+            request_headers.update(urllib3.make_headers(basic_auth=auth[0] + ":" +
+                                                        auth[1]))
+        request_headers.update(headers)
+        response = self._https_session.request(method, url, headers=request_headers, body=body)
+        return response
 
     @staticmethod
     def _configure_basic_auth(url, conf):
@@ -156,6 +187,13 @@ class CachedSchemaRegistryClient(object):
         if method not in VALID_METHODS:
             raise ClientError("Method {} is invalid; valid methods include {}".format(method, VALID_METHODS))
 
+        if url.startswith('https') and self._is_key_password_provided:
+            response = self._send_https_session_request(url, method, headers, body)
+            try:
+                return json.loads(response.data), response.status
+            except ValueError:
+                return response.content, response.status
+
         _headers = {'Accept': ACCEPT_HDR}
         if body:
             _headers["Content-Length"] = str(len(body))
diff --git a/src/confluent_kafka/avro/load.py b/src/confluent_kafka/avro/load.py
index dead26c..9db8660 100644
--- a/src/confluent_kafka/avro/load.py
+++ b/src/confluent_kafka/avro/load.py
@@ -23,7 +23,7 @@ def loads(schema_str):
     """ Parse a schema given a schema string """
     try:
         return schema.parse(schema_str)
-    except schema.SchemaParseException as e:
+    except SchemaParseException as e:
         raise ClientError("Schema parse failed: %s" % (str(e)))
 
 
@@ -42,6 +42,13 @@ def _hash_func(self):
 try:
     from avro import schema
 
+    try:
+        # avro >= 1.11.0
+        from avro.errors import SchemaParseException
+    except ImportError:
+        # avro < 1.11.0
+        from avro.schema import SchemaParseException
+
     schema.RecordSchema.__hash__ = _hash_func
     schema.PrimitiveSchema.__hash__ = _hash_func
     schema.UnionSchema.__hash__ = _hash_func
diff --git a/src/confluent_kafka/avro/requirements.txt b/src/confluent_kafka/avro/requirements.txt
index 90bc865..e34a65d 100644
--- a/src/confluent_kafka/avro/requirements.txt
+++ b/src/confluent_kafka/avro/requirements.txt
@@ -1,4 +1,3 @@
 fastavro>=0.23.0
 requests
-avro==1.10.0;python_version<='3.0'
-avro-python3==1.10.0;python_version>='3.0'
+avro>=1.11.1,<2
diff --git a/src/confluent_kafka/avro/serializer/README.md b/src/confluent_kafka/avro/serializer/README.md
new file mode 100644
index 0000000..a7769dd
--- /dev/null
+++ b/src/confluent_kafka/avro/serializer/README.md
@@ -0,0 +1,4 @@
+# Warning: Deprecated
+
+CachedSchemaRegistryClient, AvroProducer and AvroConsumer have been
+deprecated. Use AvroSerializer and AvroDeserializer instead.
diff --git a/src/confluent_kafka/avro/serializer/message_serializer.py b/src/confluent_kafka/avro/serializer/message_serializer.py
index 9023cb6..d92763e 100644
--- a/src/confluent_kafka/avro/serializer/message_serializer.py
+++ b/src/confluent_kafka/avro/serializer/message_serializer.py
@@ -20,6 +20,7 @@
 # derived from https://github.com/verisign/python-confluent-schemaregistry.git
 #
 import io
+import json
 import logging
 import struct
 import sys
@@ -79,7 +80,7 @@ class MessageSerializer(object):
     # Encoder support
     def _get_encoder_func(self, writer_schema):
         if HAS_FAST:
-            schema = writer_schema.to_json()
+            schema = json.loads(str(writer_schema))
             parsed_schema = parse_schema(schema)
             return lambda record, fp: schemaless_writer(fp, parsed_schema, record)
         writer = avro.io.DatumWriter(writer_schema)
@@ -175,8 +176,11 @@ class MessageSerializer(object):
         if HAS_FAST:
             # try to use fast avro
             try:
-                fast_avro_writer_schema = parse_schema(writer_schema_obj.to_json())
-                fast_avro_reader_schema = parse_schema(reader_schema_obj.to_json())
+                fast_avro_writer_schema = parse_schema(json.loads(str(writer_schema_obj)))
+                if reader_schema_obj is not None:
+                    fast_avro_reader_schema = parse_schema(json.loads(str(reader_schema_obj)))
+                else:
+                    fast_avro_reader_schema = None
                 schemaless_reader(payload, fast_avro_writer_schema)
 
                 # If we reach this point, this means we have fastavro and it can
diff --git a/src/confluent_kafka/deserializing_consumer.py b/src/confluent_kafka/deserializing_consumer.py
index fadc748..39f8094 100644
--- a/src/confluent_kafka/deserializing_consumer.py
+++ b/src/confluent_kafka/deserializing_consumer.py
@@ -26,34 +26,20 @@ from .serialization import (SerializationContext,
 
 class DeserializingConsumer(_ConsumerImpl):
     """
-    A client that consumes records from a Kafka cluster. With deserialization
-    capabilities.
+    A high level Kafka consumer with deserialization capabilities.
 
-    Note:
+    `This class is experimental and likely to be removed, or subject to incompatible API
+    changes in future versions of the library. To avoid breaking changes on upgrading, we
+    recommend using deserializers directly.`
 
-        The DeserializingConsumer is an experimental API and subject to change.
+    Derived from the :py:class:`Consumer` class, overriding the :py:func:`Consumer.poll`
+    method to add deserialization capabilities.
 
-    .. versionadded:: 1.4.0
-
-        The ``key.deserializer`` and ``value.deserializer`` classes instruct the
-        DeserializingConsumer on how to convert the message payload bytes to objects.
-
-    Note:
-
-        All configured callbacks are served from the application queue upon
-        calling :py:func:`DeserializingConsumer.poll`
-
-    Notable DeserializingConsumer configuration properties(* indicates required field)
+    Additional configuration properties:
 
     +-------------------------+---------------------+-----------------------------------------------------+
     | Property Name           | Type                | Description                                         |
     +=========================+=====================+=====================================================+
-    | ``bootstrap.servers`` * | str                 | Comma-separated list of brokers.                    |
-    +-------------------------+---------------------+-----------------------------------------------------+
-    |                         |                     | Client group id string.                             |
-    | ``group.id`` *          | str                 | All clients sharing the same group.id belong to the |
-    |                         |                     | same group.                                         |
-    +-------------------------+---------------------+-----------------------------------------------------+
     |                         |                     | Callable(SerializationContext, bytes) -> obj        |
     | ``key.deserializer``    | callable            |                                                     |
     |                         |                     | Deserializer used for message keys.                 |
@@ -62,37 +48,26 @@ class DeserializingConsumer(_ConsumerImpl):
     | ``value.deserializer``  | callable            |                                                     |
     |                         |                     | Deserializer used for message values.               |
     +-------------------------+---------------------+-----------------------------------------------------+
-    |                         |                     | Callable(KafkaError)                                |
-    |                         |                     |                                                     |
-    | ``error_cb``            | callable            | Callback for generic/global error events. These     |
-    |                         |                     | errors are typically to be considered informational |
-    |                         |                     | since the client will automatically try to recover. |
-    +-------------------------+---------------------+-----------------------------------------------------+
-    | ``log_cb``              | ``logging.Handler`` | Logging handler to forward logs                     |
-    +-------------------------+---------------------+-----------------------------------------------------+
-    |                         |                     | Callable(str)                                       |
-    |                         |                     |                                                     |
-    |                         |                     | Callback for statistics. This callback is           |
-    | ``stats_cb``            | callable            | added to the application queue every                |
-    |                         |                     | ``statistics.interval.ms`` (configured separately). |
-    |                         |                     | The function argument is a JSON formatted str       |
-    |                         |                     | containing statistics data.                         |
-    +-------------------------+---------------------+-----------------------------------------------------+
-    |                         |                     | Callable(ThrottleEvent)                             |
-    | ``throttle_cb``         | callable            |                                                     |
-    |                         |                     | Callback for throttled request reporting.           |
-    +-------------------------+---------------------+-----------------------------------------------------+
+
+    Deserializers for string, integer and double (:py:class:`StringDeserializer`, :py:class:`IntegerDeserializer`
+    and :py:class:`DoubleDeserializer`) are supplied out-of-the-box in the ``confluent_kafka.serialization``
+    namespace.
+
+    Deserializers for Protobuf, JSON Schema and Avro (:py:class:`ProtobufDeserializer`, :py:class:`JSONDeserializer`
+    and :py:class:`AvroDeserializer`) with Confluent Schema Registry integration are supplied out-of-the-box
+    in the ``confluent_kafka.schema_registry`` namespace.
 
     See Also:
-        - `CONFIGURATION.md <https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md>`_ for additional configuration property details.
-        - `STATISTICS.md <https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md>`_ for detailed information about the statistics handled by stats_cb
+        - The :ref:`Configuration Guide <pythonclient_configuration>` for in-depth information on how to configure the client.
+        - `CONFIGURATION.md <https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md>`_ for a comprehensive set of configuration properties.
+        - `STATISTICS.md <https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md>`_ for detailed information on the statistics provided by stats_cb
+        - The :py:class:`Consumer` class for inherited methods.
 
     Args:
         conf (dict): DeserializingConsumer configuration.
 
     Raises:
         ValueError: if configuration validation fails
-
     """  # noqa: E501
 
     def __init__(self, conf):
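A minimal sketch of configuring the deserializers described in the table above; broker, group and topic names are placeholders:

    from confluent_kafka import DeserializingConsumer
    from confluent_kafka.serialization import StringDeserializer

    consumer = DeserializingConsumer({
        "bootstrap.servers": "localhost:9092",       # placeholder broker
        "group.id": "example-group",
        "auto.offset.reset": "earliest",
        "key.deserializer": StringDeserializer("utf_8"),
        "value.deserializer": StringDeserializer("utf_8"),
    })
    consumer.subscribe(["orders"])

    msg = consumer.poll(1.0)
    if msg is not None:
        print(msg.key(), msg.value())                # already deserialized to str
    consumer.close()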
@@ -113,15 +88,13 @@ class DeserializingConsumer(_ConsumerImpl):
             :py:class:`Message` or None on timeout
 
         Raises:
-            KeyDeserializationError: If an error occurs during key
-            deserialization.
-
-            ValueDeserializationError: If an error occurs during value
-            deserialization.
+            KeyDeserializationError: If an error occurs during key deserialization.
 
-            ConsumeError if an error was encountered while polling.
+            ValueDeserializationError: If an error occurs during value deserialization.
 
+            ConsumeError: If an error was encountered while polling.
         """
+
         msg = super(DeserializingConsumer, self).poll(timeout)
 
         if msg is None:
@@ -130,7 +103,7 @@ class DeserializingConsumer(_ConsumerImpl):
         if msg.error() is not None:
             raise ConsumeError(msg.error(), kafka_message=msg)
 
-        ctx = SerializationContext(msg.topic(), MessageField.VALUE)
+        ctx = SerializationContext(msg.topic(), MessageField.VALUE, msg.headers())
         value = msg.value()
         if self._value_deserializer is not None:
             try:
@@ -155,4 +128,5 @@ class DeserializingConsumer(_ConsumerImpl):
         :py:func:`Consumer.consume` not implemented, use
         :py:func:`DeserializingConsumer.poll` instead
         """
+
         raise NotImplementedError
diff --git a/src/confluent_kafka/kafkatest/verifiable_client.py b/src/confluent_kafka/kafkatest/verifiable_client.py
index 714783e..56d4383 100644
--- a/src/confluent_kafka/kafkatest/verifiable_client.py
+++ b/src/confluent_kafka/kafkatest/verifiable_client.py
@@ -28,6 +28,7 @@ class VerifiableClient(object):
     Generic base class for a kafkatest verifiable client.
     Implements the common kafkatest protocol and semantics.
     """
+
     def __init__(self, conf):
         """
         """
diff --git a/src/confluent_kafka/kafkatest/verifiable_consumer.py b/src/confluent_kafka/kafkatest/verifiable_consumer.py
index 2e3bfba..94aa48e 100755
--- a/src/confluent_kafka/kafkatest/verifiable_consumer.py
+++ b/src/confluent_kafka/kafkatest/verifiable_consumer.py
@@ -27,6 +27,7 @@ class VerifiableConsumer(VerifiableClient):
     confluent-kafka-python backed VerifiableConsumer class for use with
     Kafka's kafkatests client tests.
     """
+
     def __init__(self, conf):
         """
         conf is a config dict passed to confluent_kafka.Consumer()
@@ -223,6 +224,7 @@ class VerifiableConsumer(VerifiableClient):
 
 class AssignedPartition(object):
     """ Local state container for assigned partition. """
+
     def __init__(self, topic, partition):
         super(AssignedPartition, self).__init__()
         self.topic = topic
diff --git a/src/confluent_kafka/kafkatest/verifiable_producer.py b/src/confluent_kafka/kafkatest/verifiable_producer.py
index fbf66a7..a543e1d 100755
--- a/src/confluent_kafka/kafkatest/verifiable_producer.py
+++ b/src/confluent_kafka/kafkatest/verifiable_producer.py
@@ -26,6 +26,7 @@ class VerifiableProducer(VerifiableClient):
     confluent-kafka-python backed VerifiableProducer class for use with
     Kafka's kafkatests client tests.
     """
+
     def __init__(self, conf):
         """
         conf is a config dict passed to confluent_kafka.Producer()
diff --git a/src/confluent_kafka/schema_registry/avro.py b/src/confluent_kafka/schema_registry/avro.py
index 0a480e9..38ab25d 100644
--- a/src/confluent_kafka/schema_registry/avro.py
+++ b/src/confluent_kafka/schema_registry/avro.py
@@ -14,7 +14,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
+
 from io import BytesIO
 from json import loads
 from struct import pack, unpack
@@ -34,7 +34,6 @@ from confluent_kafka.serialization import (Deserializer,
 class _ContextStringIO(BytesIO):
     """
     Wrapper to allow use of StringIO via 'with' constructs.
-
     """
 
     def __enter__(self):
@@ -47,7 +46,7 @@ class _ContextStringIO(BytesIO):
 
 def _schema_loads(schema_str):
     """
-    Instantiates a Schema instance from a declaration string
+    Instantiate a Schema instance from a declaration string.
 
     Args:
         schema_str (str): Avro Schema declaration.
@@ -56,43 +55,79 @@ def _schema_loads(schema_str):
         https://avro.apache.org/docs/current/spec.html#schemas
 
     Returns:
-        Schema: Schema instance
-
+        Schema: A Schema instance.
     """
+
     schema_str = schema_str.strip()
 
     # canonical form primitive declarations are not supported
-    if schema_str[0] != "{":
-        schema_str = '{"type":"' + schema_str + '"}'
+    if schema_str[0] != "{" and schema_str[0] != "[":
+        schema_str = '{"type":' + schema_str + '}'
 
     return Schema(schema_str, schema_type='AVRO')
 
 
-class AvroSerializer(Serializer):
+def _resolve_named_schema(schema, schema_registry_client, named_schemas=None):
+    """
+    Resolves named schemas referenced by the provided schema recursively.
+    :param schema: Schema to resolve named schemas for.
+    :param schema_registry_client: SchemaRegistryClient to use for retrieval.
+    :param named_schemas: Dict of named schemas resolved recursively.
+    :return: named_schemas dict.
     """
-    AvroSerializer serializes objects in the Confluent Schema Registry binary
-    format for Avro.
+    if named_schemas is None:
+        named_schemas = {}
+    if schema.references is not None:
+        for ref in schema.references:
+            referenced_schema = schema_registry_client.get_version(ref.subject, ref.version)
+            _resolve_named_schema(referenced_schema.schema, schema_registry_client, named_schemas)
+            parse_schema(loads(referenced_schema.schema.schema_str), named_schemas=named_schemas)
+    return named_schemas
 
 
-    AvroSerializer configuration properties:
+class AvroSerializer(Serializer):
+    """
+    Serializer that outputs Avro binary encoded data with Confluent Schema Registry framing.
+
+    Configuration properties:
 
     +---------------------------+----------+--------------------------------------------------+
     | Property Name             | Type     | Description                                      |
     +===========================+==========+==================================================+
-    |                           |          | Registers schemas automatically if not           |
-    | ``auto.register.schemas`` | bool     | previously associated with a particular subject. |
+    |                           |          | If True, automatically register the configured   |
+    | ``auto.register.schemas`` | bool     | schema with Confluent Schema Registry if it has  |
+    |                           |          | not previously been associated with the relevant |
+    |                           |          | subject (determined via subject.name.strategy).  |
+    |                           |          |                                                  |
     |                           |          | Defaults to True.                                |
     +---------------------------+----------+--------------------------------------------------+
+    |                           |          | Whether to normalize schemas, which will         |
+    | ``normalize.schemas``     | bool     | transform schemas to have a consistent format,   |
+    |                           |          | including ordering properties and references.    |
+    +---------------------------+----------+--------------------------------------------------+
+    |                           |          | Whether to use the latest subject version for    |
+    | ``use.latest.version``    | bool     | serialization.                                   |
+    |                           |          |                                                  |
+    |                           |          | WARNING: There is no check that the latest       |
+    |                           |          | schema is backwards compatible with the object   |
+    |                           |          | being serialized.                                |
+    |                           |          |                                                  |
+    |                           |          | Defaults to False.                               |
+    +---------------------------+----------+--------------------------------------------------+
     |                           |          | Callable(SerializationContext, str) -> str       |
     |                           |          |                                                  |
-    | ``subject.name.strategy`` | callable | Instructs the AvroSerializer on how to construct |
-    |                           |          | Schema Registry subject names.                   |
+    | ``subject.name.strategy`` | callable | Defines how Schema Registry subject names are    |
+    |                           |          | constructed. Standard naming strategies are      |
+    |                           |          | defined in the confluent_kafka.schema_registry   |
+    |                           |          | namespace.                                       |
+    |                           |          |                                                  |
     |                           |          | Defaults to topic_subject_name_strategy.         |
     +---------------------------+----------+--------------------------------------------------+
 
-    Schemas are registered to namespaces known as Subjects which define how a
-    schema may evolve over time. By default the subject name is formed by
-    concatenating the topic name with the message field separated by a hyphen.
+    Schemas are registered against subject names in Confluent Schema Registry that
+    define a scope in which the schemas can be evolved. By default, the subject name
+    is formed by concatenating the topic name with the message field (key or value)
+    separated by a hyphen.
 
     i.e. {topic name}-{message field}
 
@@ -114,45 +149,55 @@ class AvroSerializer(Serializer):
     See `Subject name strategy <https://docs.confluent.io/current/schema-registry/serializer-formatter.html#subject-name-strategy>`_ for additional details.
 
     Note:
-        Prior to serialization all ``Complex Types`` must first be converted to
+        Prior to serialization, all values must first be converted to
         a dict instance. This may be handled manually prior to calling
-        :py:func:`SerializingProducer.produce()` or by registering a `to_dict`
-        callable with the AvroSerializer.
+        :py:func:`Producer.produce()` or by registering a `to_dict`
+        callable with AvroSerializer.
 
         See ``avro_producer.py`` in the examples directory for example usage.
 
+    Note:
+       Tuple notation can be used to determine which branch of an ambiguous union to take.
+
+       See `fastavro notation <https://fastavro.readthedocs.io/en/latest/writer.html#using-the-tuple-notation-to-specify-which-branch-of-a-union-to-take>`_
+
     Args:
         schema_registry_client (SchemaRegistryClient): Schema Registry client instance.
 
-        schema_str (str): Avro `Schema Declaration. <https://avro.apache.org/docs/current/spec.html#schemas>`_
+        schema_str (str or Schema): Avro `Schema Declaration. <https://avro.apache.org/docs/current/spec.html#schemas>`_ Accepts either a string or a Schema instance. Note that string definitions cannot reference other schemas. For referencing other schemas, use a Schema instance.
 
         to_dict (callable, optional): Callable(object, SerializationContext) -> dict. Converts object to a dict.
 
         conf (dict): AvroSerializer configuration.
-
     """  # noqa: E501
-    __slots__ = ['_hash', '_auto_register', '_known_subjects', '_parsed_schema',
+    __slots__ = ['_hash', '_auto_register', '_normalize_schemas', '_use_latest_version',
+                 '_known_subjects', '_parsed_schema',
                  '_registry', '_schema', '_schema_id', '_schema_name',
-                 '_subject_name_func', '_to_dict']
+                 '_subject_name_func', '_to_dict', '_named_schemas']
 
-    # default configuration
     _default_conf = {'auto.register.schemas': True,
+                     'normalize.schemas': False,
+                     'use.latest.version': False,
                      'subject.name.strategy': topic_subject_name_strategy}
 
-    def __init__(self, schema_registry_client, schema_str,
-                 to_dict=None, conf=None):
+    def __init__(self, schema_registry_client, schema_str, to_dict=None, conf=None):
+        if isinstance(schema_str, str):
+            schema = _schema_loads(schema_str)
+        elif isinstance(schema_str, Schema):
+            schema = schema_str
+        else:
+            raise TypeError('You must pass either schema string or schema object')
+
         self._registry = schema_registry_client
         self._schema_id = None
-        # Avoid calling registry if schema is known to be registered
         self._known_subjects = set()
 
         if to_dict is not None and not callable(to_dict):
-            raise ValueError("to_dict must be callable with the signature"
-                             " to_dict(object, SerializationContext)->dict")
+            raise ValueError("to_dict must be callable with the signature "
+                             "to_dict(object, SerializationContext)->dict")
 
         self._to_dict = to_dict
 
-        # handle configuration
         conf_copy = self._default_conf.copy()
         if conf is not None:
             conf_copy.update(conf)
@@ -161,6 +206,16 @@ class AvroSerializer(Serializer):
         if not isinstance(self._auto_register, bool):
             raise ValueError("auto.register.schemas must be a boolean value")
 
+        self._normalize_schemas = conf_copy.pop('normalize.schemas')
+        if not isinstance(self._normalize_schemas, bool):
+            raise ValueError("normalize.schemas must be a boolean value")
+
+        self._use_latest_version = conf_copy.pop('use.latest.version')
+        if not isinstance(self._use_latest_version, bool):
+            raise ValueError("use.latest.version must be a boolean value")
+        if self._use_latest_version and self._auto_register:
+            raise ValueError("cannot enable both use.latest.version and auto.register.schemas")
+
         self._subject_name_func = conf_copy.pop('subject.name.strategy')
         if not callable(self._subject_name_func):
             raise ValueError("subject.name.strategy must be callable")
@@ -169,15 +224,23 @@ class AvroSerializer(Serializer):
             raise ValueError("Unrecognized properties: {}"
                              .format(", ".join(conf_copy.keys())))
 
-        # convert schema_str to Schema instance
-        schema = _schema_loads(schema_str)
         schema_dict = loads(schema.schema_str)
-        parsed_schema = parse_schema(schema_dict)
-        # The Avro spec states primitives have a name equal to their type
-        # i.e. {"type": "string"} has a name of string.
-        # This function does not comply.
-        # https://github.com/fastavro/fastavro/issues/415
-        schema_name = parsed_schema.get('name', schema_dict['type'])
+        self._named_schemas = _resolve_named_schema(schema, schema_registry_client)
+        parsed_schema = parse_schema(schema_dict, named_schemas=self._named_schemas)
+
+        if isinstance(parsed_schema, list):
+            # if parsed_schema is a list, we have an Avro union and there
+            # is no valid schema name. This is fine because the only use of
+            # schema_name is for supplying the subject name to the registry
+            # and union types should use topic_subject_name_strategy, which
+            # just discards the schema name anyway
+            schema_name = None
+        else:
+            # The Avro spec states primitives have a name equal to their type
+            # i.e. {"type": "string"} has a name of string.
+            # This function does not comply.
+            # https://github.com/fastavro/fastavro/issues/415
+            schema_name = parsed_schema.get("name", schema_dict["type"])
 
         self._schema = schema
         self._schema_name = schema_name
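
A minimal sketch, assuming placeholder subject names, a placeholder registry URL, and a reference that resolves to version 1, of passing a Schema instance with references so that the named-schema resolution added above can fetch the referenced definition from Schema Registry:

    from confluent_kafka.schema_registry import SchemaRegistryClient, Schema, SchemaReference
    from confluent_kafka.schema_registry.avro import AvroSerializer

    registry = SchemaRegistryClient({'url': 'http://localhost:8081'})  # placeholder URL

    # Assume the referenced record is registered beforehand under the subject 'address'.
    address_schema = """
    {"type": "record", "name": "Address", "namespace": "example",
     "fields": [{"name": "street", "type": "string"}]}
    """
    registry.register_schema('address', Schema(address_schema, 'AVRO'))

    user_schema = """
    {"type": "record", "name": "User", "namespace": "example",
     "fields": [{"name": "name", "type": "string"},
                {"name": "address", "type": "example.Address"}]}
    """
    serializer = AvroSerializer(
        registry,
        Schema(user_schema, 'AVRO',
               references=[SchemaReference('example.Address', 'address', 1)]),
    )
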
@@ -185,41 +248,48 @@ class AvroSerializer(Serializer):
 
     def __call__(self, obj, ctx):
         """
-        Serializes an object to the Confluent Schema Registry's Avro binary
-        format.
+        Serializes an object to Avro binary format, prepending it with Confluent
+        Schema Registry framing.
 
         Args:
-            obj (object): object instance to serializes.
+            obj (object): The object instance to serialize.
 
             ctx (SerializationContext): Metadata pertaining to the serialization operation.
 
-        Note:
-            None objects are represented as Kafka Null.
-
         Raises:
-            SerializerError: if any error occurs serializing obj
+            SerializerError: If any error occurs serializing obj.
+            SchemaRegistryError: If there was an error registering the schema with
+                                 Schema Registry, or auto.register.schemas is
+                                 false and the schema was not registered.
 
         Returns:
-            bytes: Confluent Schema Registry formatted Avro bytes
-
+            bytes: Confluent Schema Registry encoded Avro bytes
         """
+
         if obj is None:
             return None
 
         subject = self._subject_name_func(ctx, self._schema_name)
 
-        # Check to ensure this schema has been registered under subject_name.
-        if self._auto_register and subject not in self._known_subjects:
-            # The schema name will always be the same. We can't however register
-            # a schema without a subject so we set the schema_id here to handle
-            # the initial registration.
-            self._schema_id = self._registry.register_schema(subject,
-                                                             self._schema)
-            self._known_subjects.add(subject)
-        elif not self._auto_register and subject not in self._known_subjects:
-            registered_schema = self._registry.lookup_schema(subject,
-                                                             self._schema)
-            self._schema_id = registered_schema.schema_id
+        if subject not in self._known_subjects:
+            if self._use_latest_version:
+                latest_schema = self._registry.get_latest_version(subject)
+                self._schema_id = latest_schema.schema_id
+
+            else:
+                # Check to ensure this schema has been registered under subject_name.
+                if self._auto_register:
+                    # The schema name will always be the same. We can't however register
+                    # a schema without a subject so we set the schema_id here to handle
+                    # the initial registration.
+                    self._schema_id = self._registry.register_schema(subject,
+                                                                     self._schema,
+                                                                     self._normalize_schemas)
+                else:
+                    registered_schema = self._registry.lookup_schema(subject,
+                                                                     self._schema,
+                                                                     self._normalize_schemas)
+                    self._schema_id = registered_schema.schema_id
             self._known_subjects.add(subject)
 
         if self._to_dict is not None:
@@ -238,13 +308,13 @@ class AvroSerializer(Serializer):
 
 class AvroDeserializer(Deserializer):
     """
-    AvroDeserializer decodes bytes written in the Schema Registry
-    Avro format to an object.
+    Deserializer for Avro binary encoded data with Confluent Schema Registry
+    framing.
 
     Note:
-        ``Complex Types`` are returned as dicts. If a more specific instance
-        type is desired a callable, ``from_dict``, may be registered with
-        the AvroDeserializer which converts a dict to the desired type.
+        By default, Avro complex types are returned as dicts. This behavior can
+        be overridden by registering a callable ``from_dict`` with the deserializer to
+        convert the dicts to the desired type.
 
         See ``avro_consumer.py`` in the examples directory for example
         usage.
@@ -253,11 +323,12 @@ class AvroDeserializer(Deserializer):
         schema_registry_client (SchemaRegistryClient): Confluent Schema Registry
             client instance.
 
-        schema_str (str, optional): Avro reader schema declaration.
-            If not provided, writer schema is used for deserialization.
+        schema_str (str, Schema, optional): Avro reader schema declaration. Accepts either a string or a
+        Schema instance. If not provided, the writer schema will be used as the reader schema. Note that string
+        definitions cannot reference other schemas. For referencing other schemas, use a Schema instance.
 
         from_dict (callable, optional): Callable(dict, SerializationContext) -> object.
-            Converts dict to an instance of some object.
+            Converts a dict to an instance of some object.
 
         return_record_name (bool): If True, when reading a union of records, the result will
                                    be a tuple where the first value is the name of the record and the second value is
@@ -267,64 +338,86 @@ class AvroDeserializer(Deserializer):
         `Apache Avro Schema Declaration <https://avro.apache.org/docs/current/spec.html#schemas>`_
 
         `Apache Avro Schema Resolution <https://avro.apache.org/docs/1.8.2/spec.html#Schema+Resolution>`_
-
     """
-    __slots__ = ['_reader_schema', '_registry', '_from_dict', '_writer_schemas', '_return_record_name']
+
+    __slots__ = ['_reader_schema', '_registry', '_from_dict', '_writer_schemas', '_return_record_name', '_schema',
+                 '_named_schemas']
 
     def __init__(self, schema_registry_client, schema_str=None, from_dict=None, return_record_name=False):
+        schema = None
+        if schema_str is not None:
+            if isinstance(schema_str, str):
+                schema = _schema_loads(schema_str)
+            elif isinstance(schema_str, Schema):
+                schema = schema_str
+            else:
+                raise TypeError('You must pass either schema string or schema object')
+
+        self._schema = schema
         self._registry = schema_registry_client
         self._writer_schemas = {}
 
-        self._reader_schema = parse_schema(loads(schema_str)) if schema_str else None
+        if schema:
+            schema_dict = loads(self._schema.schema_str)
+            self._named_schemas = _resolve_named_schema(self._schema, schema_registry_client)
+            self._reader_schema = parse_schema(schema_dict,
+                                               named_schemas=self._named_schemas)
+        else:
+            self._named_schemas = None
+            self._reader_schema = None
 
         if from_dict is not None and not callable(from_dict):
-            raise ValueError("from_dict must be callable with the signature"
-                             " from_dict(SerializationContext, dict) -> object")
+            raise ValueError("from_dict must be callable with the signature "
+                             "from_dict(SerializationContext, dict) -> object")
         self._from_dict = from_dict
 
         self._return_record_name = return_record_name
         if not isinstance(self._return_record_name, bool):
             raise ValueError("return_record_name must be a boolean value")
 
-    def __call__(self, value, ctx):
+    def __call__(self, data, ctx):
         """
-        Decodes a Confluent Schema Registry formatted Avro bytes to an object.
+        Deserialize Avro binary encoded data with Confluent Schema Registry framing to
+        a dict, or object instance according to from_dict, if specified.
 
         Arguments:
-            value (bytes): bytes
+            data (bytes): bytes
 
-            ctx (SerializationContext): Metadata pertaining to the serialization
+            ctx (SerializationContext): Metadata relevant to the serialization
                 operation.
 
         Raises:
-            SerializerError: if an error occurs ready data.
+            SerializerError: if an error occurs parsing data.
 
         Returns:
-            object: object if ``from_dict`` is set, otherwise dict. If no value is supplied None is returned.
-
+            object: If data is None, then None. Else, a dict, or object instance according
+                    to from_dict, if specified.
         """  # noqa: E501
-        if value is None:
+
+        if data is None:
             return None
 
-        if len(value) <= 5:
-            raise SerializationError("Message too small. This message was not"
-                                     " produced with a Confluent"
-                                     " Schema Registry serializer")
+        if len(data) <= 5:
+            raise SerializationError("Expecting data framing of length 6 bytes or "
+                                     "more but total data size is {} bytes. This "
+                                     "message was not produced with a Confluent "
+                                     "Schema Registry serializer".format(len(data)))
 
-        with _ContextStringIO(value) as payload:
+        with _ContextStringIO(data) as payload:
             magic, schema_id = unpack('>bI', payload.read(5))
             if magic != _MAGIC_BYTE:
-                raise SerializationError("Unknown magic byte. This message was"
-                                         " not produced with a Confluent"
-                                         " Schema Registry serializer")
+                raise SerializationError("Unexpected magic byte {}. This message "
+                                         "was not produced with a Confluent "
+                                         "Schema Registry serializer".format(magic))
 
             writer_schema = self._writer_schemas.get(schema_id, None)
 
             if writer_schema is None:
-                schema = self._registry.get_schema(schema_id)
-                prepared_schema = _schema_loads(schema.schema_str)
+                registered_schema = self._registry.get_schema(schema_id)
+                self._named_schemas = _resolve_named_schema(registered_schema, self._registry)
+                prepared_schema = _schema_loads(registered_schema.schema_str)
                 writer_schema = parse_schema(loads(
-                    prepared_schema.schema_str))
+                    prepared_schema.schema_str), named_schemas=self._named_schemas)
                 self._writer_schemas[schema_id] = writer_schema
 
             obj_dict = schemaless_reader(payload,
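
A brief, hedged usage sketch for the deserializer above (broker, registry URL and topic are placeholders): because the writer schema is looked up from the schema id embedded in the Confluent framing, a reader schema is optional.

    from confluent_kafka import Consumer
    from confluent_kafka.schema_registry import SchemaRegistryClient
    from confluent_kafka.schema_registry.avro import AvroDeserializer
    from confluent_kafka.serialization import SerializationContext, MessageField

    registry = SchemaRegistryClient({'url': 'http://localhost:8081'})  # placeholder URL
    deserializer = AvroDeserializer(registry)                          # no reader schema supplied

    consumer = Consumer({'bootstrap.servers': 'localhost:9092',        # placeholder broker
                         'group.id': 'example-group',
                         'auto.offset.reset': 'earliest'})
    consumer.subscribe(['example-topic'])                              # placeholder topic

    msg = consumer.poll(10.0)
    if msg is not None and msg.error() is None:
        record = deserializer(msg.value(),
                              SerializationContext(msg.topic(), MessageField.VALUE))
        print(record)  # a dict unless from_dict was supplied

    consumer.close()
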
diff --git a/src/confluent_kafka/schema_registry/json_schema.py b/src/confluent_kafka/schema_registry/json_schema.py
index 1e702df..92fefc6 100644
--- a/src/confluent_kafka/schema_registry/json_schema.py
+++ b/src/confluent_kafka/schema_registry/json_schema.py
@@ -14,14 +14,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
 
 from io import BytesIO
 
 import json
 import struct
 
-from jsonschema import validate, ValidationError
+from jsonschema import validate, ValidationError, RefResolver
 
 from confluent_kafka.schema_registry import (_MAGIC_BYTE,
                                              Schema,
@@ -34,7 +33,6 @@ from confluent_kafka.serialization import (SerializationError,
 class _ContextStringIO(BytesIO):
     """
     Wrapper to allow use of StringIO via 'with' constructs.
-
     """
 
     def __enter__(self):
@@ -45,30 +43,72 @@ class _ContextStringIO(BytesIO):
         return False
 
 
+def _resolve_named_schema(schema, schema_registry_client, named_schemas=None):
+    """
+    Resolves named schemas referenced by the provided schema recursively.
+    :param schema: Schema to resolve named schemas for.
+    :param schema_registry_client: SchemaRegistryClient to use for retrieval.
+    :param named_schemas: Dict of named schemas resolved recursively.
+    :return: named_schemas dict.
+    """
+    if named_schemas is None:
+        named_schemas = {}
+    if schema.references is not None:
+        for ref in schema.references:
+            referenced_schema = schema_registry_client.get_version(ref.subject, ref.version)
+            _resolve_named_schema(referenced_schema.schema, schema_registry_client, named_schemas)
+            referenced_schema_dict = json.loads(referenced_schema.schema.schema_str)
+            named_schemas[ref.name] = referenced_schema_dict
+    return named_schemas
+
+
 class JSONSerializer(Serializer):
     """
-    JsonSerializer serializes objects in the Confluent Schema Registry binary
-    format for JSON.
-
-    JsonSerializer configuration properties:
-
-    +---------------------------+----------+--------------------------------------------------+
-    | Property Name             | Type     | Description                                      |
-    +===========================+==========+==================================================+
-    |                           |          | Registers schemas automatically if not           |
-    | ``auto.register.schemas`` | bool     | previously associated with a particular subject. |
-    |                           |          | Defaults to True.                                |
-    +---------------------------+----------+--------------------------------------------------+
-    |                           |          | Callable(SerializationContext, str) -> str       |
-    |                           |          |                                                  |
-    | ``subject.name.strategy`` | callable | Instructs the JsonSerializer on how to construct |
-    |                           |          | Schema Registry subject names.                   |
-    |                           |          | Defaults to topic_subject_name_strategy.         |
-    +---------------------------+----------+--------------------------------------------------+
-
-    Schemas are registered to namespaces known as Subjects which define how a
-    schema may evolve over time. By default the subject name is formed by
-    concatenating the topic name with the message field separated by a hyphen.
+    Serializer that outputs JSON encoded data with Confluent Schema Registry framing.
+
+    Configuration properties:
+
+    +---------------------------+----------+----------------------------------------------------+
+    | Property Name             | Type     | Description                                        |
+    +===========================+==========+====================================================+
+    |                           |          | If True, automatically register the configured     |
+    | ``auto.register.schemas`` | bool     | schema with Confluent Schema Registry if it has    |
+    |                           |          | not previously been associated with the relevant   |
+    |                           |          | subject (determined via subject.name.strategy).    |
+    |                           |          |                                                    |
+    |                           |          | Defaults to True.                                  |
+    |                           |          |                                                    |
+    |                           |          | Raises SchemaRegistryError if the schema was not   |
+    |                           |          | registered against the subject, or could not be    |
+    |                           |          | successfully registered.                           |
+    +---------------------------+----------+----------------------------------------------------+
+    |                           |          | Whether to normalize schemas, which will           |
+    | ``normalize.schemas``     | bool     | transform schemas to have a consistent format,     |
+    |                           |          | including ordering properties and references.      |
+    +---------------------------+----------+----------------------------------------------------+
+    |                           |          | Whether to use the latest subject version for      |
+    | ``use.latest.version``    | bool     | serialization.                                     |
+    |                           |          |                                                    |
+    |                           |          | WARNING: There is no check that the latest         |
+    |                           |          | schema is backwards compatible with the object     |
+    |                           |          | being serialized.                                  |
+    |                           |          |                                                    |
+    |                           |          | Defaults to False.                                 |
+    +---------------------------+----------+----------------------------------------------------+
+    |                           |          | Callable(SerializationContext, str) -> str         |
+    |                           |          |                                                    |
+    | ``subject.name.strategy`` | callable | Defines how Schema Registry subject names are      |
+    |                           |          | constructed. Standard naming strategies are        |
+    |                           |          | defined in the confluent_kafka.schema_registry     |
+    |                           |          | namespace.                                         |
+    |                           |          |                                                    |
+    |                           |          | Defaults to topic_subject_name_strategy.           |
+    +---------------------------+----------+----------------------------------------------------+
+
+    Schemas are registered against subject names in Confluent Schema Registry that
+    define a scope in which the schemas can be evolved. By default, the subject name
+    is formed by concatenating the topic name with the message field (key or value)
+    separated by a hyphen.
 
     i.e. {topic name}-{message field}
 
@@ -89,16 +129,19 @@ class JSONSerializer(Serializer):
 
     See `Subject name strategy <https://docs.confluent.io/current/schema-registry/serializer-formatter.html#subject-name-strategy>`_ for additional details.
 
-    Note:
-        The ``title`` annotation, referred to as a record name
-        elsewhere in this document, is not strictly required by the JSON Schema
-        specification. It is however required by this Serializer. This
-        annotation(record name) is used to register the Schema with the Schema
-        Registry. See documentation below for additional details on Subjects
-        and schema registration.
+    Notes:
+        The ``title`` annotation, referred to elsewhere as a record name,
+        is not strictly required by the JSON Schema specification. It is
+        however required by this serializer in order to register the schema
+        with Confluent Schema Registry.
+
+        Prior to serialization, all objects must first be converted to
+        a dict instance. This may be handled manually prior to calling
+        :py:func:`Producer.produce()` or by registering a `to_dict`
+        callable with JSONSerializer.
 
     Args:
-        schema_str (str): `JSON Schema definition. <https://json-schema.org/understanding-json-schema/reference/generic.html>`_
+        schema_str (str, Schema): `JSON Schema definition. <https://json-schema.org/understanding-json-schema/reference/generic.html>`_ Accepts schema as either a string or a Schema instance. Note that string definitions cannot reference other schemas. For referencing other schemas, use a Schema instance.
 
         schema_registry_client (SchemaRegistryClient): Schema Registry
             client instance.
@@ -107,30 +150,36 @@ class JSONSerializer(Serializer):
             Converts object to a dict.
 
         conf (dict): JsonSerializer configuration.
-
     """  # noqa: E501
-    __slots__ = ['_hash', '_auto_register', '_known_subjects', '_parsed_schema',
-                 '_registry', '_schema', '_schema_id', '_schema_name',
-                 '_subject_name_func', '_to_dict']
+    __slots__ = ['_hash', '_auto_register', '_normalize_schemas', '_use_latest_version',
+                 '_known_subjects', '_parsed_schema', '_registry', '_schema', '_schema_id',
+                 '_schema_name', '_subject_name_func', '_to_dict', '_are_references_provided']
 
-    # default configuration
     _default_conf = {'auto.register.schemas': True,
+                     'normalize.schemas': False,
+                     'use.latest.version': False,
                      'subject.name.strategy': topic_subject_name_strategy}
 
-    def __init__(self, schema_str, schema_registry_client, to_dict=None,
-                 conf=None):
+    def __init__(self, schema_str, schema_registry_client, to_dict=None, conf=None):
+        self._are_references_provided = False
+        if isinstance(schema_str, str):
+            self._schema = Schema(schema_str, schema_type="JSON")
+        elif isinstance(schema_str, Schema):
+            self._schema = schema_str
+            self._are_references_provided = bool(schema_str.references)
+        else:
+            raise TypeError('You must pass either str or Schema')
+
         self._registry = schema_registry_client
         self._schema_id = None
-        # Avoid calling registry if schema is known to be registered
         self._known_subjects = set()
 
         if to_dict is not None and not callable(to_dict):
-            raise ValueError("to_dict must be callable with the signature"
-                             " to_dict(object, SerializationContext)->dict")
+            raise ValueError("to_dict must be callable with the signature "
+                             "to_dict(object, SerializationContext)->dict")
 
         self._to_dict = to_dict
 
-        # handle configuration
         conf_copy = self._default_conf.copy()
         if conf is not None:
             conf_copy.update(conf)
@@ -139,6 +188,16 @@ class JSONSerializer(Serializer):
         if not isinstance(self._auto_register, bool):
             raise ValueError("auto.register.schemas must be a boolean value")
 
+        self._normalize_schemas = conf_copy.pop('normalize.schemas')
+        if not isinstance(self._normalize_schemas, bool):
+            raise ValueError("normalize.schemas must be a boolean value")
+
+        self._use_latest_version = conf_copy.pop('use.latest.version')
+        if not isinstance(self._use_latest_version, bool):
+            raise ValueError("use.latest.version must be a boolean value")
+        if self._use_latest_version and self._auto_register:
+            raise ValueError("cannot enable both use.latest.version and auto.register.schemas")
+
         self._subject_name_func = conf_copy.pop('subject.name.strategy')
         if not callable(self._subject_name_func):
             raise ValueError("subject.name.strategy must be callable")
@@ -147,53 +206,57 @@ class JSONSerializer(Serializer):
             raise ValueError("Unrecognized properties: {}"
                              .format(", ".join(conf_copy.keys())))
 
-        schema_dict = json.loads(schema_str)
+        schema_dict = json.loads(self._schema.schema_str)
         schema_name = schema_dict.get('title', None)
         if schema_name is None:
             raise ValueError("Missing required JSON schema annotation title")
 
         self._schema_name = schema_name
         self._parsed_schema = schema_dict
-        self._schema = Schema(schema_str, schema_type="JSON")
 
     def __call__(self, obj, ctx):
         """
-        Serializes an object to the Confluent Schema Registry's JSON binary
-        format.
+        Serializes an object to JSON, prepending it with Confluent Schema Registry
+        framing.
 
         Args:
-            obj (object): object instance to serialize.
+            obj (object): The object instance to serialize.
 
-            ctx (SerializationContext): Metadata pertaining to the serialization
+            ctx (SerializationContext): Metadata relevant to the serialization
                 operation.
 
-        Note:
-            None objects are represented as Kafka Null.
-
         Raises:
-            SerializerError if any error occurs serializing obj
+            SerializerError: If any error occurs serializing obj.
 
         Returns:
-            bytes: Confluent Schema Registry formatted JSON bytes
-
+            bytes: None if obj is None, else a byte array containing the JSON
+            serialized data with Confluent Schema Registry framing.
         """
+
         if obj is None:
             return None
 
         subject = self._subject_name_func(ctx, self._schema_name)
 
-        # Check to ensure this schema has been registered under subject_name.
-        if self._auto_register and subject not in self._known_subjects:
-            # The schema name will always be the same. We can't however register
-            # a schema without a subject so we set the schema_id here to handle
-            # the initial registration.
-            self._schema_id = self._registry.register_schema(subject,
-                                                             self._schema)
-            self._known_subjects.add(subject)
-        elif not self._auto_register and subject not in self._known_subjects:
-            registered_schema = self._registry.lookup_schema(subject,
-                                                             self._schema)
-            self._schema_id = registered_schema.schema_id
+        if subject not in self._known_subjects:
+            if self._use_latest_version:
+                latest_schema = self._registry.get_latest_version(subject)
+                self._schema_id = latest_schema.schema_id
+
+            else:
+                # Check to ensure this schema has been registered under subject_name.
+                if self._auto_register:
+                    # The schema name will always be the same. We can't however register
+                    # a schema without a subject so we set the schema_id here to handle
+                    # the initial registration.
+                    self._schema_id = self._registry.register_schema(subject,
+                                                                     self._schema,
+                                                                     self._normalize_schemas)
+                else:
+                    registered_schema = self._registry.lookup_schema(subject,
+                                                                     self._schema,
+                                                                     self._normalize_schemas)
+                    self._schema_id = registered_schema.schema_id
             self._known_subjects.add(subject)
 
         if self._to_dict is not None:
@@ -202,7 +265,14 @@ class JSONSerializer(Serializer):
             value = obj
 
         try:
-            validate(instance=value, schema=self._parsed_schema)
+            if self._are_references_provided:
+                named_schemas = _resolve_named_schema(self._schema, self._registry)
+                validate(instance=value, schema=self._parsed_schema,
+                         resolver=RefResolver(self._parsed_schema.get('$id'),
+                                              self._parsed_schema,
+                                              store=named_schemas))
+            else:
+                validate(instance=value, schema=self._parsed_schema)
         except ValidationError as ve:
             raise SerializationError(ve.message)
 
@@ -218,20 +288,36 @@ class JSONSerializer(Serializer):
 
 class JSONDeserializer(Deserializer):
     """
-    JsonDeserializer decodes bytes written in the Schema Registry
-    JSON format to an object.
+    Deserializer for JSON encoded data with Confluent Schema Registry
+    framing.
 
     Args:
-        schema_str (str): `JSON schema definition <https://json-schema.org/understanding-json-schema/reference/generic.html>`_ use for validating records.
+        schema_str (str, Schema): `JSON schema definition <https://json-schema.org/understanding-json-schema/reference/generic.html>`_ Accepts schema as either a string or a Schema instance. Note that string definitions cannot reference other schemas. For referencing other schemas, use a Schema instance.
 
         from_dict (callable, optional): Callable(dict, SerializationContext) -> object.
-            Converts dict to an instance of some object.
+            Converts a dict to a Python object instance.
 
+        schema_registry_client (SchemaRegistryClient, optional): Schema Registry client instance. Needed if ``schema_str`` is a schema referencing other schemas.
     """  # noqa: E501
-    __slots__ = ['_parsed_schema', '_from_dict']
 
-    def __init__(self, schema_str, from_dict=None):
-        self._parsed_schema = json.loads(schema_str)
+    __slots__ = ['_parsed_schema', '_from_dict', '_registry', '_are_references_provided', '_schema']
+
+    def __init__(self, schema_str, from_dict=None, schema_registry_client=None):
+        self._are_references_provided = False
+        if isinstance(schema_str, str):
+            schema = Schema(schema_str, schema_type="JSON")
+        elif isinstance(schema_str, Schema):
+            schema = schema_str
+            self._are_references_provided = bool(schema_str.references)
+            if self._are_references_provided and schema_registry_client is None:
+                raise ValueError(
+                    """schema_registry_client must be provided if "schema_str" is a Schema instance with references""")
+        else:
+            raise TypeError('You must pass either str or Schema')
+
+        self._parsed_schema = json.loads(schema.schema_str)
+        self._schema = schema
+        self._registry = schema_registry_client
 
         if from_dict is not None and not callable(from_dict):
             raise ValueError("from_dict must be callable with the signature"
@@ -239,44 +325,52 @@ class JSONDeserializer(Deserializer):
 
         self._from_dict = from_dict
 
-    def __call__(self, value, ctx):
+    def __call__(self, data, ctx):
         """
-        Deserializes Schema Registry formatted JSON to JSON object literal(dict).
+        Deserialize a JSON encoded record with Confluent Schema Registry framing to
+        a dict, or object instance according to from_dict if from_dict is specified.
 
         Args:
-            value (bytes): Confluent Schema Registry formatted JSON bytes
+            data (bytes): A JSON serialized record with Confluent Schema Registry framing.
 
-            ctx (SerializationContext): Metadata pertaining to the serialization
-                operation.
+            ctx (SerializationContext): Metadata relevant to the serialization operation.
 
         Returns:
-            dict: Deserialized JSON
+            A dict, or object instance according to from_dict if from_dict is specified.
 
         Raises:
-            SerializerError: If ``value`` cannot be validated by the schema
-                configured with this JsonDeserializer instance.
-
+            SerializerError: If there was an error reading the Confluent framing data, or
+               if ``data`` was not successfully validated with the configured schema.
         """
-        if value is None:
+
+        if data is None:
             return None
 
-        if len(value) <= 5:
-            raise SerializationError("Message too small. This message was not"
-                                     " produced with a Confluent"
-                                     " Schema Registry serializer")
+        if len(data) <= 5:
+            raise SerializationError("Expecting data framing of length 6 bytes or "
+                                     "more but total data size is {} bytes. This "
+                                     "message was not produced with a Confluent "
+                                     "Schema Registry serializer".format(len(data)))
 
-        with _ContextStringIO(value) as payload:
+        with _ContextStringIO(data) as payload:
             magic, schema_id = struct.unpack('>bI', payload.read(5))
             if magic != _MAGIC_BYTE:
-                raise SerializationError("Unknown magic byte. This message was"
-                                         " not produced with a Confluent"
-                                         " Schema Registry serializer")
+                raise SerializationError("Unexpected magic byte {}. This message "
+                                         "was not produced with a Confluent "
+                                         "Schema Registry serializer".format(magic))
 
             # JSON documents are self-describing; no need to query schema
             obj_dict = json.loads(payload.read())
 
             try:
-                validate(instance=obj_dict, schema=self._parsed_schema)
+                if self._are_references_provided:
+                    named_schemas = _resolve_named_schema(self._schema, self._registry)
+                    validate(instance=obj_dict,
+                             schema=self._parsed_schema, resolver=RefResolver(self._parsed_schema.get('$id'),
+                                                                              self._parsed_schema,
+                                                                              store=named_schemas))
+                else:
+                    validate(instance=obj_dict, schema=self._parsed_schema)
             except ValidationError as ve:
                 raise SerializationError(ve.message)
 
diff --git a/src/confluent_kafka/schema_registry/protobuf.py b/src/confluent_kafka/schema_registry/protobuf.py
index d0b8d7d..b1de067 100644
--- a/src/confluent_kafka/schema_registry/protobuf.py
+++ b/src/confluent_kafka/schema_registry/protobuf.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 #
-# Copyright 2020 Confluent Inc.
+# Copyright 2020-2022 Confluent Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,11 +14,12 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
+
 import io
 import sys
 import base64
 import struct
+import warnings
 from collections import deque
 
 from google.protobuf.message import DecodeError
@@ -31,35 +32,33 @@ from .schema_registry_client import (Schema,
                                      SchemaReference)
 from confluent_kafka.serialization import SerializationError
 
-# Converts an int to bytes (opposite of ord)
+
+# Convert an int to bytes (inverse of ord())
 # Python3.chr() -> Unicode
 # Python2.chr() -> str(alias for bytes)
 if sys.version > '3':
-    def _bytes(b):
+    def _bytes(v):
         """
         Convert int to bytes
 
         Args:
-            b (int): int to format as bytes.
-
+            v (int): The int to convert to bytes.
         """
-        return bytes((b,))
+        return bytes((v,))
 else:
-    def _bytes(b):
+    def _bytes(v):
         """
         Convert int to bytes
 
         Args:
-            b (int): int to format as bytes.
-
+            v (int): The int to convert to bytes.
         """
-        return chr(b)
+        return chr(v)
 
 
 class _ContextStringIO(io.BytesIO):
     """
     Wrapper to allow use of StringIO via 'with' constructs.
-
     """
 
     def __enter__(self):
@@ -70,24 +69,26 @@ class _ContextStringIO(io.BytesIO):
         return False
 
 
-def _create_msg_index(msg_desc):
+def _create_index_array(msg_desc):
     """
-    Maps the location of msg_desc within a FileDescriptor.
+    Creates an index array specifying the location of msg_desc in
+    the referenced FileDescriptor.
 
     Args:
         msg_desc (MessageDescriptor): Protobuf MessageDescriptor
 
     Returns:
-        [int]: Protobuf MessageDescriptor index
+        list of int: Protobuf MessageDescriptor index array.
 
     Raises:
         ValueError: If the message descriptor is malformed.
-
     """
+
     msg_idx = deque()
+
+    # Walk the nested MessageDescriptor tree up to the root.
     current = msg_desc
     found = False
-    # Traverse tree upwardly it's root
     while current.containing_type is not None:
         previous = current
         current = previous.containing_type
@@ -100,8 +101,8 @@ def _create_msg_index(msg_desc):
         if not found:
             raise ValueError("Nested MessageDescriptor not found")
 
+    # Add the index of the root MessageDescriptor in the FileDescriptor.
     found = False
-    # find root's position in protofile
     for idx, msg_type_name in enumerate(msg_desc.file.message_types_by_name):
         if msg_type_name == current.name:
             msg_idx.appendleft(idx)
@@ -110,58 +111,95 @@ def _create_msg_index(msg_desc):
     if not found:
         raise ValueError("MessageDescriptor not found in file")
 
-    # The root element at the 0 position does not need a length prefix.
-    if len(msg_idx) == 1 and msg_idx[0] == 0:
-        return [0]
-
-    msg_idx.appendleft(len(msg_idx))
     return list(msg_idx)
 
 
-def _schema_to_str(proto_file):
+def _schema_to_str(file_descriptor):
     """
-    Base64 encodes a FileDescriptor
+    Base64 encode a FileDescriptor
 
     Args:
-        proto_file (FileDescriptor): FileDescriptor to encode.
+        file_descriptor (FileDescriptor): FileDescriptor to encode.
 
     Returns:
         str: Base64 encoded FileDescriptor
-
     """
-    return base64.standard_b64encode(proto_file.serialized_pb).decode('ascii')
+
+    return base64.standard_b64encode(file_descriptor.serialized_pb).decode('ascii')
 
 
 class ProtobufSerializer(object):
     """
-    ProtobufSerializer serializes objects in the Confluent Schema Registry
-    binary format for Protobuf.
+    Serializer for Protobuf Message derived classes. Serialization format is Protobuf,
+    with Confluent Schema Registry framing.
 
-    ProtobufSerializer configuration properties:
+    Configuration properties:
 
     +-------------------------------------+----------+------------------------------------------------------+
     | Property Name                       | Type     | Description                                          |
     +=====================================+==========+======================================================+
-    |                                     |          | Registers schemas automatically if not               |
-    | ``auto.register.schemas``           | bool     | previously associated with a particular subject.     |
+    |                                     |          | If True, automatically register the configured       |
+    | ``auto.register.schemas``           | bool     | schema with Confluent Schema Registry if it has      |
+    |                                     |          | not previously been associated with the relevant     |
+    |                                     |          | subject (determined via subject.name.strategy).      |
+    |                                     |          |                                                      |
     |                                     |          | Defaults to True.                                    |
+    |                                     |          |                                                      |
+    |                                     |          | Raises SchemaRegistryError if the schema was not     |
+    |                                     |          | registered against the subject, or could not be      |
+    |                                     |          | successfully registered.                             |
+    +-------------------------------------+----------+------------------------------------------------------+
+    |                                     |          | Whether to normalize schemas, which will             |
+    | ``normalize.schemas``               | bool     | transform schemas to have a consistent format,       |
+    |                                     |          | including ordering properties and references.        |
+    +-------------------------------------+----------+------------------------------------------------------+
+    |                                     |          | Whether to use the latest subject version for        |
+    | ``use.latest.version``              | bool     | serialization.                                       |
+    |                                     |          |                                                      |
+    |                                     |          | WARNING: There is no check that the latest           |
+    |                                     |          | schema is backwards compatible with the object       |
+    |                                     |          | being serialized.                                    |
+    |                                     |          |                                                      |
+    |                                     |          | Defaults to False.                                   |
+    +-------------------------------------+----------+------------------------------------------------------+
+    |                                     |          | Whether or not to skip known types when resolving    |
+    | ``skip.known.types``                | bool     | schema dependencies.                                 |
+    |                                     |          |                                                      |
+    |                                     |          | Defaults to False.                                   |
     +-------------------------------------+----------+------------------------------------------------------+
     |                                     |          | Callable(SerializationContext, str) -> str           |
     |                                     |          |                                                      |
-    | ``subject.name.strategy``           | callable | Instructs the ProtobufSerializer on how to construct |
-    |                                     |          | Schema Registry subject names.                       |
+    | ``subject.name.strategy``           | callable | Defines how Schema Registry subject names are        |
+    |                                     |          | constructed. Standard naming strategies are          |
+    |                                     |          | defined in the confluent_kafka.schema_registry       |
+    |                                     |          | namespace.                                           |
+    |                                     |          |                                                      |
     |                                     |          | Defaults to topic_subject_name_strategy.             |
     +-------------------------------------+----------+------------------------------------------------------+
     |                                     |          | Callable(SerializationContext, str) -> str           |
     |                                     |          |                                                      |
-    | ``reference.subject.name.strategy`` | callable | Instructs the ProtobufSerializer on how to construct |
-    |                                     |          | Schema Registry subject names for Schema References  |
+    | ``reference.subject.name.strategy`` | callable | Defines how Schema Registry subject names for schema |
+    |                                     |          | references are constructed.                          |
+    |                                     |          |                                                      |
     |                                     |          | Defaults to reference_subject_name_strategy          |
     +-------------------------------------+----------+------------------------------------------------------+
+    | ``use.deprecated.format``           | bool     | Specifies whether the Protobuf serializer should     |
+    |                                     |          | serialize message indexes without zig-zag encoding.  |
+    |                                     |          | This option must be explicitly configured as older   |
+    |                                     |          | and newer Protobuf producers are incompatible.       |
+    |                                     |          | If the consumers of the topic being produced to are  |
+    |                                     |          | using confluent-kafka-python <1.8 then this property |
+    |                                     |          | must be set to True until all old consumers have     |
+    |                                     |          | been upgraded.                                        |
+    |                                     |          |                                                      |
+    |                                     |          | Warning: This configuration property will be removed |
+    |                                     |          | in a future version of the client.                   |
+    +-------------------------------------+----------+------------------------------------------------------+
 
-    Schemas are registered to namespaces known as Subjects which define how a
-    schema may evolve over time. By default the subject name is formed by
-    concatenating the topic name with the message field separated by a hyphen.
+    Schemas are registered against subject names in Confluent Schema Registry that
+    define a scope in which the schemas can be evolved. By default, the subject name
+    is formed by concatenating the topic name with the message field (key or value)
+    separated by a hyphen.
 
     i.e. {topic name}-{message field}
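    For illustration, a minimal sketch of the default naming strategy described above (a usage sketch only; the topic name "orders" and the record name are placeholder assumptions):

        # Sketch: default subject naming is "{topic name}-{message field}".
        from confluent_kafka.serialization import SerializationContext, MessageField
        from confluent_kafka.schema_registry import topic_subject_name_strategy

        ctx = SerializationContext("orders", MessageField.VALUE)
        # The record name argument is ignored by the topic strategy.
        print(topic_subject_name_strategy(ctx, "my.package.Order"))  # -> "orders-value"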
 
@@ -192,20 +230,30 @@ class ProtobufSerializer(object):
 
     See Also:
         `Protobuf API reference <https://googleapis.dev/python/protobuf/latest/google/protobuf.html>`_
-
     """  # noqa: E501
-    __slots__ = ['_auto_register', '_registry', '_known_subjects',
-                 '_msg_class', '_msg_index', '_schema', '_schema_id',
-                 '_ref_reference_subject_func', '_subject_name_func']
-    # default configuration
+    __slots__ = ['_auto_register', '_normalize_schemas', '_use_latest_version', '_skip_known_types',
+                 '_registry', '_known_subjects', '_msg_class', '_index_array', '_schema', '_schema_id',
+                 '_ref_reference_subject_func', '_subject_name_func', '_use_deprecated_format']
+
     _default_conf = {
         'auto.register.schemas': True,
+        'normalize.schemas': False,
+        'use.latest.version': False,
+        'skip.known.types': False,
         'subject.name.strategy': topic_subject_name_strategy,
-        'reference.subject.name.strategy': reference_subject_name_strategy
+        'reference.subject.name.strategy': reference_subject_name_strategy,
+        'use.deprecated.format': False,
     }
 
     def __init__(self, msg_type, schema_registry_client, conf=None):
-        # handle configuration
+
+        if conf is None or 'use.deprecated.format' not in conf:
+            raise RuntimeError(
+                "ProtobufSerializer: the 'use.deprecated.format' configuration "
+                "property must be explicitly set due to backward incompatibility "
+                "with older confluent-kafka-python Protobuf producers and consumers. "
+                "See the release notes for more details")
+
         conf_copy = self._default_conf.copy()
         if conf is not None:
             conf_copy.update(conf)
@@ -214,6 +262,33 @@ class ProtobufSerializer(object):
         if not isinstance(self._auto_register, bool):
             raise ValueError("auto.register.schemas must be a boolean value")
 
+        self._normalize_schemas = conf_copy.pop('normalize.schemas')
+        if not isinstance(self._normalize_schemas, bool):
+            raise ValueError("normalize.schemas must be a boolean value")
+
+        self._use_latest_version = conf_copy.pop('use.latest.version')
+        if not isinstance(self._use_latest_version, bool):
+            raise ValueError("use.latest.version must be a boolean value")
+        if self._use_latest_version and self._auto_register:
+            raise ValueError("cannot enable both use.latest.version and auto.register.schemas")
+
+        self._skip_known_types = conf_copy.pop('skip.known.types')
+        if not isinstance(self._skip_known_types, bool):
+            raise ValueError("skip.known.types must be a boolean value")
+
+        self._use_deprecated_format = conf_copy.pop('use.deprecated.format')
+        if not isinstance(self._use_deprecated_format, bool):
+            raise ValueError("use.deprecated.format must be a boolean value")
+        if self._use_deprecated_format:
+            warnings.warn("ProtobufSerializer: the 'use.deprecated.format' "
+                          "configuration property, and the ability to use the "
+                          "old incorrect Protobuf serializer heading format "
+                          "introduced in confluent-kafka-python v1.4.0, "
+                          "will be removed in an upcoming release in 2021 Q2. "
+                          "Please migrate your Python Protobuf producers and "
+                          "consumers to 'use.deprecated.format':False as "
+                          "soon as possible")
+
         self._subject_name_func = conf_copy.pop('subject.name.strategy')
         if not callable(self._subject_name_func):
             raise ValueError("subject.name.strategy must be callable")
@@ -229,30 +304,54 @@ class ProtobufSerializer(object):
 
         self._registry = schema_registry_client
         self._schema_id = None
-        # Avoid calling registry if schema is known to be registered
         self._known_subjects = set()
         self._msg_class = msg_type
 
         descriptor = msg_type.DESCRIPTOR
-        self._msg_index = _create_msg_index(descriptor)
+        self._index_array = _create_index_array(descriptor)
         self._schema = Schema(_schema_to_str(descriptor.file),
                               schema_type='PROTOBUF')
 
     @staticmethod
-    def _encode_uvarints(buf, ints):
+    def _write_varint(buf, val, zigzag=True):
+        """
+        Writes val to buf, either using zigzag or uvarint encoding.
+
+        Args:
+            buf (BytesIO): buffer to write to.
+            val (int): integer to be encoded.
+            zigzag (bool): whether to encode in zigzag or uvarint encoding
+        """
+
+        if zigzag:
+            val = (val << 1) ^ (val >> 63)
+
+        while (val & ~0x7f) != 0:
+            buf.write(_bytes((val & 0x7f) | 0x80))
+            val >>= 7
+        buf.write(_bytes(val))
+
+    @staticmethod
+    def _encode_varints(buf, ints, zigzag=True):
         """
         Encodes each int as a uvarint onto buf
 
         Args:
             buf (BytesIO): buffer to write to.
             ints ([int]): ints to be encoded.
-
+            zigzag (bool): whether to encode in zigzag or uvarint encoding
         """
+
+        assert len(ints) > 0
+        # The root element at the 0 position does not need a length prefix.
+        if ints == [0]:
+            buf.write(_bytes(0x00))
+            return
+
+        ProtobufSerializer._write_varint(buf, len(ints), zigzag=zigzag)
+
         for value in ints:
-            while (value & ~0x7f) != 0:
-                buf.write(_bytes((value & 0x7f) | 0x80))
-                value >>= 7
-            buf.write(_bytes(value))
+            ProtobufSerializer._write_varint(buf, value, zigzag=zigzag)
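    The framing written by the methods above can be reproduced standalone. The following sketch mirrors _write_varint/_encode_varints for illustration only (zigzag varints, with the root index array [0] collapsed to a single zero byte); it is not part of the library:

        from io import BytesIO

        def write_zigzag_varint(buf, val):
            # Map signed to unsigned (zigzag), then emit 7 bits per byte.
            val = (val << 1) ^ (val >> 63)
            while (val & ~0x7f) != 0:
                buf.write(bytes(((val & 0x7f) | 0x80,)))
                val >>= 7
            buf.write(bytes((val,)))

        def encode_index_array(index):
            buf = BytesIO()
            if index == [0]:
                buf.write(b"\x00")  # root message descriptor: single zero byte
            else:
                write_zigzag_varint(buf, len(index))
                for i in index:
                    write_zigzag_varint(buf, i)
            return buf.getvalue()

        print(encode_index_array([0]).hex())     # '00'
        print(encode_index_array([1, 2]).hex())  # length 2, then 1 and 2, zigzag-encoded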
 
     def _resolve_dependencies(self, ctx, file_desc):
         """
@@ -262,10 +361,12 @@ class ProtobufSerializer(object):
             ctx (SerializationContext): Serialization context.
 
             file_desc (FileDescriptor): file descriptor to traverse.
-
         """
+
         schema_refs = []
         for dep in file_desc.dependencies:
+            if self._skip_known_types and dep.name.startswith("google/protobuf/"):
+                continue
             dep_refs = self._resolve_dependencies(ctx, dep)
             subject = self._ref_reference_subject_func(ctx, dep)
             schema = Schema(_schema_to_str(dep),
@@ -281,93 +382,152 @@ class ProtobufSerializer(object):
                                                reference.version))
         return schema_refs
 
-    def __call__(self, message_type, ctx):
+    def __call__(self, message, ctx):
         """
-        Serializes a Protobuf Message to the Confluent Schema Registry
-        Protobuf binary format.
+        Serializes an instance of a class derived from Protobuf Message, and prepends
+        it with Confluent Schema Registry framing.
 
         Args:
-            message_type (Message): Protobuf message instance.
+            message (Message): An instance of a class derived from Protobuf Message.
 
-            ctx (SerializationContext): Metadata pertaining to the serialization
+            ctx (SerializationContext): Metadata relevant to the serialization.
                 operation.
 
-        Note:
-            None objects are represented as Kafka Null.
-
         Raises:
-            SerializerError if any error occurs serializing obj
+            SerializerError if any error occurs during serialization.
 
         Returns:
-            bytes: Confluent Schema Registry formatted Protobuf bytes
-
+            None if message is None, else a byte array containing the Protobuf
+            serialized message with Confluent Schema Registry framing.
         """
-        if message_type is None:
+
+        if message is None:
             return None
 
-        if not isinstance(message_type, self._msg_class):
+        if not isinstance(message, self._msg_class):
             raise ValueError("message must be of type {} not {}"
-                             .format(self._msg_class, type(message_type)))
+                             .format(self._msg_class, type(message)))
 
         subject = self._subject_name_func(ctx,
-                                          message_type.DESCRIPTOR.full_name)
+                                          message.DESCRIPTOR.full_name)
 
         if subject not in self._known_subjects:
-            self._schema.references = self._resolve_dependencies(
-                ctx, message_type.DESCRIPTOR.file)
+            if self._use_latest_version:
+                latest_schema = self._registry.get_latest_version(subject)
+                self._schema_id = latest_schema.schema_id
 
-            if self._auto_register:
-                self._schema_id = self._registry.register_schema(subject,
-                                                                 self._schema)
             else:
-                self._schema_id = self._registry.lookup_schema(
-                    subject, self._schema).schema_id
+                self._schema.references = self._resolve_dependencies(
+                    ctx, message.DESCRIPTOR.file)
+
+                if self._auto_register:
+                    self._schema_id = self._registry.register_schema(subject,
+                                                                     self._schema,
+                                                                     self._normalize_schemas)
+                else:
+                    self._schema_id = self._registry.lookup_schema(
+                        subject, self._schema, self._normalize_schemas).schema_id
+
+            self._known_subjects.add(subject)
 
         with _ContextStringIO() as fo:
             # Write the magic byte and schema ID in network byte order
             # (big endian)
             fo.write(struct.pack('>bI', _MAGIC_BYTE, self._schema_id))
-            # write the record index to the buffer
-            self._encode_uvarints(fo, self._msg_index)
-            # write the record itself
-            fo.write(message_type.SerializeToString())
+            # write the index array that specifies the message descriptor
+            # of the serialized data.
+            self._encode_varints(fo, self._index_array,
+                                 zigzag=not self._use_deprecated_format)
+            # write the serialized data itself
+            fo.write(message.SerializeToString())
             return fo.getvalue()
 
 
 class ProtobufDeserializer(object):
     """
-    ProtobufDeserializer decodes bytes written in the Schema Registry
-    Protobuf format to an object.
+    Deserializer for Protobuf serialized data with Confluent Schema Registry framing.
 
     Args:
-        message_type (GeneratedProtocolMessageType): Protobuf Message type.
+        message_type (Message derived type): Protobuf Message type.
+        conf (dict): Configuration dictionary.
+
+    ProtobufDeserializer configuration properties:
+
+    +-------------------------------------+----------+------------------------------------------------------+
+    | Property Name                       | Type     | Description                                          |
+    +-------------------------------------+----------+------------------------------------------------------+
+    | ``use.deprecated.format``           | bool     | Specifies whether the Protobuf deserializer should   |
+    |                                     |          | deserialize message indexes without zig-zag encoding.|
+    |                                     |          | This option must be explicitly configured as older   |
+    |                                     |          | and newer Protobuf producers are incompatible.       |
+    |                                     |          | If Protobuf messages in the topic to consume were    |
+    |                                     |          | produced with confluent-kafka-python <1.8 then this  |
+    |                                     |          | property must be set to True until all old messages  |
+    |                                     |          | have been processed and producers have been upgraded.|
+    |                                     |          | Warning: This configuration property will be removed |
+    |                                     |          | in a future version of the client.                   |
+    +-------------------------------------+----------+------------------------------------------------------+
+
 
     See Also:
     `Protobuf API reference <https://googleapis.dev/python/protobuf/latest/google/protobuf.html>`_
-
     """
-    __slots__ = ['_msg_class', '_msg_index']
 
-    def __init__(self, message_type):
+    __slots__ = ['_msg_class', '_index_array', '_use_deprecated_format']
+
+    _default_conf = {
+        'use.deprecated.format': False,
+    }
+
+    def __init__(self, message_type, conf=None):
+
+        # Require use.deprecated.format to be explicitly configured
+        # during a transitional period since the old and new formats
+        # are incompatible.
+        if conf is None or 'use.deprecated.format' not in conf:
+            raise RuntimeError(
+                "ProtobufDeserializer: the 'use.deprecated.format' configuration "
+                "property must be explicitly set due to backward incompatibility "
+                "with older confluent-kafka-python Protobuf producers and consumers. "
+                "See the release notes for more details")
+
+        conf_copy = self._default_conf.copy()
+        if conf is not None:
+            conf_copy.update(conf)
+
+        self._use_deprecated_format = conf_copy.pop('use.deprecated.format')
+        if not isinstance(self._use_deprecated_format, bool):
+            raise ValueError("use.deprecated.format must be a boolean value")
+        if self._use_deprecated_format:
+            warnings.warn("ProtobufDeserializer: the 'use.deprecated.format' "
+                          "configuration property, and the ability to use the "
+                          "old incorrect Protobuf serializer heading format "
+                          "introduced in confluent-kafka-python v1.4.0, "
+                          "will be removed in an upcoming release in 2022 Q2. "
+                          "Please migrate your Python Protobuf producers and "
+                          "consumers to 'use.deprecated.format':False as "
+                          "soon as possible")
+
         descriptor = message_type.DESCRIPTOR
-        self._msg_index = _create_msg_index(descriptor)
+        self._index_array = _create_index_array(descriptor)
         self._msg_class = MessageFactory().GetPrototype(descriptor)
 
     @staticmethod
-    def _decode_uvarint(buf):
+    def _decode_varint(buf, zigzag=True):
         """
-        Decodes a single uvarint from a buffer.
+        Decodes a single varint from a buffer.
 
         Args:
             buf (BytesIO): buffer to read from
+            zigzag (bool): decode as zigzag or uvarint
 
         Returns:
-            int: decoded uvarint
+            int: decoded varint
 
         Raises:
             EOFError: if buffer is empty
-
         """
+
         value = 0
         shift = 0
         try:
@@ -377,7 +537,12 @@ class ProtobufDeserializer(object):
                 value |= (i & 0x7f) << shift
                 shift += 7
                 if not (i & 0x80):
-                    return value
+                    break
+
+            if zigzag:
+                value = (value >> 1) ^ -(value & 1)
+
+            return value
 
         except EOFError:
             raise EOFError("Unexpected EOF while reading index")
@@ -385,75 +550,86 @@ class ProtobufDeserializer(object):
     @staticmethod
     def _read_byte(buf):
         """
-        Returns int representation for a byte.
+        Read one byte from buf as an int.
 
         Args:
-            buf (BytesIO): buffer to read from
+            buf (BytesIO): The buffer to read from.
 
         .. _ord:
             https://docs.python.org/2/library/functions.html#ord
         """
+
         i = buf.read(1)
         if i == b'':
             raise EOFError("Unexpected EOF encountered")
         return ord(i)
 
     @staticmethod
-    def _decode_index(buf):
+    def _read_index_array(buf, zigzag=True):
         """
-        Extracts message index from Schema Registry Protobuf formatted bytes.
+        Read an index array from buf that specifies the message
+        descriptor of interest in the file descriptor.
 
         Args:
-            buf (BytesIO): byte buffer
+            buf (BytesIO): The buffer to read from.
 
         Returns:
-            int: Protobuf Message index.
-
+            list of int: The index array.
         """
-        size = ProtobufDeserializer._decode_uvarint(buf)
-        msg_index = [size]
+
+        size = ProtobufDeserializer._decode_varint(buf, zigzag=zigzag)
+        if size < 0 or size > 100000:
+            raise DecodeError("Invalid Protobuf msgidx array length")
+
+        if size == 0:
+            return [0]
+
+        msg_index = []
         for _ in range(size):
-            msg_index.append(ProtobufDeserializer._decode_uvarint(buf))
+            msg_index.append(ProtobufDeserializer._decode_varint(buf,
+                                                                 zigzag=zigzag))
 
         return msg_index
 
-    def __call__(self, value, ctx):
+    def __call__(self, data, ctx):
         """
-        Deserializes Schema Registry formatted Protobuf to Protobuf Message.
+        Deserialize a serialized protobuf message with Confluent Schema Registry
+        framing.
 
         Args:
-            value (bytes): Confluent Schema Registry formatted Protobuf bytes.
+            data (bytes): Serialized protobuf message with Confluent Schema
+                           Registry framing.
 
-            ctx (SerializationContext): Metadata pertaining to the serialization
+            ctx (SerializationContext): Metadata relevant to the serialization
                 operation.
 
         Returns:
             Message: Protobuf Message instance.
 
         Raises:
-            SerializerError: If response payload and expected Message type
-            differ.
-
+            SerializerError: If there was an error reading the Confluent framing
+                data, or parsing the protobuf serialized message.
         """
-        if value is None:
+
+        if data is None:
             return None
 
         # SR wire protocol + msg_index length
-        if len(value) < 6:
-            raise SerializationError("Message too small. This message was not"
-                                     " produced with a Confluent"
-                                     " Schema Registry serializer")
+        if len(data) < 6:
+            raise SerializationError("Expecting data framing of length 6 bytes or "
+                                     "more but total data size is {} bytes. This "
+                                     "message was not produced with a Confluent "
+                                     "Schema Registry serializer".format(len(data)))
 
-        with _ContextStringIO(value) as payload:
+        with _ContextStringIO(data) as payload:
             magic, schema_id = struct.unpack('>bI', payload.read(5))
             if magic != _MAGIC_BYTE:
-                raise SerializationError("Unknown magic byte. This message was"
-                                         " not produced with a Confluent"
-                                         " Schema Registry serializer")
+                raise SerializationError("Unknown magic byte. This message was "
+                                         "not produced with a Confluent "
+                                         "Schema Registry serializer")
 
             # Protobuf Messages are self-describing; no need to query schema
-            # Move the reader cursor past the index
-            _ = ProtobufDeserializer._decode_index(payload)
+            _ = self._read_index_array(payload, zigzag=not self._use_deprecated_format)
             msg = self._msg_class()
             try:
                 msg.ParseFromString(payload.read())
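    Taken together, the changes to this module mean producers and consumers must now opt in explicitly via 'use.deprecated.format'. A minimal construction sketch (the generated module order_pb2 and the registry URL are assumptions for illustration):

        from confluent_kafka.schema_registry import SchemaRegistryClient
        from confluent_kafka.schema_registry.protobuf import (ProtobufSerializer,
                                                              ProtobufDeserializer)
        import order_pb2  # hypothetical protoc-generated module

        sr_client = SchemaRegistryClient({'url': 'http://localhost:8081'})

        # Omitting 'use.deprecated.format' now raises RuntimeError.
        serializer = ProtobufSerializer(order_pb2.Order, sr_client,
                                        {'use.deprecated.format': False})
        deserializer = ProtobufDeserializer(order_pb2.Order,
                                            {'use.deprecated.format': False})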
diff --git a/src/confluent_kafka/schema_registry/schema_registry_client.py b/src/confluent_kafka/schema_registry/schema_registry_client.py
index 2e40131..c414c7e 100644
--- a/src/confluent_kafka/schema_registry/schema_registry_client.py
+++ b/src/confluent_kafka/schema_registry/schema_registry_client.py
@@ -26,6 +26,7 @@ from requests import (Session,
 
 from .error import SchemaRegistryError
 
+
 # TODO: consider adding `six` dependency or employing a compat file
 # Python 2.7 is officially EOL so compatibility issue will be come more the norm.
 # We need a better way to handle these issues.
@@ -57,8 +58,8 @@ class _RestClient(object):
 
     Args:
         conf (dict): Dictionary containing _RestClient configuration
-
     """
+
     def __init__(self, conf):
         self.session = Session()
 
@@ -153,8 +154,8 @@ class _RestClient(object):
 
         Returns:
             dict: Schema Registry response content.
-
         """
+
         headers = {'Accept': "application/vnd.schemaregistry.v1+json,"
                              " application/vnd.schemaregistry+json,"
                              " application/json"}
@@ -188,8 +189,8 @@ class _SchemaCache(object):
 
     This cache may be used to retrieve schema ids, schemas or to check
     known subject membership.
-
     """
+
     def __init__(self):
         self.lock = Lock()
         self.schema_id_index = {}
@@ -209,8 +210,8 @@ class _SchemaCache(object):
 
         Returns:
             int: The schema_id
-
         """
+
         with self.lock:
             self.schema_id_index[schema_id] = schema
             self.schema_index[schema] = schema_id
@@ -226,8 +227,8 @@ class _SchemaCache(object):
 
         Returns:
             Schema: The schema if known; else None
-
         """
+
         return self.schema_id_index.get(schema_id, None)
 
     def get_schema_id_by_subject(self, subject, schema):
@@ -241,8 +242,8 @@ class _SchemaCache(object):
 
         Returns:
             int: Schema ID if known; else None
-
         """
+
         with self.lock:
             if schema in self.subject_schemas[subject]:
                 return self.schema_index.get(schema, None)
@@ -250,9 +251,9 @@ class _SchemaCache(object):
 
 class SchemaRegistryClient(object):
     """
-    Schema Registry Client.
+    A Confluent Schema Registry client.
 
-    SchemaRegistryClient configuration properties (* indicates a required field):
+    Configuration properties (* indicates a required field):
 
     +------------------------------+------+-------------------------------------------------+
     | Property name                | type | Description                                     |
@@ -286,8 +287,8 @@ class SchemaRegistryClient(object):
 
     See Also:
         `Confluent Schema Registry documentation <http://confluent.io/docs/current/schema-registry/docs/intro.html>`_
-
     """  # noqa: E501
+
     def __init__(self, conf):
         self._rest_client = _RestClient(conf)
         self._cache = _SchemaCache()
@@ -299,7 +300,7 @@ class SchemaRegistryClient(object):
         if self._rest_client is not None:
             self._rest_client._close()
 
-    def register_schema(self, subject_name, schema):
+    def register_schema(self, subject_name, schema, normalize_schemas=False):
         """
         Registers a schema under ``subject_name``.
 
@@ -317,8 +318,8 @@ class SchemaRegistryClient(object):
 
         See Also:
             `POST Subject API Reference <https://docs.confluent.io/current/schema-registry/develop/api.html#post--subjects-(string-%20subject)-versions>`_
-
         """  # noqa: E501
+
         schema_id = self._cache.get_schema_id_by_subject(subject_name, schema)
         if schema_id is not None:
             return schema_id
@@ -334,7 +335,7 @@ class SchemaRegistryClient(object):
                                      for ref in schema.references]
 
         response = self._rest_client.post(
-            'subjects/{}/versions'.format(_urlencode(subject_name)),
+            'subjects/{}/versions?normalize={}'.format(_urlencode(subject_name), normalize_schemas),
             body=request)
 
         schema_id = response['id']
@@ -359,8 +360,8 @@ class SchemaRegistryClient(object):
 
         See Also:
          `GET Schema API Reference <https://docs.confluent.io/current/schema-registry/develop/api.html#get--schemas-ids-int-%20id>`_
-
         """  # noqa: E501
+
         schema = self._cache.get_schema(schema_id)
         if schema is not None:
             return schema
@@ -369,18 +370,16 @@ class SchemaRegistryClient(object):
         schema = Schema(schema_str=response['schema'],
                         schema_type=response.get('schemaType', 'AVRO'))
 
-        refs = []
-        for ref in response.get('references', []):
-            refs.append(SchemaReference(name=ref['name'],
-                                        subject=ref['subject'],
-                                        version=ref['version']))
-        schema.references = refs
+        schema.references = [
+            SchemaReference(name=ref['name'], subject=ref['subject'], version=ref['version'])
+            for ref in response.get('references', [])
+        ]
 
         self._cache.set(schema_id, schema)
 
         return schema
 
-    def lookup_schema(self, subject_name, schema):
+    def lookup_schema(self, subject_name, schema, normalize_schemas=False):
         """
         Returns ``schema`` registration information for ``subject``.
 
@@ -397,8 +396,8 @@ class SchemaRegistryClient(object):
 
         See Also:
             `POST Subject API Reference <https://docs.confluent.io/current/schema-registry/develop/api.html#post--subjects-(string-%20subject)-versions>`_
-
         """  # noqa: E501
+
         request = {'schema': schema.schema_str}
 
         # CP 5.5 adds new fields (for JSON and Protobuf).
@@ -409,8 +408,8 @@ class SchemaRegistryClient(object):
                                       'version': ref.version}
                                      for ref in schema.references]
 
-        response = self._rest_client.post('subjects/{}'
-                                          .format(_urlencode(subject_name)),
+        response = self._rest_client.post('subjects/{}?normalize={}'
+                                          .format(_urlencode(subject_name), normalize_schemas),
                                           body=request)
 
         schema_type = response.get('schemaType', 'AVRO')
@@ -418,7 +417,12 @@ class SchemaRegistryClient(object):
         return RegisteredSchema(schema_id=response['id'],
                                 schema=Schema(response['schema'],
                                               schema_type,
-                                              response.get('references', [])),
+                                              [
+                                                  SchemaReference(name=ref['name'],
+                                                                  subject=ref['subject'],
+                                                                  version=ref['version'])
+                                                  for ref in response.get('references', [])
+                                              ]),
                                 subject=response['subject'],
                                 version=response['version'])
 
@@ -434,8 +438,8 @@ class SchemaRegistryClient(object):
 
         See Also:
             `GET subjects API Reference <https://docs.confluent.io/current/schema-registry/develop/api.html#get--subjects-(string-%20subject)-versions>`_
-
         """  # noqa: E501
+
         return self._rest_client.get('subjects')
 
     def delete_subject(self, subject_name, permanent=False):
@@ -456,8 +460,8 @@ class SchemaRegistryClient(object):
 
         See Also:
             `DELETE Subject API Reference <https://docs.confluent.io/current/schema-registry/develop/api.html#delete--subjects-(string-%20subject)>`_
-
         """  # noqa: E501
+
         list = self._rest_client.delete('subjects/{}'
                                         .format(_urlencode(subject_name)))
 
@@ -482,8 +486,8 @@ class SchemaRegistryClient(object):
 
         See Also:
             `GET Subject Version API Reference <https://docs.confluent.io/current/schema-registry/develop/api.html#get--subjects-(string-%20subject)-versions-(versionId-%20version)>`_
-
         """  # noqa: E501
+
         response = self._rest_client.get('subjects/{}/versions/{}'
                                          .format(_urlencode(subject_name),
                                                  'latest'))
@@ -492,7 +496,12 @@ class SchemaRegistryClient(object):
         return RegisteredSchema(schema_id=response['id'],
                                 schema=Schema(response['schema'],
                                               schema_type,
-                                              response.get('references', [])),
+                                              [
+                                                  SchemaReference(name=ref['name'],
+                                                                  subject=ref['subject'],
+                                                                  version=ref['version'])
+                                                  for ref in response.get('references', [])
+                                              ]),
                                 subject=response['subject'],
                                 version=response['version'])
 
@@ -513,8 +522,8 @@ class SchemaRegistryClient(object):
 
         See Also:
             `GET Subject Version API Reference <https://docs.confluent.io/current/schema-registry/develop/api.html#get--subjects-(string-%20subject)-versions-(versionId-%20version)>`_
-
         """  # noqa: E501
+
         response = self._rest_client.get('subjects/{}/versions/{}'
                                          .format(_urlencode(subject_name),
                                                  version))
@@ -523,7 +532,12 @@ class SchemaRegistryClient(object):
         return RegisteredSchema(schema_id=response['id'],
                                 schema=Schema(response['schema'],
                                               schema_type,
-                                              response.get('references', [])),
+                                              [
+                                                  SchemaReference(name=ref['name'],
+                                                                  subject=ref['subject'],
+                                                                  version=ref['version'])
+                                                  for ref in response.get('references', [])
+                                              ]),
                                 subject=response['subject'],
                                 version=response['version'])
 
@@ -542,8 +556,8 @@ class SchemaRegistryClient(object):
 
         See Also:
             `GET Subject Versions API Reference <https://docs.confluent.io/current/schema-registry/develop/api.html#post--subjects-(string-%20subject)-versions>`_
-
         """  # noqa: E501
+
         return self._rest_client.get('subjects/{}/versions'.format(_urlencode(subject_name)))
 
     def delete_version(self, subject_name, version):
@@ -563,8 +577,8 @@ class SchemaRegistryClient(object):
 
         See Also:
             `Delete Subject Version API Reference <https://docs.confluent.io/current/schema-registry/develop/api.html#delete--subjects-(string-%20subject)-versions-(versionId-%20version)>`_
-
         """  # noqa: E501
+
         response = self._rest_client.delete('subjects/{}/versions/{}'.
                                             format(_urlencode(subject_name),
                                                    version))
@@ -589,8 +603,8 @@ class SchemaRegistryClient(object):
 
         See Also:
             `PUT Subject Compatibility API Reference <https://docs.confluent.io/current/schema-registry/develop/api.html#put--config-(string-%20subject)>`_
-
         """  # noqa: E501
+
         if level is None:
             raise ValueError("level must be set")
 
@@ -618,8 +632,8 @@ class SchemaRegistryClient(object):
 
         See Also:
             `GET Subject Compatibility API Reference <https://docs.confluent.io/current/schema-registry/develop/api.html#get--config-(string-%20subject)>`_
-
         """  # noqa: E501
+
         if subject_name is not None:
             url = 'config/{}'.format(_urlencode(subject_name))
         else:
@@ -647,6 +661,7 @@ class SchemaRegistryClient(object):
         See Also:
             `POST Test Compatibility API Reference <https://docs.confluent.io/current/schema-registry/develop/api.html#post--compatibility-subjects-(string-%20subject)-versions-(versionId-%20version)>`_
         """  # noqa: E501
+
         request = {"schema": schema.schema_str}
         if schema.schema_type != "AVRO":
             request['schemaType'] = schema.schema_type
@@ -671,12 +686,11 @@ class Schema(object):
     Args:
         schema_str (str): String representation of the schema.
 
-        references ([SchemaReference]): SchemaReferences used in this schema.
-
         schema_type (str): The schema type: AVRO, PROTOBUF or JSON.
 
+        references ([SchemaReference]): SchemaReferences used in this schema.
     """
-    __slots__ = ['schema_str', 'references', 'schema_type', '_hash']
+    __slots__ = ['schema_str', 'schema_type', 'references', '_hash']
 
     def __init__(self, schema_str, schema_type, references=[]):
         super(Schema, self).__init__()
@@ -709,8 +723,8 @@ class RegisteredSchema(object):
         subject (str): Subject this schema is registered under
 
         version (int): Version of this subject this schema is registered to
-
     """
+
     def __init__(self, schema_id, schema, subject, version):
         self.schema_id = schema_id
         self.schema = schema
@@ -732,8 +746,8 @@ class SchemaReference(object):
         subject (str): Subject this Schema is registered with
 
         version (int): This Schema's version
-
     """
+
     def __init__(self, name, subject, version):
         super(SchemaReference, self).__init__()
         self.name = name
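    A short sketch of the new normalize_schemas flag threaded through register_schema() and lookup_schema() (it defaults to False, so existing callers are unaffected; the URL and schema string below are placeholders):

        from confluent_kafka.schema_registry import SchemaRegistryClient, Schema

        client = SchemaRegistryClient({'url': 'http://localhost:8081'})
        avro_schema = Schema('{"type": "string"}', schema_type='AVRO')

        schema_id = client.register_schema('example-value', avro_schema,
                                           normalize_schemas=True)
        registered = client.lookup_schema('example-value', avro_schema,
                                          normalize_schemas=True)
        print(schema_id, registered.version)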
diff --git a/src/confluent_kafka/serialization/__init__.py b/src/confluent_kafka/serialization/__init__.py
index 0a5f596..13cfc1d 100644
--- a/src/confluent_kafka/serialization/__init__.py
+++ b/src/confluent_kafka/serialization/__init__.py
@@ -39,8 +39,9 @@ class MessageField(object):
         KEY (str): Message key
 
         VALUE (str): Message value
-
     """
+
+    NONE = 'none'
     KEY = 'key'
     VALUE = 'value'
 
@@ -56,10 +57,13 @@ class SerializationContext(object):
         field (MessageField): Describes what part of the message is
             being serialized.
 
+        headers (list): List of message header tuples. Defaults to None.
     """
-    def __init__(self, topic, field):
+
+    def __init__(self, topic, field, headers=None):
         self.topic = topic
         self.field = field
+        self.headers = headers
 
 
 class SerializationError(KafkaException):
@@ -101,11 +105,11 @@ class Serializer(object):
         * - StringSerializer
           - unicode
           - unicode(encoding)
-
     """
+
     __slots__ = []
 
-    def __call__(self, obj, ctx):
+    def __call__(self, obj, ctx=None):
         """
         Converts obj to bytes.
 
@@ -120,8 +124,8 @@ class Serializer(object):
 
         Returns:
             bytes if obj is not None, otherwise None
-
         """
+
         raise NotImplementedError
 
 
@@ -158,11 +162,11 @@ class Deserializer(object):
         * - StringDeserializer
           - unicode
           - unicode(encoding)
-
     """
+
     __slots__ = []
 
-    def __call__(self, value, ctx):
+    def __call__(self, value, ctx=None):
         """
         Convert bytes to object
 
@@ -177,8 +181,8 @@ class Deserializer(object):
 
         Returns:
             object if data is not None, otherwise None
-
         """
+
         raise NotImplementedError
 
 
@@ -190,7 +194,8 @@ class DoubleSerializer(Serializer):
         `DoubleSerializer Javadoc <https://docs.confluent.io/current/clients/javadocs/org/apache/kafka/common/serialization/DoubleSerializer.html>`_
 
     """  # noqa: E501
-    def __call__(self, obj, ctx):
+
+    def __call__(self, obj, ctx=None):
         """
         Args:
             obj (object): object to be serialized
@@ -206,8 +211,8 @@ class DoubleSerializer(Serializer):
 
         Returns:
             IEEE 764 binary64 bytes if obj is not None, otherwise None
-
         """
+
         if obj is None:
             return None
 
@@ -223,9 +228,9 @@ class DoubleDeserializer(Deserializer):
 
     See Also:
         `DoubleDeserializer Javadoc <https://docs.confluent.io/current/clients/javadocs/org/apache/kafka/common/serialization/DoubleDeserializer.html>`_
-
     """  # noqa: E501
-    def __call__(self, value, ctx):
+
+    def __call__(self, value, ctx=None):
         """
         Deserializes float from IEEE 764 binary64 bytes.
 
@@ -240,8 +245,8 @@ class DoubleDeserializer(Deserializer):
 
         Returns:
             float if data is not None, otherwise None
-
         """
+
         if value is None:
             return None
 
@@ -257,9 +262,9 @@ class IntegerSerializer(Serializer):
 
     See Also:
         `IntegerSerializer Javadoc <https://docs.confluent.io/current/clients/javadocs/org/apache/kafka/common/serialization/IntegerSerializer.html>`_
-
     """  # noqa: E501
-    def __call__(self, obj, ctx):
+
+    def __call__(self, obj, ctx=None):
         """
         Serializes int as int32 bytes.
 
@@ -277,8 +282,8 @@ class IntegerSerializer(Serializer):
 
         Returns:
             int32 bytes if obj is not None, else None
-
         """
+
         if obj is None:
             return None
 
@@ -294,9 +299,9 @@ class IntegerDeserializer(Deserializer):
 
     See Also:
         `IntegerDeserializer Javadoc <https://docs.confluent.io/current/clients/javadocs/org/apache/kafka/common/serialization/IntegerDeserializer.html>`_
-
     """  # noqa: E501
-    def __call__(self, value, ctx):
+
+    def __call__(self, value, ctx=None):
         """
         Deserializes int from int32 bytes.
 
@@ -311,8 +316,8 @@ class IntegerDeserializer(Deserializer):
 
         Returns:
             int if data is not None, otherwise None
-
         """
+
         if value is None:
             return None
 
@@ -336,12 +341,12 @@ class StringSerializer(Serializer):
         `Supported encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_
 
         `StringSerializer Javadoc <https://docs.confluent.io/current/clients/javadocs/org/apache/kafka/common/serialization/StringSerializer.html>`_
-
     """  # noqa: E501
+
     def __init__(self, codec='utf_8'):
         self.codec = codec
 
-    def __call__(self, obj, ctx):
+    def __call__(self, obj, ctx=None):
         """
         Serializes a str(py2:unicode) to bytes.
 
@@ -360,8 +365,8 @@ class StringSerializer(Serializer):
 
         Returns:
             serialized bytes if obj is not None, otherwise None
-
         """
+
         if obj is None:
             return None
 
@@ -382,12 +387,12 @@ class StringDeserializer(Deserializer):
         `Supported encodings <https://docs.python.org/3/library/codecs.html#standard-encodings>`_
 
         `StringDeserializer Javadoc <https://docs.confluent.io/current/clients/javadocs/org/apache/kafka/common/serialization/StringDeserializer.html>`_
-
     """  # noqa: E501
+
     def __init__(self, codec='utf_8'):
         self.codec = codec
 
-    def __call__(self, value, ctx):
+    def __call__(self, value, ctx=None):
         """
         Serializes unicode to bytes per the configured codec. Defaults to ``utf_8``.
 
@@ -408,8 +413,8 @@ class StringDeserializer(Deserializer):
 
         Returns:
             unicode if data is not None, otherwise None
-
         """
+
         if value is None:
             return None
 
diff --git a/src/confluent_kafka/serializing_producer.py b/src/confluent_kafka/serializing_producer.py
index fee253b..3b3ff82 100644
--- a/src/confluent_kafka/serializing_producer.py
+++ b/src/confluent_kafka/serializing_producer.py
@@ -25,43 +25,20 @@ from .error import (KeySerializationError,
 
 class SerializingProducer(_ProducerImpl):
     """
-    A high level Kafka Producer with serialization capabilities.
+    A high level Kafka producer with serialization capabilities.
 
-    Note:
+    `This class is experimental and likely to be removed, or subject to incompatible API
+    changes in future versions of the library. To avoid breaking changes on upgrading, we
+    recommend using serializers directly.`
 
-        The SerializingProducer is an experimental API and subject to change.
+    Derived from the :py:class:`Producer` class, overriding the :py:func:`Producer.produce`
+    method to add serialization capabilities.
 
-    The SerializingProducer is thread safe and sharing a single instance across
-    threads will generally be more efficient than having multiple instances.
-
-    The :py:func:`SerializingProducer.produce` method is asynchronous.
-    When called it adds the message to a queue of pending messages and
-    immediately returns. This allows the Producer to batch together individual
-    messages for efficiency.
-
-    The Producer will automatically retry failed produce requests up to
-    ``message.timeout.ms`` .
-
-    .. versionadded:: 1.4.0
-
-        The Transactional Producer allows an application to send messages to
-        multiple partitions (and topics) atomically.
-
-        The ``key.serializer`` and ``value.serializer`` classes instruct the
-        SerializingProducer on how to convert the message payload to bytes.
-
-    Note:
-
-        All configured callbacks are served from the application queue upon
-        calling :py:func:`SerializingProducer.poll` or :py:func:`SerializingProducer.flush`
-
-    Notable SerializingProducer configuration properties(* indicates required field)
+    Additional configuration properties:
 
     +-------------------------+---------------------+-----------------------------------------------------+
     | Property Name           | Type                | Description                                         |
     +=========================+=====================+=====================================================+
-    | ``bootstrap.servers`` * | str                 | Comma-separated list of brokers.                    |
-    +-------------------------+---------------------+-----------------------------------------------------+
     |                         |                     | Callable(obj, SerializationContext) -> bytes        |
     | ``key.serializer``      | callable            |                                                     |
     |                         |                     | Serializer used for message keys.                   |
@@ -70,36 +47,25 @@ class SerializingProducer(_ProducerImpl):
     | ``value.serializer``    | callable            |                                                     |
     |                         |                     | Serializer used for message values.                 |
     +-------------------------+---------------------+-----------------------------------------------------+
-    |                         |                     | Callable(KafkaError)                                |
-    |                         |                     |                                                     |
-    | ``error_cb``            | callable            | Callback for generic/global error events. These     |
-    |                         |                     | errors are typically to be considered informational |
-    |                         |                     | since the client will automatically try to recover. |
-    +-------------------------+---------------------+-----------------------------------------------------+
-    | ``log_cb``              | ``logging.Handler`` | Logging handler to forward logs                     |
-    +-------------------------+---------------------+-----------------------------------------------------+
-    |                         |                     | Callable(str)                                       |
-    |                         |                     |                                                     |
-    |                         |                     | Callback for statistics. This callback is           |
-    | ``stats_cb``            | callable            | added to the application queue every                |
-    |                         |                     | ``statistics.interval.ms`` (configured separately). |
-    |                         |                     | The function argument is a JSON formatted str       |
-    |                         |                     | containing statistics data.                         |
-    +-------------------------+---------------------+-----------------------------------------------------+
-    |                         |                     | Callable(ThrottleEvent)                             |
-    | ``throttle_cb``         | callable            |                                                     |
-    |                         |                     | Callback for throttled request reporting.           |
-    |                         |                     | Callback for throttled request reporting.           |
-    +-------------------------+---------------------+-----------------------------------------------------+
+
+    Serializers for string, integer and double (:py:class:`StringSerializer`, :py:class:`IntegerSerializer`
+    and :py:class:`DoubleSerializer`) are supplied out-of-the-box in the ``confluent_kafka.serialization``
+    namespace.
+
+    Serializers for Protobuf, JSON Schema and Avro (:py:class:`ProtobufSerializer`, :py:class:`JSONSerializer`
+    and :py:class:`AvroSerializer`) with Confluent Schema Registry integration are supplied out-of-the-box
+    in the ``confluent_kafka.schema_registry`` namespace.
 
     See Also:
-        - `CONFIGURATION.md <https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md>`_ for additional configuration property details.
-        - `STATISTICS.md <https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md>`_ for detailed information about the statistics handled by stats_cb
+        - The :ref:`Configuration Guide <pythonclient_configuration>` for in depth information on how to configure the client.
+        - `CONFIGURATION.md <https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md>`_ for a comprehensive set of configuration properties.
+        - `STATISTICS.md <https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md>`_ for detailed information on the statistics provided by stats_cb
+        - The :py:class:`Producer` class for inherited methods.
 
     Args:
         conf (producer): SerializingProducer configuration.
-
     """  # noqa E501
+
     def __init__(self, conf):
         conf_copy = conf.copy()
 
@@ -111,56 +77,56 @@ class SerializingProducer(_ProducerImpl):
     def produce(self, topic, key=None, value=None, partition=-1,
                 on_delivery=None, timestamp=0, headers=None):
         """
-        Produce message to topic.
+        Produce a message.
 
-        This is an asynchronous operation, an application may use the
+        This is an asynchronous operation. An application may use the
         ``on_delivery`` argument to pass a function (or lambda) that will be
         called from :py:func:`SerializingProducer.poll` when the message has
         been successfully delivered or permanently fails delivery.
 
-        Currently message headers are not supported on the message returned to
-        the callback. The ``msg.headers()`` will return None even if the
-        original message had headers set.
+        Note:
+            Currently message headers are not supported on the message returned to
+            the callback. The ``msg.headers()`` will return None even if the
+            original message had headers set.
 
         Args:
             topic (str): Topic to produce message to.
 
-            key (object, optional): Message key.
+            key (object, optional): Message payload key.
 
-            value (object, optional): Message payload.
+            value (object, optional): Message payload value.
 
-            partition (int, optional): Partition to produce to, else uses the
-                configured built-in partitioner.
+            partition (int, optional): Partition to produce to, else the
+                configured built-in partitioner will be used.
 
             on_delivery (callable(KafkaError, Message), optional): Delivery
-                report callback to call (from
+                report callback. Called as a side effect of
                 :py:func:`SerializingProducer.poll` or
                 :py:func:`SerializingProducer.flush` on successful or
                 failed delivery.
 
-            timestamp (float, optional): Message timestamp (CreateTime) in ms
-                since epoch UTC (requires broker >= 0.10.0.0). Default value
-                is current time.
+            timestamp (float, optional): Message timestamp (CreateTime) in
+                milliseconds since Unix epoch UTC (requires broker >= 0.10.0.0).
+                Default value is current time.
 
-            headers (dict, optional): Message headers to set on the message.
-                The header key must be a str while the value must be binary,
-                unicode or None. (Requires broker version >= 0.11.0.0)
+            headers (dict, optional): Message headers. The header key must be
+                a str while the value must be binary, unicode or None. (Requires
+                broker version >= 0.11.0.0)
 
         Raises:
             BufferError: if the internal producer message queue is full.
-                ( ``queue.buffering.max.messages`` exceeded). If this happens
+                (``queue.buffering.max.messages`` exceeded). If this happens
                 the application should call :py:func:`SerializingProducer.Poll`
                 and try again.
 
             KeySerializationError: If an error occurs during key serialization.
 
-            ValueSerializationError: If an error occurs during value
-            serialization.
+            ValueSerializationError: If an error occurs during value serialization.
 
             KafkaException: For all other errors
-
         """
-        ctx = SerializationContext(topic, MessageField.KEY)
+
+        ctx = SerializationContext(topic, MessageField.KEY, headers)
         if self._key_serializer is not None:
             try:
                 key = self._key_serializer(key, ctx)
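    A minimal usage sketch of the slimmed-down SerializingProducer configuration documented above (broker address, topic and header values are placeholders; message headers are now forwarded to the SerializationContext):

        from confluent_kafka import SerializingProducer
        from confluent_kafka.serialization import StringSerializer

        producer = SerializingProducer({
            'bootstrap.servers': 'localhost:9092',
            'key.serializer': StringSerializer('utf_8'),
            'value.serializer': StringSerializer('utf_8'),
        })

        producer.produce('example-topic', key='user-1', value='hello',
                         headers={'trace-id': b'abc123'})
        producer.flush()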
diff --git a/src/confluent_kafka/src/Admin.c b/src/confluent_kafka/src/Admin.c
index ee0d68b..25a0e1e 100644
--- a/src/confluent_kafka/src/Admin.c
+++ b/src/confluent_kafka/src/Admin.c
@@ -69,23 +69,34 @@ static int Admin_traverse (Handle *self,
  */
 #define Admin_options_def_int   (-12345)
 #define Admin_options_def_float ((float)Admin_options_def_int)
+#define Admin_options_def_ptr   (NULL)
+#define Admin_options_def_cnt   (0)
 
 struct Admin_options {
-        int   validate_only;      /* needs special bool parsing */
-        float request_timeout;    /* parser: f */
-        float operation_timeout;  /* parser: f */
-        int   broker;             /* parser: i */
+        int   validate_only;                            /* needs special bool parsing */
+        float request_timeout;                          /* parser: f */
+        float operation_timeout;                        /* parser: f */
+        int   broker;                                   /* parser: i */
+        int require_stable_offsets;                     /* needs special bool parsing */
+        rd_kafka_consumer_group_state_t* states;
+        int states_cnt;
 };
 
 /**@brief "unset" value initializers for Admin_options
  * Make sure this is kept up to date with Admin_options above. */
-#define Admin_options_INITIALIZER {                                     \
-                Admin_options_def_int, Admin_options_def_float,         \
-                        Admin_options_def_float, Admin_options_def_int, \
-                        }
+#define Admin_options_INITIALIZER {              \
+                Admin_options_def_int,           \
+                Admin_options_def_float,         \
+                Admin_options_def_float,         \
+                Admin_options_def_int,           \
+                Admin_options_def_int,           \
+                Admin_options_def_ptr,           \
+                Admin_options_def_cnt,           \
+        }
 
 #define Admin_options_is_set_int(v) ((v) != Admin_options_def_int)
 #define Admin_options_is_set_float(v) Admin_options_is_set_int((int)(v))
+#define Admin_options_is_set_ptr(v) ((v) != NULL)
 
 
 /**
@@ -104,6 +115,7 @@ Admin_options_to_c (Handle *self, rd_kafka_admin_op_t for_api,
                     PyObject *future) {
         rd_kafka_AdminOptions_t *c_options;
         rd_kafka_resp_err_t err;
+        rd_kafka_error_t *err_obj = NULL;
         char errstr[512];
 
         c_options = rd_kafka_AdminOptions_new(self->rk, for_api);
@@ -141,16 +153,89 @@ Admin_options_to_c (Handle *self, rd_kafka_admin_op_t for_api,
                     errstr, sizeof(errstr))))
                 goto err;
 
+        if (Admin_options_is_set_int(options->require_stable_offsets) &&
+            (err_obj = rd_kafka_AdminOptions_set_require_stable_offsets(
+                    c_options, options->require_stable_offsets))) {
+                strcpy(errstr, rd_kafka_error_string(err_obj));
+                goto err;
+        }
+
+        if (Admin_options_is_set_ptr(options->states) &&
+            (err_obj = rd_kafka_AdminOptions_set_match_consumer_group_states(
+                c_options, options->states, options->states_cnt))) {
+                strcpy(errstr, rd_kafka_error_string(err_obj));
+                goto err;
+        }
+
         return c_options;
 
  err:
-        rd_kafka_AdminOptions_destroy(c_options);
+        if (c_options) rd_kafka_AdminOptions_destroy(c_options);
         PyErr_Format(PyExc_ValueError, "%s", errstr);
+        if(err_obj) {
+                rd_kafka_error_destroy(err_obj);
+        }
         return NULL;
 }
 
 
+/**
+ * @brief Convert py AclBinding to C
+ */
+static rd_kafka_AclBinding_t *
+Admin_py_to_c_AclBinding (const PyObject *py_obj_arg,
+                        char *errstr,
+                        size_t errstr_size) {
+        int restype, resource_pattern_type, operation, permission_type;
+        char *resname = NULL, *principal = NULL, *host = NULL;
+        rd_kafka_AclBinding_t *ret = NULL;
+
+        PyObject *py_obj = (PyObject *) py_obj_arg;
+        if(cfl_PyObject_GetInt(py_obj, "restype_int", &restype, 0, 1)
+            && cfl_PyObject_GetString(py_obj, "name", &resname, NULL, 1, 0)
+            && cfl_PyObject_GetInt(py_obj, "resource_pattern_type_int", &resource_pattern_type, 0, 1)
+            && cfl_PyObject_GetString(py_obj, "principal", &principal, NULL, 1, 0)
+            && cfl_PyObject_GetString(py_obj, "host", &host, NULL, 1, 0)
+            && cfl_PyObject_GetInt(py_obj, "operation_int", &operation, 0, 1)
+            && cfl_PyObject_GetInt(py_obj, "permission_type_int", &permission_type, 0, 1)) {
+                    ret = rd_kafka_AclBinding_new(restype, resname, \
+                        resource_pattern_type, principal, host, \
+                        operation, permission_type, errstr, errstr_size);
+        }
+        if (resname) free(resname);
+        if (principal) free(principal);
+        if (host) free(host);
+        return ret;
+}
 
+/**
+ * @brief Convert py AclBindingFilter to C
+ */
+static rd_kafka_AclBindingFilter_t*
+Admin_py_to_c_AclBindingFilter (const PyObject *py_obj_arg,
+                        char *errstr,
+                        size_t errstr_size) {
+        int restype, resource_pattern_type, operation, permission_type;
+        char *resname = NULL, *principal = NULL, *host = NULL;
+        PyObject *py_obj = (PyObject *) py_obj_arg;
+        rd_kafka_AclBindingFilter_t* ret = NULL;
+
+        if(cfl_PyObject_GetInt(py_obj, "restype_int", &restype, 0, 1)
+            && cfl_PyObject_GetString(py_obj, "name", &resname, NULL, 1, 1)
+            && cfl_PyObject_GetInt(py_obj, "resource_pattern_type_int", &resource_pattern_type, 0, 1)
+            && cfl_PyObject_GetString(py_obj, "principal", &principal, NULL, 1, 1)
+            && cfl_PyObject_GetString(py_obj, "host", &host, NULL, 1, 1)
+            && cfl_PyObject_GetInt(py_obj, "operation_int", &operation, 0, 1)
+            && cfl_PyObject_GetInt(py_obj, "permission_type_int", &permission_type, 0, 1)) {
+                    ret = rd_kafka_AclBindingFilter_new(restype, resname, \
+                        resource_pattern_type, principal, host, \
+                        operation, permission_type, errstr, errstr_size);
+        }
+        if (resname) free(resname);
+        if (principal) free(principal);
+        if (host) free(host);
+        return ret;
+}
 
 /**
  * @brief Translate Python list(list(int)) replica assignments and set
@@ -333,6 +418,7 @@ static PyObject *Admin_create_topics (Handle *self, PyObject *args,
         rd_kafka_AdminOptions_t *c_options = NULL;
         int tcnt;
         int i;
+        int topic_partition_count;
         rd_kafka_NewTopic_t **c_objs;
         rd_kafka_queue_t *rkqu;
         CallState cs;
@@ -407,10 +493,16 @@ static PyObject *Admin_create_topics (Handle *self, PyObject *args,
                                 goto err;
                         }
 
+                        if (newt->num_partitions == -1) {
+                                topic_partition_count = PyList_Size(newt->replica_assignment);
+                        } else {
+                                topic_partition_count = newt->num_partitions;
+                        }
                         if (!Admin_set_replica_assignment(
                                     "CreateTopics", (void *)c_objs[i],
                                     newt->replica_assignment,
-                                    newt->num_partitions, newt->num_partitions,
+                                    topic_partition_count,
+                                    topic_partition_count, 
                                     "num_partitions")) {
                                 i++;
                                 goto err;
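
For reference, the hunk above lets the partition count fall back to the size of the replica assignment when num_partitions is left unset (-1). A hedged sketch of the call this enables, assuming NewTopic accepts replica_assignment together with the default num_partitions; broker ids and names are illustrative:

    from confluent_kafka.admin import AdminClient, NewTopic

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder

    # Two partitions described solely by their replica assignments:
    # partition 0 -> brokers 1,2 and partition 1 -> brokers 1,3.
    topic = NewTopic("example-topic",
                     replica_assignment=[[1, 2], [1, 3]])

    futures = admin.create_topics([topic])
    futures["example-topic"].result()   # None on success, raises on error
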
@@ -770,7 +862,7 @@ static PyObject *Admin_describe_configs (Handle *self, PyObject *args,
                 if (!cfl_PyObject_GetInt(res, "restype_int", &restype, 0, 0))
                         goto err;
 
-                if (!cfl_PyObject_GetString(res, "name", &resname, NULL, 0))
+                if (!cfl_PyObject_GetString(res, "name", &resname, NULL, 0, 0))
                         goto err;
 
                 c_objs[i] = rd_kafka_ConfigResource_new(
@@ -912,7 +1004,7 @@ static PyObject *Admin_alter_configs (Handle *self, PyObject *args,
                 if (!cfl_PyObject_GetInt(res, "restype_int", &restype, 0, 0))
                         goto err;
 
-                if (!cfl_PyObject_GetString(res, "name", &resname, NULL, 0))
+                if (!cfl_PyObject_GetString(res, "name", &resname, NULL, 0, 0))
                         goto err;
 
                 c_objs[i] = rd_kafka_ConfigResource_new(
@@ -930,7 +1022,7 @@ static PyObject *Admin_alter_configs (Handle *self, PyObject *args,
                  * Translate and apply config entries in the various dicts.
                  */
                 if (!cfl_PyObject_GetAttr(res, "set_config_dict", &dict,
-                                          &PyDict_Type, 1)) {
+                                          &PyDict_Type, 1, 0)) {
                         i++;
                         goto err;
                 }
@@ -977,184 +1069,1177 @@ static PyObject *Admin_alter_configs (Handle *self, PyObject *args,
 }
 
 
-
 /**
- * @brief Call rd_kafka_poll() and keep track of crashing callbacks.
- * @returns -1 if callback crashed (or poll() failed), else the number
- * of events served.
+ * @brief create_acls
  */
-static int Admin_poll0 (Handle *self, int tmout) {
-        int r;
+static PyObject *Admin_create_acls (Handle *self, PyObject *args, PyObject *kwargs) {
+        PyObject *acls_list, *future;
+        int cnt, i = 0;
+        struct Admin_options options = Admin_options_INITIALIZER;
+        PyObject *AclBinding_type = NULL;
+        rd_kafka_AdminOptions_t *c_options = NULL;
+        rd_kafka_AclBinding_t **c_objs = NULL;
         CallState cs;
+        rd_kafka_queue_t *rkqu;
+        char errstr[512];
 
-        CallState_begin(self, &cs);
+        static char *kws[] = {"acls",
+                             "future",
+                             /* options */
+                             "request_timeout",
+                             NULL};
 
-        r = rd_kafka_poll(self->rk, tmout);
+        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|f", kws,
+                                         &acls_list,
+                                         &future,
+                                         &options.request_timeout))
+                goto err;
 
-        if (!CallState_end(self, &cs)) {
-                return -1;
+        if (!PyList_Check(acls_list) ||
+            (cnt = (int)PyList_Size(acls_list)) < 1) {
+                PyErr_SetString(PyExc_ValueError,
+                        "Expected non-empty list of AclBinding "
+                        "objects");
+                goto err;
         }
 
-        return r;
-}
-
 
-static PyObject *Admin_poll (Handle *self, PyObject *args,
-                             PyObject *kwargs) {
-        double tmout;
-        int r;
-        static char *kws[] = { "timeout", NULL };
+        /* Look up the AclBinding class so we can check that the provided
+         * ACL bindings are of the correct type.
+         * Since this is not in the fast path we treat ourselves
+         * to the luxury of looking up this for each call. */
+        AclBinding_type = cfl_PyObject_lookup("confluent_kafka.admin",
+                                                  "AclBinding");
+        if (!AclBinding_type) {
+                goto err;
+        }
 
-        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "d", kws, &tmout))
-                return NULL;
+        c_options = Admin_options_to_c(self, RD_KAFKA_ADMIN_OP_CREATEACLS,
+                                       &options, future);
+        if (!c_options)
+                goto err; /* Exception raised by options_to_c() */
 
-        r = Admin_poll0(self, (int)(tmout * 1000));
-        if (r == -1)
-                return NULL;
+        /* options_to_c() sets future as the opaque, which is used in the
+         * background_event_cb to set the results on the future as the
+         * admin operation is finished, so we need to keep our own refcount. */
+        Py_INCREF(future);
 
-        return cfl_PyInt_FromInt(r);
-}
+        /*
+         * Parse the list of AclBinding and convert to
+         * corresponding C types.
+         */
+        c_objs = malloc(sizeof(*c_objs) * cnt);
 
+        for (i = 0 ; i < cnt ; i++) {
+                int r;
+                PyObject *res = PyList_GET_ITEM(acls_list, i);
 
+                r = PyObject_IsInstance(res, AclBinding_type);
+                if (r == -1)
+                        goto err; /* Exception raised by IsInstance() */
+                else if (r == 0) {
+                        PyErr_SetString(PyExc_ValueError,
+                                        "Expected list of "
+                                        "AclBinding objects");
+                        goto err;
+                }
 
-static PyMethodDef Admin_methods[] = {
-        { "create_topics", (PyCFunction)Admin_create_topics,
-          METH_VARARGS|METH_KEYWORDS,
-          ".. py:function:: create_topics(topics, future, [validate_only, request_timeout, operation_timeout])\n"
-          "\n"
-          "  Create new topics.\n"
-          "\n"
-          "  This method should not be used directly, use confluent_kafka.AdminClient.create_topics()\n"
-        },
 
-        { "delete_topics", (PyCFunction)Admin_delete_topics,
-          METH_VARARGS|METH_KEYWORDS,
-          ".. py:function:: delete_topics(topics, future, [request_timeout, operation_timeout])\n"
-          "\n"
-          "  This method should not be used directly, use confluent_kafka.AdminClient.delete_topics()\n"
-        },
+                c_objs[i] = Admin_py_to_c_AclBinding(res, errstr, sizeof(errstr));
+                if (!c_objs[i]) {
+                        PyErr_SetString(PyExc_ValueError, errstr);
+                        goto err;
+                }
+        }
 
-        { "create_partitions", (PyCFunction)Admin_create_partitions,
-          METH_VARARGS|METH_KEYWORDS,
-          ".. py:function:: create_partitions(topics, future, [validate_only, request_timeout, operation_timeout])\n"
-          "\n"
-          "  This method should not be used directly, use confluent_kafka.AdminClient.create_partitions()\n"
-        },
+        /* Use librdkafka's background thread queue to automatically dispatch
+        * Admin_background_event_cb() when the admin operation is finished. */
+        rkqu = rd_kafka_queue_get_background(self->rk);
 
-        { "describe_configs", (PyCFunction)Admin_describe_configs,
-          METH_VARARGS|METH_KEYWORDS,
-          ".. py:function:: describe_configs(resources, future, [request_timeout, broker])\n"
-          "\n"
-          "  This method should not be used directly, use confluent_kafka.AdminClient.describe_configs()\n"
-        },
+        /*
+         * Call CreateAcls
+         *
+         * We need to set up a CallState and release GIL here since
+         * the event_cb may be triggered immediately.
+         */
+        CallState_begin(self, &cs);
+        rd_kafka_CreateAcls(self->rk, c_objs, cnt, c_options, rkqu);
+        CallState_end(self, &cs);
 
-        { "alter_configs", (PyCFunction)Admin_alter_configs,
-          METH_VARARGS|METH_KEYWORDS,
-          ".. py:function:: alter_configs(resources, future, [request_timeout, broker])\n"
-          "\n"
-          "  This method should not be used directly, use confluent_kafka.AdminClient.alter_configs()\n"
-        },
+        rd_kafka_queue_destroy(rkqu); /* drop reference from get_background */
+        rd_kafka_AclBinding_destroy_array(c_objs, cnt);
+        free(c_objs);
+        Py_DECREF(AclBinding_type); /* from lookup() */
+        rd_kafka_AdminOptions_destroy(c_options);
 
+        Py_RETURN_NONE;
+err:
+        if (c_objs) {
+                rd_kafka_AclBinding_destroy_array(c_objs, i);
+                free(c_objs);
+        }
+        if (AclBinding_type) Py_DECREF(AclBinding_type);
+        if (c_options) {
+                rd_kafka_AdminOptions_destroy(c_options);
+                Py_DECREF(future);
+        }
+        return NULL;
+}
 
-        { "poll", (PyCFunction)Admin_poll, METH_VARARGS|METH_KEYWORDS,
-          ".. py:function:: poll([timeout])\n"
-          "\n"
-          "  Polls the Admin client for event callbacks, such as error_cb, "
-          "stats_cb, etc, if registered.\n"
-          "\n"
-          "  There is no need to call poll() if no callbacks have been registered.\n"
-          "\n"
-          "  :param float timeout: Maximum time to block waiting for events. (Seconds)\n"
-          "  :returns: Number of events processed (callbacks served)\n"
-          "  :rtype: int\n"
-          "\n"
-        },
 
-        { "list_topics", (PyCFunction)list_topics, METH_VARARGS|METH_KEYWORDS,
-          list_topics_doc
-        },
+static const char Admin_create_acls_doc[] = PyDoc_STR(
+        ".. py:function:: create_acls(acl_bindings, future, [request_timeout])\n"
+        "\n"
+        "  Create a list of ACL bindings.\n"
+        "\n"
+        "  This method should not be used directly, use confluent_kafka.AdminClient.create_acls()\n"
+);
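
As a usage sketch of the public wrapper mentioned in the docstring above (enum members, principal and host values are illustrative, and the dict-of-futures return shape is assumed from the AdminClient docs):

    from confluent_kafka.admin import (AdminClient, AclBinding, ResourceType,
                                       ResourcePatternType, AclOperation,
                                       AclPermissionType)

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder

    acl = AclBinding(ResourceType.TOPIC, "example-topic",
                     ResourcePatternType.LITERAL,
                     "User:alice", "*",
                     AclOperation.READ, AclPermissionType.ALLOW)

    # One future per requested binding; result() is None on success.
    for binding, fut in admin.create_acls([acl]).items():
        fut.result()
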
 
-        { "list_groups", (PyCFunction)list_groups, METH_VARARGS|METH_KEYWORDS,
-          list_groups_doc
-        },
 
-        { NULL }
-};
+/**
+ * @brief describe_acls
+ */
+static PyObject *Admin_describe_acls (Handle *self, PyObject *args, PyObject *kwargs) {
+        PyObject *acl_binding_filter, *future;
+        int r;
+        struct Admin_options options = Admin_options_INITIALIZER;
+        PyObject *AclBindingFilter_type = NULL;
+        rd_kafka_AdminOptions_t *c_options = NULL;
+        rd_kafka_AclBindingFilter_t *c_obj = NULL;
+        CallState cs;
+        rd_kafka_queue_t *rkqu;
+        char errstr[512];
 
+        static char *kws[] = {"acl_binding_filter",
+                             "future",
+                             /* options */
+                             "request_timeout",
+                             NULL};
 
-static Py_ssize_t Admin__len__ (Handle *self) {
-        return rd_kafka_outq_len(self->rk);
-}
+        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|f", kws,
+                                         &acl_binding_filter,
+                                         &future,
+                                         &options.request_timeout))
+                goto err;
 
 
-static PySequenceMethods Admin_seq_methods = {
-        (lenfunc)Admin__len__ /* sq_length */
-};
+        /* Look up the AclBindingFilter class so we can check that the provided
+         * ACL binding filter is of the correct type.
+         * Since this is not in the fast path we treat ourselves
+         * to the luxury of looking up this for each call. */
+        AclBindingFilter_type = cfl_PyObject_lookup("confluent_kafka.admin",
+                                                  "AclBindingFilter");
+        if (!AclBindingFilter_type) {
+                goto err;
+        }
 
+        c_options = Admin_options_to_c(self, RD_KAFKA_ADMIN_OP_DESCRIBEACLS,
+                                       &options, future);
+        if (!c_options)
+                goto err; /* Exception raised by options_to_c() */
 
-/**
- * @brief Convert C topic_result_t array to topic-indexed dict.
- */
-static PyObject *
-Admin_c_topic_result_to_py (const rd_kafka_topic_result_t **c_result,
-                            size_t cnt) {
-        PyObject *result;
-        size_t ti;
+        /* options_to_c() sets future as the opaque, which is used in the
+         * background_event_cb to set the results on the future as the
+         * admin operation is finished, so we need to keep our own refcount. */
+        Py_INCREF(future);
 
-        result = PyDict_New();
+        /*
+         * convert the AclBindingFilter to the
+         * corresponding C type.
+         */
+        r = PyObject_IsInstance(acl_binding_filter, AclBindingFilter_type);
+        if (r == -1)
+                goto err; /* Exception raised by IsInstance() */
+        else if (r == 0) {
+                PyErr_SetString(PyExc_TypeError,
+                                "Expected an "
+                                "AclBindingFilter object");
+                goto err;
+        }
 
-        for (ti = 0 ; ti < cnt ; ti++) {
-                PyObject *error;
+        c_obj = Admin_py_to_c_AclBindingFilter(acl_binding_filter, errstr, sizeof(errstr));
+        if (!c_obj) {
+                PyErr_SetString(PyExc_ValueError, errstr);
+                goto err;
+        }
 
-                error = KafkaError_new_or_None(
-                        rd_kafka_topic_result_error(c_result[ti]),
-                        rd_kafka_topic_result_error_string(c_result[ti]));
+        /* Use librdkafka's background thread queue to automatically dispatch
+        * Admin_background_event_cb() when the admin operation is finished. */
+        rkqu = rd_kafka_queue_get_background(self->rk);
 
-                PyDict_SetItemString(
-                        result,
-                        rd_kafka_topic_result_name(c_result[ti]),
-                        error);
+        /*
+         * Call DescribeAcls
+         *
+         * We need to set up a CallState and release GIL here since
+         * the event_cb may be triggered immediately.
+         */
+        CallState_begin(self, &cs);
+        rd_kafka_DescribeAcls(self->rk, c_obj, c_options, rkqu);
+        CallState_end(self, &cs);
 
-                Py_DECREF(error);
+        rd_kafka_queue_destroy(rkqu); /* drop reference from get_background */
+        rd_kafka_AclBinding_destroy(c_obj);
+        Py_DECREF(AclBindingFilter_type); /* from lookup() */
+        rd_kafka_AdminOptions_destroy(c_options);
+        Py_RETURN_NONE;
+err:
+        if(AclBindingFilter_type) Py_DECREF(AclBindingFilter_type);
+        if(c_options) {
+                rd_kafka_AdminOptions_destroy(c_options);
+                Py_DECREF(future);
         }
-
-        return result;
+        return NULL;
 }
 
 
+static const char Admin_describe_acls_doc[] = PyDoc_STR(
+        ".. py:function:: describe_acls(acl_binding_filter, future, [request_timeout])\n"
+        "\n"
+        "  Get a list of ACL bindings matching an ACL binding filter.\n"
+        "\n"
+        "  This method should not be used directly, use confluent_kafka.AdminClient.describe_acls()\n"
+);
 
 /**
- * @brief Convert C ConfigEntry array to dict of py ConfigEntry objects.
+ * @brief delete_acls
  */
-static PyObject *
-Admin_c_ConfigEntries_to_py (PyObject *ConfigEntry_type,
-                             const rd_kafka_ConfigEntry_t **c_configs,
-                             size_t config_cnt) {
-        PyObject *dict;
-        size_t ci;
+static PyObject *Admin_delete_acls (Handle *self, PyObject *args, PyObject *kwargs) {
+        PyObject *acls_list, *future;
+        int cnt, i = 0;
+        struct Admin_options options = Admin_options_INITIALIZER;
+        PyObject *AclBindingFilter_type = NULL;
+        rd_kafka_AdminOptions_t *c_options = NULL;
+        rd_kafka_AclBindingFilter_t **c_objs = NULL;
+        CallState cs;
+        rd_kafka_queue_t *rkqu;
+        char errstr[512];
 
-        dict = PyDict_New();
+        static char *kws[] = {"acls",
+                             "future",
+                             /* options */
+                             "request_timeout",
+                             NULL};
 
-        for (ci = 0 ; ci < config_cnt ; ci++) {
-                PyObject *kwargs, *args;
-                const rd_kafka_ConfigEntry_t *ent = c_configs[ci];
-                const rd_kafka_ConfigEntry_t **c_synonyms;
-                PyObject *entry, *synonyms;
-                size_t synonym_cnt;
-                const char *val;
+        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|f", kws,
+                                         &acls_list,
+                                         &future,
+                                         &options.request_timeout))
+                goto err;
 
-                kwargs = PyDict_New();
+        if (!PyList_Check(acls_list) ||
+            (cnt = (int)PyList_Size(acls_list)) < 1) {
+                PyErr_SetString(PyExc_ValueError,
+                        "Expected non-empty list of AclBindingFilter "
+                        "objects");
+                goto err;
+        }
 
-                cfl_PyDict_SetString(kwargs, "name",
-                                     rd_kafka_ConfigEntry_name(ent));
-                val = rd_kafka_ConfigEntry_value(ent);
-                if (val)
-                        cfl_PyDict_SetString(kwargs, "value", val);
-                else
-                        PyDict_SetItemString(kwargs, "value", Py_None);
-                cfl_PyDict_SetInt(kwargs, "source",
-                                  (int)rd_kafka_ConfigEntry_source(ent));
-                cfl_PyDict_SetInt(kwargs, "is_read_only",
+
+        /* Look up the AclBindingFilter class so we can check that the provided
+         * ACL binding filters are of the correct type.
+         * Since this is not in the fast path we treat ourselves
+         * to the luxury of looking up this for each call. */
+        AclBindingFilter_type = cfl_PyObject_lookup("confluent_kafka.admin",
+                                                  "AclBindingFilter");
+        if (!AclBindingFilter_type) {
+                goto err;
+        }
+
+        c_options = Admin_options_to_c(self, RD_KAFKA_ADMIN_OP_DELETEACLS,
+                                       &options, future);
+        if (!c_options)
+                goto err; /* Exception raised by options_to_c() */
+
+        /* options_to_c() sets future as the opaque, which is used in the
+         * background_event_cb to set the results on the future as the
+         * admin operation is finished, so we need to keep our own refcount. */
+        Py_INCREF(future);
+
+        /*
+         * Parse the list of AclBindingFilter and convert to
+         * corresponding C types.
+         */
+        c_objs = malloc(sizeof(*c_objs) * cnt);
+
+        for (i = 0 ; i < cnt ; i++) {
+                int r;
+                PyObject *res = PyList_GET_ITEM(acls_list, i);
+
+                r = PyObject_IsInstance(res, AclBindingFilter_type);
+                if (r == -1)
+                        goto err; /* Exception raised by IsInstance() */
+                else if (r == 0) {
+                        PyErr_SetString(PyExc_ValueError,
+                                        "Expected list of "
+                                        "AclBindingFilter objects");
+                        goto err;
+                }
+
+
+                c_objs[i] = Admin_py_to_c_AclBindingFilter(res, errstr, sizeof(errstr));
+                if (!c_objs[i]) {
+                        PyErr_SetString(PyExc_ValueError, errstr);
+                        goto err;
+                }
+        }
+
+        /* Use librdkafka's background thread queue to automatically dispatch
+        * Admin_background_event_cb() when the admin operation is finished. */
+        rkqu = rd_kafka_queue_get_background(self->rk);
+
+        /*
+         * Call DeleteAcls
+         *
+         * We need to set up a CallState and release GIL here since
+         * the event_cb may be triggered immediately.
+         */
+        CallState_begin(self, &cs);
+        rd_kafka_DeleteAcls(self->rk, c_objs, cnt, c_options, rkqu);
+        CallState_end(self, &cs);
+
+        rd_kafka_queue_destroy(rkqu); /* drop reference from get_background */
+        rd_kafka_AclBinding_destroy_array(c_objs, cnt);
+        free(c_objs);
+        Py_DECREF(AclBindingFilter_type); /* from lookup() */
+        rd_kafka_AdminOptions_destroy(c_options);
+
+        Py_RETURN_NONE;
+err:
+        if (c_objs) {
+                rd_kafka_AclBinding_destroy_array(c_objs, i);
+                free(c_objs);
+        }
+        if(AclBindingFilter_type) Py_DECREF(AclBindingFilter_type);
+        if (c_options) {
+                rd_kafka_AdminOptions_destroy(c_options);
+                Py_DECREF(future);
+        }
+        return NULL;
+}
+
+
+static const char Admin_delete_acls_doc[] = PyDoc_STR(
+        ".. py:function:: delete_acls(acl_binding_filters, future, [request_timeout])\n"
+        "\n"
+        "  Deletes ACL bindings matching one or more ACL binding filters.\n"
+        "\n"
+        "  This method should not be used directly, use confluent_kafka.AdminClient.delete_acls()\n"
+);
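
Both describe_acls and delete_acls take AclBindingFilter objects, where None acts as a wildcard for name, principal and host. A hedged sketch of how the two public wrappers are typically used together (filter values are examples only):

    from confluent_kafka.admin import (AdminClient, AclBindingFilter, ResourceType,
                                       ResourcePatternType, AclOperation,
                                       AclPermissionType)

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder

    # Match every ACL defined on any topic, for any principal/host/operation.
    topic_filter = AclBindingFilter(ResourceType.TOPIC, None,
                                    ResourcePatternType.ANY,
                                    None, None,
                                    AclOperation.ANY, AclPermissionType.ANY)

    matching = admin.describe_acls(topic_filter).result()        # list of AclBinding
    deleted = admin.delete_acls([topic_filter])[topic_filter].result()
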
+
+
+/**
+ * @brief List consumer groups
+ */
+PyObject *Admin_list_consumer_groups (Handle *self, PyObject *args, PyObject *kwargs) {
+        PyObject *future, *states_int = NULL;
+        struct Admin_options options = Admin_options_INITIALIZER;
+        rd_kafka_AdminOptions_t *c_options = NULL;
+        CallState cs;
+        rd_kafka_queue_t *rkqu;
+        rd_kafka_consumer_group_state_t *c_states = NULL;
+        int states_cnt = 0;
+        int i = 0;
+
+        static char *kws[] = {"future",
+                             /* options */
+                             "states_int",
+                             "request_timeout",
+                             NULL};
+
+        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|Of", kws,
+                                         &future,
+                                         &states_int,
+                                         &options.request_timeout)) {
+                goto err;
+        }
+
+        if(states_int != NULL && states_int != Py_None) {
+                if(!PyList_Check(states_int)) {
+                        PyErr_SetString(PyExc_ValueError,
+                                "states must be of type list");
+                        goto err;
+                }
+
+                states_cnt = (int)PyList_Size(states_int);
+
+                if(states_cnt > 0) {
+                        c_states = (rd_kafka_consumer_group_state_t *)
+                                        malloc(states_cnt*sizeof(rd_kafka_consumer_group_state_t));
+                        for(i = 0 ; i < states_cnt ; i++) {
+                                PyObject *state = PyList_GET_ITEM(states_int, i);
+                                if(!cfl_PyInt_Check(state)) {
+                                        PyErr_SetString(PyExc_ValueError,
+                                                "Element of states must be a valid state");
+                                        goto err;
+                                }
+                                c_states[i] = (rd_kafka_consumer_group_state_t) cfl_PyInt_AsInt(state);
+                        }
+                        options.states = c_states;
+                        options.states_cnt = states_cnt;
+                }
+        }
+
+        c_options = Admin_options_to_c(self, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS,
+                                       &options, future);
+        if (!c_options)  {
+                goto err; /* Exception raised by options_to_c() */
+        }
+
+        /* options_to_c() sets future as the opaque, which is used in the
+         * background_event_cb to set the results on the future as the
+         * admin operation is finished, so we need to keep our own refcount. */
+        Py_INCREF(future);
+
+        /* Use librdkafka's background thread queue to automatically dispatch
+        * Admin_background_event_cb() when the admin operation is finished. */
+        rkqu = rd_kafka_queue_get_background(self->rk);
+
+        /*
+         * Call ListConsumerGroups
+         *
+         * We need to set up a CallState and release GIL here since
+         * the event_cb may be triggered immediately.
+         */
+        CallState_begin(self, &cs);
+        rd_kafka_ListConsumerGroups(self->rk, c_options, rkqu);
+        CallState_end(self, &cs);
+
+        if(c_states) {
+                free(c_states);
+        }
+        rd_kafka_queue_destroy(rkqu); /* drop reference from get_background */
+        rd_kafka_AdminOptions_destroy(c_options);
+
+        Py_RETURN_NONE;
+err:
+        if(c_states) {
+                free(c_states);
+        }
+        if (c_options) {
+                rd_kafka_AdminOptions_destroy(c_options);
+                Py_DECREF(future);
+        }
+        return NULL;
+}
+
+
+const char Admin_list_consumer_groups_doc[] = PyDoc_STR(
+        ".. py:function:: list_consumer_groups(future, [states_int], [request_timeout])\n"
+        "\n"
+        "  List all the consumer groups.\n"
+        "\n"
+        "  This method should not be used directly, use confluent_kafka.AdminClient.list_consumer_groups()\n");
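
The states option wired up above corresponds to the optional state filter of the public API. A sketch, assuming the 2.x names (states keyword, ConsumerGroupState enum, and a result object with valid/errors lists):

    from confluent_kafka import ConsumerGroupState
    from confluent_kafka.admin import AdminClient

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder

    # Only return groups that are currently STABLE or EMPTY.
    future = admin.list_consumer_groups(
        states={ConsumerGroupState.STABLE, ConsumerGroupState.EMPTY},
        request_timeout=10)

    result = future.result()
    for group in result.valid:
        print(group.group_id, group.state)
    for err in result.errors:
        print("listing error:", err)
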
+
+
+/**
+ * @brief Describe consumer groups
+ */
+PyObject *Admin_describe_consumer_groups (Handle *self, PyObject *args, PyObject *kwargs) {
+        PyObject *future, *group_ids;
+        struct Admin_options options = Admin_options_INITIALIZER;
+        const char **c_groups = NULL;
+        rd_kafka_AdminOptions_t *c_options = NULL;
+        CallState cs;
+        rd_kafka_queue_t *rkqu;
+        int groups_cnt = 0;
+        int i = 0;
+
+        static char *kws[] = {"future",
+                             "group_ids",
+                             /* options */
+                             "request_timeout",
+                             NULL};
+
+        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|f", kws,
+                                         &group_ids,
+                                         &future,
+                                         &options.request_timeout)) {
+                goto err;
+        }
+
+        if (!PyList_Check(group_ids) || (groups_cnt = (int)PyList_Size(group_ids)) < 1) {
+                PyErr_SetString(PyExc_ValueError,
+                                "Expected non-empty list of group_ids");
+                goto err;
+        }
+
+        c_groups = malloc(sizeof(char *) * groups_cnt);
+
+        for (i = 0 ; i < groups_cnt ; i++) {
+                PyObject *group = PyList_GET_ITEM(group_ids, i);
+                PyObject *ugroup;
+                PyObject *uogroup = NULL;
+
+                if (group == Py_None ||
+                    !(ugroup = cfl_PyObject_Unistr(group))) {
+                        PyErr_Format(PyExc_ValueError,
+                                     "Expected list of group strings, "
+                                     "not %s",
+                                     ((PyTypeObject *)PyObject_Type(group))->
+                                     tp_name);
+                        goto err;
+                }
+
+                c_groups[i] = cfl_PyUnistr_AsUTF8(ugroup, &uogroup);
+
+                Py_XDECREF(ugroup);
+                Py_XDECREF(uogroup);
+        }
+
+        c_options = Admin_options_to_c(self, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS,
+                                       &options, future);
+        if (!c_options)  {
+                goto err; /* Exception raised by options_to_c() */
+        }
+
+        /* options_to_c() sets future as the opaque, which is used in the
+         * background_event_cb to set the results on the future as the
+         * admin operation is finished, so we need to keep our own refcount. */
+        Py_INCREF(future);
+
+        /* Use librdkafka's background thread queue to automatically dispatch
+        * Admin_background_event_cb() when the admin operation is finished. */
+        rkqu = rd_kafka_queue_get_background(self->rk);
+
+        /*
+         * Call DescribeConsumerGroups
+         *
+         * We need to set up a CallState and release GIL here since
+         * the event_cb may be triggered immediately.
+         */
+        CallState_begin(self, &cs);
+        rd_kafka_DescribeConsumerGroups(self->rk, c_groups, groups_cnt, c_options, rkqu);
+        CallState_end(self, &cs);
+
+        if(c_groups) {
+                free(c_groups);
+        }
+        rd_kafka_queue_destroy(rkqu); /* drop reference from get_background */
+        rd_kafka_AdminOptions_destroy(c_options);
+
+        Py_RETURN_NONE;
+err:
+        if(c_groups) {
+                free(c_groups);
+        }
+        if (c_options) {
+                rd_kafka_AdminOptions_destroy(c_options);
+                Py_DECREF(future);
+        }
+        return NULL;
+}
+
+
+const char Admin_describe_consumer_groups_doc[] = PyDoc_STR(
+        ".. py:function:: describe_consumer_groups(future, group_ids, [request_timeout])\n"
+        "\n"
+        "  Describes the provided consumer groups.\n"
+        "\n"
+        "  This method should not be used directly, use confluent_kafka.AdminClient.describe_consumer_groups()\n");
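
A corresponding sketch for the describe call (group id is illustrative; one future is returned per requested group):

    from confluent_kafka.admin import AdminClient

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder

    futures = admin.describe_consumer_groups(["example-group"])
    desc = futures["example-group"].result()   # ConsumerGroupDescription
    print(desc.group_id, desc.state, len(desc.members))
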
+
+
+/**
+ * @brief Delete consumer groups offsets
+ */
+PyObject *Admin_delete_consumer_groups (Handle *self, PyObject *args, PyObject *kwargs) {
+        PyObject *group_ids, *future;
+        PyObject *group_id;
+        int group_ids_cnt;
+        struct Admin_options options = Admin_options_INITIALIZER;
+        rd_kafka_AdminOptions_t *c_options = NULL;
+        rd_kafka_DeleteGroup_t **c_delete_group_ids = NULL;
+        CallState cs;
+        rd_kafka_queue_t *rkqu;
+        int i;
+
+        static char *kws[] = {"group_ids",
+                             "future",
+                             /* options */
+                             "request_timeout",
+                             NULL};
+
+        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|f", kws,
+                                         &group_ids,
+                                         &future,
+                                         &options.request_timeout)) {
+                goto err;
+        }
+
+        c_options = Admin_options_to_c(self, RD_KAFKA_ADMIN_OP_DELETEGROUPS,
+                                       &options, future);
+        if (!c_options)  {
+                goto err; /* Exception raised by options_to_c() */
+        }
+
+        /* options_to_c() sets future as the opaque, which is used in the
+         * background_event_cb to set the results on the future as the
+         * admin operation is finished, so we need to keep our own refcount. */
+        Py_INCREF(future);
+
+        if (!PyList_Check(group_ids)) {
+                PyErr_SetString(PyExc_ValueError, "Expected 'group_ids' to be a list");
+                goto err;
+        }
+
+        group_ids_cnt = (int)PyList_Size(group_ids);
+
+        c_delete_group_ids = malloc(sizeof(rd_kafka_DeleteGroup_t *) * group_ids_cnt);
+        for(i = 0 ; i < group_ids_cnt ; i++) {
+                group_id = PyList_GET_ITEM(group_ids, i);
+
+                PyObject *ks, *ks8;
+                const char *group_id_string;
+                if (!(ks = cfl_PyObject_Unistr(group_id))) {
+                        PyErr_SetString(PyExc_TypeError,
+                                        "Expected element of 'group_ids' "
+                                        "to be unicode string");
+                        goto err;
+                }
+
+                group_id_string = cfl_PyUnistr_AsUTF8(ks, &ks8);
+
+                Py_DECREF(ks);
+                Py_XDECREF(ks8);
+
+                c_delete_group_ids[i] = rd_kafka_DeleteGroup_new(group_id_string);
+        }
+
+        /* Use librdkafka's background thread queue to automatically dispatch
+        * Admin_background_event_cb() when the admin operation is finished. */
+        rkqu = rd_kafka_queue_get_background(self->rk);
+
+        /*
+         * Call DeleteGroups
+         *
+         * We need to set up a CallState and release GIL here since
+         * the event_cb may be triggered immediately.
+         */
+        CallState_begin(self, &cs);
+        rd_kafka_DeleteGroups(self->rk, c_delete_group_ids, group_ids_cnt, c_options, rkqu);
+        CallState_end(self, &cs);
+
+        rd_kafka_queue_destroy(rkqu); /* drop reference from get_background */
+        rd_kafka_DeleteGroup_destroy_array(c_delete_group_ids, group_ids_cnt);
+        free(c_delete_group_ids);
+        rd_kafka_AdminOptions_destroy(c_options);
+
+        Py_RETURN_NONE;
+err:
+        if (c_delete_group_ids) {
+                rd_kafka_DeleteGroup_destroy_array(c_delete_group_ids, i);
+                free(c_delete_group_ids);
+        }
+        if (c_options) {
+                rd_kafka_AdminOptions_destroy(c_options);
+                Py_DECREF(future);
+        }
+        return NULL;
+}
+
+
+const char Admin_delete_consumer_groups_doc[] = PyDoc_STR(
+        ".. py:function:: delete_consumer_groups(group_ids, future, [request_timeout])\n"
+        "\n"
+        "  Deletes the consumer groups with the provided group ids.\n"
+        "\n"
+        "  This method should not be used directly, use confluent_kafka.AdminClient.delete_consumer_groups()\n");
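
And the matching delete helper; the broker only accepts deletion of groups with no active members (group id is again a placeholder):

    from confluent_kafka.admin import AdminClient

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder

    futures = admin.delete_consumer_groups(["example-group"])
    futures["example-group"].result()   # None on success, raises on error
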
+
+
+/**
+ * @brief List consumer groups offsets
+ */
+PyObject *Admin_list_consumer_group_offsets (Handle *self, PyObject *args, PyObject *kwargs) {
+        PyObject *request, *future, *require_stable_obj = NULL;
+        int requests_cnt;
+        struct Admin_options options = Admin_options_INITIALIZER;
+        PyObject *ConsumerGroupTopicPartitions_type = NULL;
+        rd_kafka_AdminOptions_t *c_options = NULL;
+        rd_kafka_ListConsumerGroupOffsets_t **c_obj = NULL;
+        rd_kafka_topic_partition_list_t *c_topic_partitions = NULL;
+        CallState cs;
+        rd_kafka_queue_t *rkqu;
+        PyObject *topic_partitions = NULL;
+        char *group_id = NULL;
+
+        static char *kws[] = {"request",
+                             "future",
+                             /* options */
+                             "require_stable",
+                             "request_timeout",
+                             NULL};
+
+        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|Of", kws,
+                                         &request,
+                                         &future,
+                                         &require_stable_obj,
+                                         &options.request_timeout)) {
+                goto err;
+        }
+
+        if (require_stable_obj &&
+            !cfl_PyBool_get(require_stable_obj, "require_stable",
+                            &options.require_stable_offsets))
+                return NULL;
+
+        c_options = Admin_options_to_c(self, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS,
+                                       &options, future);
+        if (!c_options)  {
+                goto err; /* Exception raised by options_to_c() */
+        }
+
+        /* options_to_c() sets future as the opaque, which is used in the
+         * background_event_cb to set the results on the future as the
+         * admin operation is finished, so we need to keep our own refcount. */
+        Py_INCREF(future);
+
+        if (PyList_Check(request) &&
+            (requests_cnt = (int)PyList_Size(request)) != 1) {
+                PyErr_SetString(PyExc_ValueError,
+                        "Currently we support listing offsets for only one consumer group");
+                goto err;
+        }
+
+        PyObject *single_request = PyList_GET_ITEM(request, 0);
+
+        /* Look up the ConsumerGroupTopicPartitions class so we can check that
+         * the provided request is of the correct type.
+         * Since this is not in the fast path we treat ourselves
+         * to the luxury of looking up this for each call. */
+        ConsumerGroupTopicPartitions_type = cfl_PyObject_lookup("confluent_kafka",
+                                                  "ConsumerGroupTopicPartitions");
+        if (!ConsumerGroupTopicPartitions_type) {
+                PyErr_SetString(PyExc_ImportError,
+                        "Not able to load ConsumerGroupTopicPartitions type");
+                goto err;
+        }
+
+        if(!PyObject_IsInstance(single_request, ConsumerGroupTopicPartitions_type)) {
+                PyErr_SetString(PyExc_ImportError,
+                        "Each request should be of ConsumerGroupTopicPartitions type");
+                goto err;
+        }
+
+        cfl_PyObject_GetString(single_request, "group_id", &group_id, NULL, 1, 0);
+
+        if(group_id == NULL) {
+                PyErr_SetString(PyExc_ValueError,
+                        "Group name is mandatory for list consumer offset operation");
+                goto err;
+        }
+
+        cfl_PyObject_GetAttr(single_request, "topic_partitions", &topic_partitions, &PyList_Type, 0, 1);
+
+        if(topic_partitions != Py_None) {
+                c_topic_partitions = py_to_c_parts(topic_partitions);
+        }
+
+        c_obj = malloc(sizeof(rd_kafka_ListConsumerGroupOffsets_t *) * requests_cnt);
+        c_obj[0] = rd_kafka_ListConsumerGroupOffsets_new(group_id, c_topic_partitions);
+
+        /* Use librdkafka's background thread queue to automatically dispatch
+        * Admin_background_event_cb() when the admin operation is finished. */
+        rkqu = rd_kafka_queue_get_background(self->rk);
+
+        /*
+         * Call ListConsumerGroupOffsets
+         *
+         * We need to set up a CallState and release GIL here since
+         * the event_cb may be triggered immediately.
+         */
+        CallState_begin(self, &cs);
+        rd_kafka_ListConsumerGroupOffsets(self->rk, c_obj, requests_cnt, c_options, rkqu);
+        CallState_end(self, &cs);
+
+        if (c_topic_partitions) {
+                rd_kafka_topic_partition_list_destroy(c_topic_partitions);
+        }
+        rd_kafka_queue_destroy(rkqu); /* drop reference from get_background */
+        rd_kafka_ListConsumerGroupOffsets_destroy_array(c_obj, requests_cnt);
+        free(c_obj);
+        free(group_id);
+        Py_DECREF(ConsumerGroupTopicPartitions_type); /* from lookup() */
+        Py_XDECREF(topic_partitions);
+        rd_kafka_AdminOptions_destroy(c_options);
+
+        Py_RETURN_NONE;
+err:
+        if (c_topic_partitions) {
+                rd_kafka_topic_partition_list_destroy(c_topic_partitions);
+        }
+        if (c_obj) {
+                rd_kafka_ListConsumerGroupOffsets_destroy_array(c_obj, requests_cnt);
+                free(c_obj);
+        }
+        if (c_options) {
+                rd_kafka_AdminOptions_destroy(c_options);
+                Py_DECREF(future);
+        }
+        if(group_id) {
+                free(group_id);
+        }
+        Py_XDECREF(topic_partitions);
+        Py_XDECREF(ConsumerGroupTopicPartitions_type);
+        return NULL;
+}
+
+
+const char Admin_list_consumer_group_offsets_doc[] = PyDoc_STR(
+        ".. py:function:: list_consumer_group_offsets(request, future, [require_stable], [request_timeout])\n"
+        "\n"
+        "  List offset information for the consumer group and (optional) topic partition provided in the request.\n"
+        "\n"
+        "  This method should not be used directly, use confluent_kafka.AdminClient.list_consumer_group_offsets()\n");
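
The require_stable flag parsed above maps to the require_stable argument of the public API. A hedged sketch (group, topic and partition are placeholders; ConsumerGroupTopicPartitions is the request/response type used by both offset calls):

    from confluent_kafka import ConsumerGroupTopicPartitions, TopicPartition
    from confluent_kafka.admin import AdminClient

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder

    request = ConsumerGroupTopicPartitions(
        "example-group", [TopicPartition("example-topic", 0)])

    futures = admin.list_consumer_group_offsets([request], require_stable=True)
    response = futures["example-group"].result()
    for tp in response.topic_partitions:
        print(tp.topic, tp.partition, tp.offset)
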
+
+
+/**
+ * @brief Alter consumer groups offsets
+ */
+PyObject *Admin_alter_consumer_group_offsets (Handle *self, PyObject *args, PyObject *kwargs) {
+        PyObject *request, *future;
+        int requests_cnt;
+        struct Admin_options options = Admin_options_INITIALIZER;
+        PyObject *ConsumerGroupTopicPartitions_type = NULL;
+        rd_kafka_AdminOptions_t *c_options = NULL;
+        rd_kafka_AlterConsumerGroupOffsets_t **c_obj = NULL;
+        rd_kafka_topic_partition_list_t *c_topic_partitions = NULL;
+        CallState cs;
+        rd_kafka_queue_t *rkqu;
+        PyObject *topic_partitions = NULL;
+        char *group_id = NULL;
+
+        static char *kws[] = {"request",
+                             "future",
+                             /* options */
+                             "request_timeout",
+                             NULL};
+
+        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|f", kws,
+                                         &request,
+                                         &future,
+                                         &options.request_timeout)) {
+                goto err;
+        }
+
+        c_options = Admin_options_to_c(self, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS,
+                                       &options, future);
+        if (!c_options)  {
+                goto err; /* Exception raised by options_to_c() */
+        }
+
+        /* options_to_c() sets future as the opaque, which is used in the
+         * background_event_cb to set the results on the future as the
+         * admin operation is finished, so we need to keep our own refcount. */
+        Py_INCREF(future);
+
+        if (PyList_Check(request) &&
+            (requests_cnt = (int)PyList_Size(request)) != 1) {
+                PyErr_SetString(PyExc_ValueError,
+                        "Currently we support altering consumer group offsets for only one group");
+                goto err;
+        }
+
+        PyObject *single_request = PyList_GET_ITEM(request, 0);
+
+        /* Look up the ConsumerGroupTopicPartitions class so we can check that
+         * the provided request is of the correct type.
+         * Since this is not in the fast path we treat ourselves
+         * to the luxury of looking up this for each call. */
+        ConsumerGroupTopicPartitions_type = cfl_PyObject_lookup("confluent_kafka",
+                                                  "ConsumerGroupTopicPartitions");
+        if (!ConsumerGroupTopicPartitions_type) {
+                PyErr_SetString(PyExc_ImportError,
+                        "Not able to load ConsumerGroupTopicPartitions type");
+                goto err;
+        }
+
+        if(!PyObject_IsInstance(single_request, ConsumerGroupTopicPartitions_type)) {
+                PyErr_SetString(PyExc_ImportError,
+                        "Each request should be of ConsumerGroupTopicPartitions type");
+                goto err;
+        }
+
+        cfl_PyObject_GetString(single_request, "group_id", &group_id, NULL, 1, 0);
+
+        if(group_id == NULL) {
+                PyErr_SetString(PyExc_ValueError,
+                        "Group name is mandatory for alter consumer offset operation");
+                goto err;
+        }
+
+        cfl_PyObject_GetAttr(single_request, "topic_partitions", &topic_partitions, &PyList_Type, 0, 1);
+
+        if(topic_partitions != Py_None) {
+                c_topic_partitions = py_to_c_parts(topic_partitions);
+        }
+
+        c_obj = malloc(sizeof(rd_kafka_AlterConsumerGroupOffsets_t *) * requests_cnt);
+        c_obj[0] = rd_kafka_AlterConsumerGroupOffsets_new(group_id, c_topic_partitions);
+
+        /* Use librdkafka's background thread queue to automatically dispatch
+        * Admin_background_event_cb() when the admin operation is finished. */
+        rkqu = rd_kafka_queue_get_background(self->rk);
+
+        /*
+         * Call AlterConsumerGroupOffsets
+         *
+         * We need to set up a CallState and release GIL here since
+         * the event_cb may be triggered immediately.
+         */
+        CallState_begin(self, &cs);
+        rd_kafka_AlterConsumerGroupOffsets(self->rk, c_obj, requests_cnt, c_options, rkqu);
+        CallState_end(self, &cs);
+
+        rd_kafka_queue_destroy(rkqu); /* drop reference from get_background */
+        rd_kafka_AlterConsumerGroupOffsets_destroy_array(c_obj, requests_cnt);
+        free(c_obj);
+        free(group_id);
+        Py_DECREF(ConsumerGroupTopicPartitions_type); /* from lookup() */
+        Py_XDECREF(topic_partitions);
+        rd_kafka_AdminOptions_destroy(c_options);
+        rd_kafka_topic_partition_list_destroy(c_topic_partitions);
+
+        Py_RETURN_NONE;
+err:
+        if (c_obj) {
+                rd_kafka_AlterConsumerGroupOffsets_destroy_array(c_obj, requests_cnt);
+                free(c_obj);
+        }
+        if (c_options) {
+                rd_kafka_AdminOptions_destroy(c_options);
+                Py_DECREF(future);
+        }
+        if(c_topic_partitions) {
+                rd_kafka_topic_partition_list_destroy(c_topic_partitions);
+        }
+        if(group_id) {
+                free(group_id);
+        }
+        Py_XDECREF(topic_partitions);
+        Py_XDECREF(ConsumerGroupTopicPartitions_type);
+        return NULL;
+}
+
+
+const char Admin_alter_consumer_group_offsets_doc[] = PyDoc_STR(
+        ".. py:function:: alter_consumer_group_offsets(request, future, [request_timeout])\n"
+        "\n"
+        "  Alter offsets for the consumer group and topic partitions provided in the request.\n"
+        "\n"
+        "  This method should not be used directly, use confluent_kafka.AdminClient.alter_consumer_group_offsets()\n");
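
Altering offsets uses the same request type, but each TopicPartition now carries the offset to commit (42 below is only an example; the group should have no active members for the brokers to accept the change):

    from confluent_kafka import ConsumerGroupTopicPartitions, TopicPartition
    from confluent_kafka.admin import AdminClient

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder

    request = ConsumerGroupTopicPartitions(
        "example-group", [TopicPartition("example-topic", 0, 42)])

    futures = admin.alter_consumer_group_offsets([request])
    futures["example-group"].result()   # committed ConsumerGroupTopicPartitions
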
+
+
+/**
+ * @brief Call rd_kafka_poll() and keep track of crashing callbacks.
+ * @returns -1 if callback crashed (or poll() failed), else the number
+ * of events served.
+ */
+static int Admin_poll0 (Handle *self, int tmout) {
+        int r;
+        CallState cs;
+
+        CallState_begin(self, &cs);
+
+        r = rd_kafka_poll(self->rk, tmout);
+
+        if (!CallState_end(self, &cs)) {
+                return -1;
+        }
+
+        return r;
+}
+
+
+static PyObject *Admin_poll (Handle *self, PyObject *args,
+                             PyObject *kwargs) {
+        double tmout;
+        int r;
+        static char *kws[] = { "timeout", NULL };
+
+        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "d", kws, &tmout))
+                return NULL;
+
+        r = Admin_poll0(self, (int)(tmout * 1000));
+        if (r == -1)
+                return NULL;
+
+        return cfl_PyInt_FromInt(r);
+}
+
+
+
+static PyMethodDef Admin_methods[] = {
+        { "create_topics", (PyCFunction)Admin_create_topics,
+          METH_VARARGS|METH_KEYWORDS,
+          ".. py:function:: create_topics(topics, future, [validate_only, request_timeout, operation_timeout])\n"
+          "\n"
+          "  Create new topics.\n"
+          "\n"
+          "  This method should not be used directly, use confluent_kafka.AdminClient.create_topics()\n"
+        },
+
+        { "delete_topics", (PyCFunction)Admin_delete_topics,
+          METH_VARARGS|METH_KEYWORDS,
+          ".. py:function:: delete_topics(topics, future, [request_timeout, operation_timeout])\n"
+          "\n"
+          "  This method should not be used directly, use confluent_kafka.AdminClient.delete_topics()\n"
+        },
+
+        { "create_partitions", (PyCFunction)Admin_create_partitions,
+          METH_VARARGS|METH_KEYWORDS,
+          ".. py:function:: create_partitions(topics, future, [validate_only, request_timeout, operation_timeout])\n"
+          "\n"
+          "  This method should not be used directly, use confluent_kafka.AdminClient.create_partitions()\n"
+        },
+
+        { "describe_configs", (PyCFunction)Admin_describe_configs,
+          METH_VARARGS|METH_KEYWORDS,
+          ".. py:function:: describe_configs(resources, future, [request_timeout, broker])\n"
+          "\n"
+          "  This method should not be used directly, use confluent_kafka.AdminClient.describe_configs()\n"
+        },
+
+        { "alter_configs", (PyCFunction)Admin_alter_configs,
+          METH_VARARGS|METH_KEYWORDS,
+          ".. py:function:: alter_configs(resources, future, [request_timeout, broker])\n"
+          "\n"
+          "  This method should not be used directly, use confluent_kafka.AdminClient.alter_configs()\n"
+        },
+
+        { "poll", (PyCFunction)Admin_poll, METH_VARARGS|METH_KEYWORDS,
+          ".. py:function:: poll([timeout])\n"
+          "\n"
+          "  Polls the Admin client for event callbacks, such as error_cb, "
+          "stats_cb, etc, if registered.\n"
+          "\n"
+          "  There is no need to call poll() if no callbacks have been registered.\n"
+          "\n"
+          "  :param float timeout: Maximum time to block waiting for events. (Seconds)\n"
+          "  :returns: Number of events processed (callbacks served)\n"
+          "  :rtype: int\n"
+          "\n"
+        },
+
+        { "list_topics", (PyCFunction)list_topics, METH_VARARGS|METH_KEYWORDS,
+          list_topics_doc
+        },
+
+        { "list_groups", (PyCFunction)list_groups, METH_VARARGS|METH_KEYWORDS,
+          list_groups_doc
+        },
+
+        { "list_consumer_groups", (PyCFunction)Admin_list_consumer_groups, METH_VARARGS|METH_KEYWORDS,
+          Admin_list_consumer_groups_doc
+        },
+
+        { "describe_consumer_groups", (PyCFunction)Admin_describe_consumer_groups, METH_VARARGS|METH_KEYWORDS,
+          Admin_describe_consumer_groups_doc
+        },
+
+        { "delete_consumer_groups", (PyCFunction)Admin_delete_consumer_groups, METH_VARARGS|METH_KEYWORDS,
+          Admin_delete_consumer_groups_doc
+        },
+
+        { "list_consumer_group_offsets", (PyCFunction)Admin_list_consumer_group_offsets, METH_VARARGS|METH_KEYWORDS,
+          Admin_list_consumer_group_offsets_doc
+        },
+
+        { "alter_consumer_group_offsets", (PyCFunction)Admin_alter_consumer_group_offsets, METH_VARARGS|METH_KEYWORDS,
+          Admin_alter_consumer_group_offsets_doc
+        },
+
+        { "create_acls", (PyCFunction)Admin_create_acls, METH_VARARGS|METH_KEYWORDS,
+           Admin_create_acls_doc
+        },
+
+        { "describe_acls", (PyCFunction)Admin_describe_acls, METH_VARARGS|METH_KEYWORDS,
+           Admin_describe_acls_doc
+        },
+
+        { "delete_acls", (PyCFunction)Admin_delete_acls, METH_VARARGS|METH_KEYWORDS,
+           Admin_delete_acls_doc
+        },
+
+        { "set_sasl_credentials", (PyCFunction)set_sasl_credentials, METH_VARARGS|METH_KEYWORDS,
+           set_sasl_credentials_doc
+        },
+
+        { NULL }
+};
+
+
+static Py_ssize_t Admin__len__ (Handle *self) {
+        return rd_kafka_outq_len(self->rk);
+}
+
+
+static PySequenceMethods Admin_seq_methods = {
+        (lenfunc)Admin__len__ /* sq_length */
+};
+
+
+/**
+ * @brief Convert C topic_result_t array to topic-indexed dict.
+ */
+static PyObject *
+Admin_c_topic_result_to_py (const rd_kafka_topic_result_t **c_result,
+                            size_t cnt) {
+        PyObject *result;
+        size_t i;
+
+        result = PyDict_New();
+
+        for (i = 0 ; i < cnt ; i++) {
+                PyObject *error;
+
+                error = KafkaError_new_or_None(
+                        rd_kafka_topic_result_error(c_result[i]),
+                        rd_kafka_topic_result_error_string(c_result[i]));
+
+                PyDict_SetItemString(
+                        result,
+                        rd_kafka_topic_result_name(c_result[i]),
+                        error);
+
+                Py_DECREF(error);
+        }
+
+        return result;
+}
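
This converter produces the per-topic error mapping behind AdminClient.create_topics() and delete_topics(), whose public form is a dict of futures keyed by topic name. A hedged usage sketch; broker address, topic name and counts are placeholders:

```python
from confluent_kafka.admin import AdminClient, NewTopic

admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

# create_topics() returns {topic_name: Future}; each future resolves to None
# on success or raises a KafkaException carrying the per-topic error.
futures = admin.create_topics(
        [NewTopic("demo-topic", num_partitions=3, replication_factor=1)])
for topic, fut in futures.items():
    try:
        fut.result()
        print("created", topic)
    except Exception as e:
        print("failed to create", topic, ":", e)
```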
+
+
+
+/**
+ * @brief Convert C ConfigEntry array to dict of py ConfigEntry objects.
+ */
+static PyObject *
+Admin_c_ConfigEntries_to_py (PyObject *ConfigEntry_type,
+                             const rd_kafka_ConfigEntry_t **c_configs,
+                             size_t config_cnt) {
+        PyObject *dict;
+        size_t ci;
+
+        dict = PyDict_New();
+
+        for (ci = 0 ; ci < config_cnt ; ci++) {
+                PyObject *kwargs, *args;
+                const rd_kafka_ConfigEntry_t *ent = c_configs[ci];
+                const rd_kafka_ConfigEntry_t **c_synonyms;
+                PyObject *entry, *synonyms;
+                size_t synonym_cnt;
+                const char *val;
+
+                kwargs = PyDict_New();
+
+                cfl_PyDict_SetString(kwargs, "name",
+                                     rd_kafka_ConfigEntry_name(ent));
+                val = rd_kafka_ConfigEntry_value(ent);
+                if (val)
+                        cfl_PyDict_SetString(kwargs, "value", val);
+                else
+                        PyDict_SetItemString(kwargs, "value", Py_None);
+                cfl_PyDict_SetInt(kwargs, "source",
+                                  (int)rd_kafka_ConfigEntry_source(ent));
+                cfl_PyDict_SetInt(kwargs, "is_read_only",
                                   rd_kafka_ConfigEntry_is_read_only(ent));
                 cfl_PyDict_SetInt(kwargs, "is_default",
                                   rd_kafka_ConfigEntry_is_default(ent));
@@ -1163,121 +2248,694 @@ Admin_c_ConfigEntries_to_py (PyObject *ConfigEntry_type,
                 cfl_PyDict_SetInt(kwargs, "is_synonym",
                                   rd_kafka_ConfigEntry_is_synonym(ent));
 
-                c_synonyms = rd_kafka_ConfigEntry_synonyms(ent,
-                                                           &synonym_cnt);
-                synonyms = Admin_c_ConfigEntries_to_py(ConfigEntry_type,
-                                                       c_synonyms,
-                                                       synonym_cnt);
-                if (!synonyms) {
+                c_synonyms = rd_kafka_ConfigEntry_synonyms(ent,
+                                                           &synonym_cnt);
+                synonyms = Admin_c_ConfigEntries_to_py(ConfigEntry_type,
+                                                       c_synonyms,
+                                                       synonym_cnt);
+                if (!synonyms) {
+                        Py_DECREF(kwargs);
+                        Py_DECREF(dict);
+                        return NULL;
+                }
+                PyDict_SetItemString(kwargs, "synonyms", synonyms);
+                Py_DECREF(synonyms);
+
+                args = PyTuple_New(0);
+                entry = PyObject_Call(ConfigEntry_type, args, kwargs);
+                Py_DECREF(args);
+                Py_DECREF(kwargs);
+                if (!entry) {
+                        Py_DECREF(dict);
+                        return NULL;
+                }
+
+                PyDict_SetItemString(dict, rd_kafka_ConfigEntry_name(ent),
+                                     entry);
+                Py_DECREF(entry);
+        }
+
+
+        return dict;
+}
+
+
+/**
+ * @brief Convert C ConfigResource array to dict indexed by ConfigResource
+ *        with the value of dict(ConfigEntry).
+ *
+ * @param ret_configs If true, return configs rather than None.
+ */
+static PyObject *
+Admin_c_ConfigResource_result_to_py (const rd_kafka_ConfigResource_t **c_resources,
+                                     size_t cnt,
+                                     int ret_configs) {
+        PyObject *result;
+        PyObject *ConfigResource_type;
+        PyObject *ConfigEntry_type;
+        size_t ri;
+
+        ConfigResource_type = cfl_PyObject_lookup("confluent_kafka.admin",
+                                                  "ConfigResource");
+        if (!ConfigResource_type)
+                return NULL;
+
+        ConfigEntry_type = cfl_PyObject_lookup("confluent_kafka.admin",
+                                               "ConfigEntry");
+        if (!ConfigEntry_type) {
+                Py_DECREF(ConfigResource_type);
+                return NULL;
+        }
+
+        result = PyDict_New();
+
+        for (ri = 0 ; ri < cnt ; ri++) {
+                const rd_kafka_ConfigResource_t *c_res = c_resources[ri];
+                const rd_kafka_ConfigEntry_t **c_configs;
+                PyObject *kwargs, *wrap;
+                PyObject *key;
+                PyObject *configs, *error;
+                size_t config_cnt;
+
+                c_configs = rd_kafka_ConfigResource_configs(c_res, &config_cnt);
+                configs = Admin_c_ConfigEntries_to_py(ConfigEntry_type,
+                                                      c_configs, config_cnt);
+                if (!configs)
+                        goto err;
+
+                error = KafkaError_new_or_None(
+                        rd_kafka_ConfigResource_error(c_res),
+                        rd_kafka_ConfigResource_error_string(c_res));
+
+                kwargs = PyDict_New();
+                cfl_PyDict_SetInt(kwargs, "restype",
+                                  (int)rd_kafka_ConfigResource_type(c_res));
+                cfl_PyDict_SetString(kwargs, "name",
+                                     rd_kafka_ConfigResource_name(c_res));
+                PyDict_SetItemString(kwargs, "described_configs", configs);
+                PyDict_SetItemString(kwargs, "error", error);
+                Py_DECREF(error);
+
+                /* Instantiate ConfigResource */
+                wrap = PyTuple_New(0);
+                key = PyObject_Call(ConfigResource_type, wrap, kwargs);
+                Py_DECREF(wrap);
+                Py_DECREF(kwargs);
+                if (!key) {
+                        Py_DECREF(configs);
+                        goto err;
+                }
+
+                /* Set result to dict[ConfigResource(..)] = configs | None
+                 * depending on ret_configs */
+                if (ret_configs)
+                        PyDict_SetItem(result, key, configs);
+                else
+                        PyDict_SetItem(result, key, Py_None);
+
+                Py_DECREF(configs);
+                Py_DECREF(key);
+        }
+        return result;
+
+ err:
+        Py_DECREF(ConfigResource_type);
+        Py_DECREF(ConfigEntry_type);
+        Py_DECREF(result);
+        return NULL;
+}
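
The ConfigResource/ConfigEntry conversion above is what AdminClient.describe_configs() ultimately hands back to Python code. A small sketch iterating the resulting entries; broker and topic name are placeholders:

```python
from confluent_kafka.admin import AdminClient, ConfigResource

admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

resource = ConfigResource(ConfigResource.Type.TOPIC, "demo-topic")
# describe_configs() returns {ConfigResource: Future}; the future resolves to
# a dict of config name -> ConfigEntry, as assembled by the converters above.
entries = admin.describe_configs([resource])[resource].result()
for name, entry in entries.items():
    print(name, "=", entry.value,
          "(default)" if entry.is_default else "(overridden)",
          "[read-only]" if entry.is_read_only else "")
```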
+
+/**
+ * @brief Convert C AclBinding to py
+ */
+static PyObject *
+Admin_c_AclBinding_to_py (const rd_kafka_AclBinding_t *c_acl_binding) {
+
+        PyObject *args, *kwargs, *AclBinding_type, *acl_binding;
+
+        AclBinding_type = cfl_PyObject_lookup("confluent_kafka.admin",
+                                        "AclBinding");
+        if (!AclBinding_type) {
+                return NULL;
+        }
+
+        kwargs = PyDict_New();
+
+        cfl_PyDict_SetInt(kwargs, "restype",
+                                     rd_kafka_AclBinding_restype(c_acl_binding));
+        cfl_PyDict_SetString(kwargs, "name",
+                                     rd_kafka_AclBinding_name(c_acl_binding));
+        cfl_PyDict_SetInt(kwargs, "resource_pattern_type",
+                                rd_kafka_AclBinding_resource_pattern_type(c_acl_binding));
+        cfl_PyDict_SetString(kwargs, "principal",
+                                     rd_kafka_AclBinding_principal(c_acl_binding));
+        cfl_PyDict_SetString(kwargs, "host",
+                                     rd_kafka_AclBinding_host(c_acl_binding));
+        cfl_PyDict_SetInt(kwargs, "operation",
+                                     rd_kafka_AclBinding_operation(c_acl_binding));
+        cfl_PyDict_SetInt(kwargs, "permission_type",
+                                     rd_kafka_AclBinding_permission_type(c_acl_binding));
+
+        args = PyTuple_New(0);
+        acl_binding = PyObject_Call(AclBinding_type, args, kwargs);
+
+        Py_DECREF(args);
+        Py_DECREF(kwargs);
+        Py_DECREF(AclBinding_type);
+        return acl_binding;
+}
+
+/**
+ * @brief Convert C AclBinding array to py list.
+ */
+static PyObject *
+Admin_c_AclBindings_to_py (const rd_kafka_AclBinding_t **c_acls,
+                                          size_t c_acls_cnt) {
+        size_t i;
+        PyObject *result;
+        PyObject *acl_binding;
+
+        result = PyList_New(c_acls_cnt);
+
+        for (i = 0 ; i < c_acls_cnt ; i++) {
+                acl_binding = Admin_c_AclBinding_to_py(c_acls[i]);
+                if (!acl_binding) {
+                        Py_DECREF(result);
+                        return NULL;
+                }
+                PyList_SET_ITEM(result, i, acl_binding);
+        }
+
+        return result;
+}
+
+
+/**
+ * @brief Convert C acl_result_t array to py list.
+ */
+static PyObject *
+Admin_c_acl_result_to_py (const rd_kafka_acl_result_t **c_result,
+                            size_t cnt) {
+        PyObject *result;
+        size_t i;
+
+        result = PyList_New(cnt);
+
+        for (i = 0 ; i < cnt ; i++) {
+                PyObject *error;
+                const rd_kafka_error_t *c_error = rd_kafka_acl_result_error(c_result[i]);
+
+                error = KafkaError_new_or_None(
+                        rd_kafka_error_code(c_error),
+                        rd_kafka_error_string(c_error));
+
+                PyList_SET_ITEM(result, i, error);
+        }
+
+        return result;
+}
+
+/**
+ * @brief Convert C DeleteAcls result response array to py list.
+ */
+static PyObject *
+Admin_c_DeleteAcls_result_responses_to_py (const rd_kafka_DeleteAcls_result_response_t **c_result_responses,
+                            size_t cnt) {
+        const rd_kafka_AclBinding_t **c_matching_acls;
+        size_t c_matching_acls_cnt;
+        PyObject *result;
+        PyObject *acl_bindings;
+        size_t i;
+
+        result = PyList_New(cnt);
+
+        for (i = 0 ; i < cnt ; i++) {
+                PyObject *error;
+                const rd_kafka_error_t *c_error = rd_kafka_DeleteAcls_result_response_error(c_result_responses[i]);
+
+                if (c_error) {
+                        error = KafkaError_new_or_None(
+                                rd_kafka_error_code(c_error),
+                                rd_kafka_error_string(c_error));
+                        PyList_SET_ITEM(result, i, error);
+                } else {
+                        c_matching_acls = rd_kafka_DeleteAcls_result_response_matching_acls(
+                                                                        c_result_responses[i],
+                                                                        &c_matching_acls_cnt);
+                        acl_bindings = Admin_c_AclBindings_to_py(c_matching_acls,c_matching_acls_cnt);
+                        if (!acl_bindings) {
+                                Py_DECREF(result);
+                                return NULL;
+                        }
+                        PyList_SET_ITEM(result, i, acl_bindings);
+                }
+        }
+
+        return result;
+}
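
These AclBinding converters back the ACL admin calls. A sketch of the round trip through create_acls(), describe_acls() and delete_acls(); the principal, topic and broker values are placeholders:

```python
from confluent_kafka.admin import (AdminClient, AclBinding, AclBindingFilter,
                                   ResourceType, ResourcePatternType,
                                   AclOperation, AclPermissionType)

admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

acl = AclBinding(ResourceType.TOPIC, "demo-topic", ResourcePatternType.LITERAL,
                 "User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW)
# create_acls() -> {AclBinding: Future(None)}; describe_acls() -> Future([AclBinding]);
# delete_acls() -> {AclBindingFilter: Future([AclBinding])} with the matching ACLs.
admin.create_acls([acl])[acl].result()

any_acl = AclBindingFilter(ResourceType.ANY, None, ResourcePatternType.ANY,
                           None, None, AclOperation.ANY, AclPermissionType.ANY)
for binding in admin.describe_acls(any_acl).result():
    print("found:", binding)
deleted = admin.delete_acls([any_acl])[any_acl].result()
print("deleted", len(deleted), "ACL bindings")
```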
+
+
+/**
+ * @brief Convert C ListConsumerGroups result arrays (valid listings and
+ *        errors) to a Python ListConsumerGroupsResult.
+ */
+static PyObject *Admin_c_ListConsumerGroupsResults_to_py(
+                        const rd_kafka_ConsumerGroupListing_t **c_valid_responses,
+                        size_t valid_cnt,
+                        const rd_kafka_error_t **c_errors_responses,
+                        size_t errors_cnt) {
+
+        PyObject *result = NULL;
+        PyObject *ListConsumerGroupsResult_type = NULL;
+        PyObject *ConsumerGroupListing_type = NULL;
+        PyObject *args = NULL;
+        PyObject *kwargs = NULL;
+        PyObject *valid_result = NULL;
+        PyObject *valid_results = NULL;
+        PyObject *error_result = NULL;
+        PyObject *error_results = NULL;
+        PyObject *py_is_simple_consumer_group = NULL;
+        size_t i = 0;
+        valid_results = PyList_New(valid_cnt);
+        error_results = PyList_New(errors_cnt);
+        if(valid_cnt > 0) {
+                ConsumerGroupListing_type = cfl_PyObject_lookup("confluent_kafka.admin",
+                                                                "ConsumerGroupListing");
+                if (!ConsumerGroupListing_type) {
+                        goto err;
+                }
+                for(i = 0; i < valid_cnt; i++) {
+
+                        kwargs = PyDict_New();
+
+                        cfl_PyDict_SetString(kwargs,
+                                             "group_id",
+                                             rd_kafka_ConsumerGroupListing_group_id(c_valid_responses[i]));
+
+
+                        py_is_simple_consumer_group = PyBool_FromLong(
+                                rd_kafka_ConsumerGroupListing_is_simple_consumer_group(c_valid_responses[i]));
+                        if(PyDict_SetItemString(kwargs,
+                                                "is_simple_consumer_group",
+                                                py_is_simple_consumer_group) == -1) {
+                                PyErr_Format(PyExc_RuntimeError,
+                                             "Unable to set 'is_simple_consumer_group' in ConsumerGroupListing");
+                                Py_DECREF(py_is_simple_consumer_group);
+                                goto err;
+                        }
+                        Py_DECREF(py_is_simple_consumer_group);
+
+                        cfl_PyDict_SetInt(kwargs, "state", rd_kafka_ConsumerGroupListing_state(c_valid_responses[i]));
+
+                        args = PyTuple_New(0);
+
+                        valid_result = PyObject_Call(ConsumerGroupListing_type, args, kwargs);
+                        PyList_SET_ITEM(valid_results, i, valid_result);
+
+                        Py_DECREF(args);
                         Py_DECREF(kwargs);
-                        Py_DECREF(dict);
-                        return NULL;
                 }
-                PyDict_SetItemString(kwargs, "synonyms", synonyms);
-                Py_DECREF(synonyms);
+                Py_DECREF(ConsumerGroupListing_type);
+        }
+
+        if(errors_cnt > 0) {
+                for(i = 0; i < errors_cnt; i++) {
+
+                        error_result = KafkaError_new_or_None(
+                                rd_kafka_error_code(c_errors_responses[i]),
+                                rd_kafka_error_string(c_errors_responses[i]));
+                        PyList_SET_ITEM(error_results, i, error_result);
 
-                args = PyTuple_New(0);
-                entry = PyObject_Call(ConfigEntry_type, args, kwargs);
-                Py_DECREF(args);
-                Py_DECREF(kwargs);
-                if (!entry) {
-                        Py_DECREF(dict);
-                        return NULL;
                 }
+        }
 
-                PyDict_SetItemString(dict, rd_kafka_ConfigEntry_name(ent),
-                                     entry);
-                Py_DECREF(entry);
+        ListConsumerGroupsResult_type = cfl_PyObject_lookup("confluent_kafka.admin",
+                                                              "ListConsumerGroupsResult");
+        if (!ListConsumerGroupsResult_type) {
+                return NULL;
         }
+        kwargs = PyDict_New();
+        PyDict_SetItemString(kwargs, "valid", valid_results);
+        PyDict_SetItemString(kwargs, "errors", error_results);
+        args = PyTuple_New(0);
+        result = PyObject_Call(ListConsumerGroupsResult_type, args, kwargs);
+
+        Py_DECREF(args);
+        Py_DECREF(kwargs);
+        Py_DECREF(valid_results);
+        Py_DECREF(error_results);
+        Py_DECREF(ListConsumerGroupsResult_type);
 
+        return result;
+err:
+        Py_XDECREF(ListConsumerGroupsResult_type);
+        Py_XDECREF(ConsumerGroupListing_type);
+        Py_XDECREF(result);
+        Py_XDECREF(args);
+        Py_XDECREF(kwargs);
 
-        return dict;
+        return NULL;
+}
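
On the Python side this converter materialises as a ListConsumerGroupsResult with valid and errors lists. A minimal sketch; the broker address is a placeholder:

```python
from confluent_kafka.admin import AdminClient

admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

# list_consumer_groups() returns a single Future resolving to a
# ListConsumerGroupsResult; .valid holds ConsumerGroupListing objects and
# .errors holds KafkaError objects for partial failures.
result = admin.list_consumer_groups(request_timeout=10).result()
for listing in result.valid:
    print(listing.group_id, listing.state, listing.is_simple_consumer_group)
for err in result.errors:
    print("listing error:", err)
```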
+
+static PyObject *Admin_c_MemberAssignment_to_py(const rd_kafka_MemberAssignment_t *c_assignment) {
+        PyObject *MemberAssignment_type = NULL;
+        PyObject *assignment = NULL;
+        PyObject *args = NULL;
+        PyObject *kwargs = NULL;
+        PyObject *topic_partitions = NULL;
+        const rd_kafka_topic_partition_list_t *c_topic_partitions = NULL;
+
+        MemberAssignment_type = cfl_PyObject_lookup("confluent_kafka.admin",
+                                                     "MemberAssignment");
+        if (!MemberAssignment_type) {
+                goto err;
+        }
+        c_topic_partitions = rd_kafka_MemberAssignment_partitions(c_assignment);
+
+        topic_partitions = c_parts_to_py(c_topic_partitions);
+
+        kwargs = PyDict_New();
+
+        PyDict_SetItemString(kwargs, "topic_partitions", topic_partitions);
+
+        args = PyTuple_New(0);
+
+        assignment = PyObject_Call(MemberAssignment_type, args, kwargs);
+
+        Py_DECREF(MemberAssignment_type);
+        Py_DECREF(args);
+        Py_DECREF(kwargs);
+        Py_DECREF(topic_partitions);
+        return assignment;
+
+err:
+        Py_XDECREF(MemberAssignment_type);
+        Py_XDECREF(args);
+        Py_XDECREF(kwargs);
+        Py_XDECREF(topic_partitions);
+        Py_XDECREF(assignment);
+        return NULL;
+
+}
+
+static PyObject *Admin_c_MemberDescription_to_py(const rd_kafka_MemberDescription_t *c_member) {
+        PyObject *member = NULL;
+        PyObject *MemberDescription_type = NULL;
+        PyObject *args = NULL;
+        PyObject *kwargs = NULL;
+        PyObject *assignment = NULL;
+        const rd_kafka_MemberAssignment_t *c_assignment;
+
+        MemberDescription_type = cfl_PyObject_lookup("confluent_kafka.admin",
+                                                     "MemberDescription");
+        if (!MemberDescription_type) {
+                goto err;
+        }
+
+        kwargs = PyDict_New();
+
+        cfl_PyDict_SetString(kwargs,
+                             "member_id",
+                             rd_kafka_MemberDescription_consumer_id(c_member));
+
+        cfl_PyDict_SetString(kwargs,
+                             "client_id",
+                             rd_kafka_MemberDescription_client_id(c_member));
+
+        cfl_PyDict_SetString(kwargs,
+                             "host",
+                             rd_kafka_MemberDescription_host(c_member));
+
+        const char * c_group_instance_id = rd_kafka_MemberDescription_group_instance_id(c_member);
+        if(c_group_instance_id) {
+                cfl_PyDict_SetString(kwargs, "group_instance_id", c_group_instance_id);
+        }
+
+        c_assignment = rd_kafka_MemberDescription_assignment(c_member);
+        assignment = Admin_c_MemberAssignment_to_py(c_assignment);
+        if (!assignment) {
+                goto err;
+        }
+
+        PyDict_SetItemString(kwargs, "assignment", assignment);
+
+        args = PyTuple_New(0);
+
+        member = PyObject_Call(MemberDescription_type, args, kwargs);
+
+        Py_DECREF(args);
+        Py_DECREF(kwargs);
+        Py_DECREF(MemberDescription_type);
+        Py_DECREF(assignment);
+        return member;
+
+err:
+
+        Py_XDECREF(args);
+        Py_XDECREF(kwargs);
+        Py_XDECREF(MemberDescription_type);
+        Py_XDECREF(assignment);
+        Py_XDECREF(member);
+        return NULL;
+}
+
+static PyObject *Admin_c_MemberDescriptions_to_py_from_ConsumerGroupDescription(
+    const rd_kafka_ConsumerGroupDescription_t *c_consumer_group_description) {
+        PyObject *member_description = NULL;
+        PyObject *members = NULL;
+        size_t c_members_cnt;
+        const rd_kafka_MemberDescription_t *c_member;
+        size_t i = 0;
+
+        c_members_cnt = rd_kafka_ConsumerGroupDescription_member_count(c_consumer_group_description);
+        members = PyList_New(c_members_cnt);
+        if(c_members_cnt > 0) {
+                for(i = 0; i < c_members_cnt; i++) {
+
+                        c_member = rd_kafka_ConsumerGroupDescription_member(c_consumer_group_description, i);
+                        member_description = Admin_c_MemberDescription_to_py(c_member);
+                        if(!member_description) {
+                                goto err;
+                        }
+                        PyList_SET_ITEM(members, i, member_description);
+                }
+        }
+        return members;
+err:
+        Py_XDECREF(members);
+        return NULL;
+}
+
+
+static PyObject *Admin_c_ConsumerGroupDescription_to_py(
+    const rd_kafka_ConsumerGroupDescription_t *c_consumer_group_description) {
+        PyObject *consumer_group_description = NULL;
+        PyObject *ConsumerGroupDescription_type = NULL;
+        PyObject *args = NULL;
+        PyObject *kwargs = NULL;
+        PyObject *py_is_simple_consumer_group = NULL;
+        PyObject *coordinator = NULL;
+        PyObject *members = NULL;
+        const rd_kafka_Node_t *c_coordinator = NULL;
+
+        ConsumerGroupDescription_type = cfl_PyObject_lookup("confluent_kafka.admin",
+                                                            "ConsumerGroupDescription");
+        if (!ConsumerGroupDescription_type) {
+                PyErr_Format(PyExc_TypeError, "Unable to load ConsumerGroupDescription type");
+                goto err;
+        }
+
+        kwargs = PyDict_New();
+
+        cfl_PyDict_SetString(kwargs,
+                             "group_id",
+                             rd_kafka_ConsumerGroupDescription_group_id(c_consumer_group_description));
+
+        cfl_PyDict_SetString(kwargs,
+                             "partition_assignor",
+                             rd_kafka_ConsumerGroupDescription_partition_assignor(c_consumer_group_description));
+
+        members = Admin_c_MemberDescriptions_to_py_from_ConsumerGroupDescription(c_consumer_group_description);
+        if(!members) {
+                goto err;
+        }
+        PyDict_SetItemString(kwargs, "members", members);
+
+        c_coordinator = rd_kafka_ConsumerGroupDescription_coordinator(c_consumer_group_description);
+        coordinator = c_Node_to_py(c_coordinator);
+        if(!coordinator) {
+                goto err;
+        }
+        PyDict_SetItemString(kwargs, "coordinator", coordinator);
+
+        py_is_simple_consumer_group = PyBool_FromLong(
+                rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(c_consumer_group_description));
+        if(PyDict_SetItemString(kwargs, "is_simple_consumer_group", py_is_simple_consumer_group) == -1) {
+                goto err;
+        }
+
+        cfl_PyDict_SetInt(kwargs, "state", rd_kafka_ConsumerGroupDescription_state(c_consumer_group_description));
+
+        args = PyTuple_New(0);
+
+        consumer_group_description = PyObject_Call(ConsumerGroupDescription_type, args, kwargs);
+
+        Py_DECREF(py_is_simple_consumer_group);
+        Py_DECREF(args);
+        Py_DECREF(kwargs);
+        Py_DECREF(ConsumerGroupDescription_type);
+        Py_DECREF(coordinator);
+        Py_DECREF(members);
+        return consumer_group_description;
+
+err:
+        Py_XDECREF(py_is_simple_consumer_group);
+        Py_XDECREF(args);
+        Py_XDECREF(kwargs);
+        Py_XDECREF(coordinator);
+        Py_XDECREF(ConsumerGroupDescription_type);
+        Py_XDECREF(members);
+        return NULL;
+
+}
+
+static PyObject *Admin_c_DescribeConsumerGroupsResults_to_py(
+    const rd_kafka_ConsumerGroupDescription_t **c_result_responses,
+    size_t cnt) {
+        PyObject *consumer_group_description = NULL;
+        PyObject *results = NULL;
+        size_t i = 0;
+        results = PyList_New(cnt);
+        if(cnt > 0) {
+                for(i = 0; i < cnt; i++) {
+                        PyObject *error;
+                        const rd_kafka_error_t *c_error =
+                            rd_kafka_ConsumerGroupDescription_error(c_result_responses[i]);
+
+                        if (c_error) {
+                                error = KafkaError_new_or_None(
+                                        rd_kafka_error_code(c_error),
+                                        rd_kafka_error_string(c_error));
+                                PyList_SET_ITEM(results, i, error);
+                        } else {
+                                consumer_group_description =
+                                    Admin_c_ConsumerGroupDescription_to_py(c_result_responses[i]);
+
+                                if(!consumer_group_description) {
+                                        goto err;
+                                }
+
+                                PyList_SET_ITEM(results, i, consumer_group_description);
+                        }
+                }
+        }
+        return results;
+err:
+        Py_XDECREF(results);
+        return NULL;
 }
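
The description converters above feed AdminClient.describe_consumer_groups(), which returns one future per requested group id. A sketch; broker address and group id are placeholders:

```python
from confluent_kafka.admin import AdminClient

admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

# describe_consumer_groups() returns {group_id: Future}; each future resolves
# to a ConsumerGroupDescription assembled by the converters above.
desc = admin.describe_consumer_groups(["demo-group"])["demo-group"].result()
print(desc.group_id, desc.state,
      "coordinator:", desc.coordinator.host, desc.coordinator.port)
for member in desc.members:
    partitions = [(tp.topic, tp.partition)
                  for tp in member.assignment.topic_partitions]
    print(member.member_id, member.client_id, member.host, partitions)
```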
 
 
 /**
- * @brief Convert C ConfigResource array to dict indexed by ConfigResource
- *        with the value of dict(ConfigEntry).
  *
- * @param ret_configs If true, return configs rather than None.
+ * @brief Convert C delete groups result response to pyobject.
+ *
  */
 static PyObject *
-Admin_c_ConfigResource_result_to_py (const rd_kafka_ConfigResource_t **c_resources,
-                                     size_t cnt,
-                                     int ret_configs) {
-        PyObject *result;
-        PyObject *ConfigResource_type;
-        PyObject *ConfigEntry_type;
-        size_t ri;
+Admin_c_DeleteGroupResults_to_py (const rd_kafka_group_result_t **c_result_responses,
+                                  size_t cnt) {
 
-        ConfigResource_type = cfl_PyObject_lookup("confluent_kafka.admin",
-                                                  "ConfigResource");
-        if (!ConfigResource_type)
-                return NULL;
+        PyObject *delete_groups_result = NULL;
+        size_t i;
 
-        ConfigEntry_type = cfl_PyObject_lookup("confluent_kafka.admin",
-                                               "ConfigEntry");
-        if (!ConfigEntry_type) {
-                Py_DECREF(ConfigResource_type);
+        delete_groups_result = PyList_New(cnt);
+
+        for (i = 0; i < cnt; i++) {
+                PyObject *error;
+                const rd_kafka_error_t *c_error = rd_kafka_group_result_error(c_result_responses[i]);
+                error = KafkaError_new_or_None(
+                        rd_kafka_error_code(c_error),
+                        rd_kafka_error_string(c_error));
+                PyList_SET_ITEM(delete_groups_result, i, error);
+        }
+
+        return delete_groups_result;
+}
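
This result list backs AdminClient.delete_consumer_groups(). A short sketch; group id and broker address are placeholders:

```python
from confluent_kafka.admin import AdminClient

admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

# delete_consumer_groups() returns {group_id: Future}; a future resolves to
# None on success and raises if the group is unknown or still has members.
for group_id, fut in admin.delete_consumer_groups(["demo-group"]).items():
    try:
        fut.result()
        print("deleted", group_id)
    except Exception as e:
        print("could not delete", group_id, ":", e)
```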
+
+
+static PyObject * Admin_c_SingleGroupResult_to_py(const rd_kafka_group_result_t *c_group_result_response) {
+
+        PyObject *args = NULL;
+        PyObject *kwargs = NULL;
+        PyObject *GroupResult_type = NULL;
+        PyObject *group_result = NULL;
+        const rd_kafka_topic_partition_list_t *c_topic_partition_offset_list;
+        PyObject *topic_partition_offset_list = NULL;
+
+        GroupResult_type = cfl_PyObject_lookup("confluent_kafka",
+                                               "ConsumerGroupTopicPartitions");
+        if (!GroupResult_type) {
                 return NULL;
         }
 
-        result = PyDict_New();
+        kwargs = PyDict_New();
 
-        for (ri = 0 ; ri < cnt ; ri++) {
-                const rd_kafka_ConfigResource_t *c_res = c_resources[ri];
-                const rd_kafka_ConfigEntry_t **c_configs;
-                PyObject *kwargs, *wrap;
-                PyObject *key;
-                PyObject *configs, *error;
-                size_t config_cnt;
+        cfl_PyDict_SetString(kwargs, "group_id", rd_kafka_group_result_name(c_group_result_response));
 
-                c_configs = rd_kafka_ConfigResource_configs(c_res, &config_cnt);
-                configs = Admin_c_ConfigEntries_to_py(ConfigEntry_type,
-                                                      c_configs, config_cnt);
-                if (!configs)
-                        goto err;
+        c_topic_partition_offset_list = rd_kafka_group_result_partitions(c_group_result_response);
+        if(c_topic_partition_offset_list) {
+                topic_partition_offset_list = c_parts_to_py(c_topic_partition_offset_list);
+                PyDict_SetItemString(kwargs, "topic_partitions", topic_partition_offset_list);
+        }
 
-                error = KafkaError_new_or_None(
-                        rd_kafka_ConfigResource_error(c_res),
-                        rd_kafka_ConfigResource_error_string(c_res));
+        args = PyTuple_New(0);
+        group_result = PyObject_Call(GroupResult_type, args, kwargs);
 
-                kwargs = PyDict_New();
-                cfl_PyDict_SetInt(kwargs, "restype",
-                                  (int)rd_kafka_ConfigResource_type(c_res));
-                cfl_PyDict_SetString(kwargs, "name",
-                                     rd_kafka_ConfigResource_name(c_res));
-                PyDict_SetItemString(kwargs, "described_configs", configs);
-                PyDict_SetItemString(kwargs, "error", error);
-                Py_DECREF(error);
+        Py_DECREF(args);
+        Py_DECREF(kwargs);
+        Py_DECREF(GroupResult_type);
+        Py_XDECREF(topic_partition_offset_list);
 
-                /* Instantiate ConfigResource */
-                wrap = PyTuple_New(0);
-                key = PyObject_Call(ConfigResource_type, wrap, kwargs);
-                Py_DECREF(wrap);
-                Py_DECREF(kwargs);
-                if (!key) {
-                        Py_DECREF(configs);
-                        goto err;
-                }
+        return group_result;
+}
 
-                /* Set result to dict[ConfigResource(..)] = configs | None
-                 * depending on ret_configs */
-                if (ret_configs)
-                        PyDict_SetItem(result, key, configs);
-                else
-                        PyDict_SetItem(result, key, Py_None);
 
-                Py_DECREF(configs);
-                Py_DECREF(key);
+/**
+ *
+ * @brief Convert C group result response to pyobject.
+ *
+ */
+static PyObject *
+Admin_c_GroupResults_to_py (const rd_kafka_group_result_t **c_result_responses,
+                            size_t cnt) {
+
+        size_t i;
+        PyObject *all_groups_result = NULL;
+        PyObject *single_group_result = NULL;
+
+        all_groups_result = PyList_New(cnt);
+
+        for (i = 0; i < cnt; i++) {
+                PyObject *error;
+                const rd_kafka_error_t *c_error = rd_kafka_group_result_error(c_result_responses[i]);
+
+                if (c_error) {
+                        error = KafkaError_new_or_None(
+                                rd_kafka_error_code(c_error),
+                                rd_kafka_error_string(c_error));
+                        PyList_SET_ITEM(all_groups_result, i, error);
+                } else {
+                        single_group_result =
+                                Admin_c_SingleGroupResult_to_py(c_result_responses[i]);
+                        if (!single_group_result) {
+                                Py_XDECREF(all_groups_result);
+                                return NULL;
+                        }
+                        PyList_SET_ITEM(all_groups_result, i, single_group_result);
+                }
         }
-        return result;
 
- err:
-        Py_DECREF(ConfigResource_type);
-        Py_DECREF(ConfigEntry_type);
-        Py_DECREF(result);
-        return NULL;
+        return all_groups_result;
 }
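
The group-result conversion is shared by list_consumer_group_offsets() and alter_consumer_group_offsets(), both of which speak ConsumerGroupTopicPartitions. A sketch; group, topic and offsets are placeholders:

```python
from confluent_kafka import ConsumerGroupTopicPartitions, TopicPartition
from confluent_kafka.admin import AdminClient

admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

# Both calls return {group_id: Future(ConsumerGroupTopicPartitions)}.
request = ConsumerGroupTopicPartitions("demo-group")
current = admin.list_consumer_group_offsets([request])["demo-group"].result()
for tp in current.topic_partitions:
    print(tp.topic, tp.partition, tp.offset)

rewind = ConsumerGroupTopicPartitions("demo-group",
                                      [TopicPartition("demo-topic", 0, 0)])
admin.alter_consumer_group_offsets([rewind])["demo-group"].result()
```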
 
 
@@ -1383,6 +3041,161 @@ static void Admin_background_event_cb (rd_kafka_t *rk, rd_kafka_event_t *rkev,
                 break;
         }
 
+
+        case RD_KAFKA_EVENT_CREATEACLS_RESULT:
+        {
+                const rd_kafka_acl_result_t **c_acl_results;
+                size_t c_acl_results_cnt;
+
+                c_acl_results = rd_kafka_CreateAcls_result_acls(
+                        rd_kafka_event_CreateAcls_result(rkev),
+                        &c_acl_results_cnt
+                );
+                result = Admin_c_acl_result_to_py(
+                        c_acl_results,
+                        c_acl_results_cnt);
+                break;
+        }
+
+        case RD_KAFKA_EVENT_DESCRIBEACLS_RESULT:
+        {
+                const rd_kafka_DescribeAcls_result_t *c_acl_result;
+                const rd_kafka_AclBinding_t **c_acls;
+                size_t c_acl_cnt;
+
+                c_acl_result = rd_kafka_event_DescribeAcls_result(rkev);
+
+                c_acls = rd_kafka_DescribeAcls_result_acls(
+                        c_acl_result,
+                        &c_acl_cnt
+                );
+
+                result = Admin_c_AclBindings_to_py(c_acls,
+                                                   c_acl_cnt);
+
+                break;
+        }
+
+
+        case RD_KAFKA_EVENT_DELETEACLS_RESULT:
+        {
+                const rd_kafka_DeleteAcls_result_t *c_acl_result;
+                const rd_kafka_DeleteAcls_result_response_t **c_acl_result_responses;
+                size_t c_acl_results_cnt;
+
+                c_acl_result = rd_kafka_event_DeleteAcls_result(rkev);
+
+                c_acl_result_responses = rd_kafka_DeleteAcls_result_responses(
+                        c_acl_result,
+                        &c_acl_results_cnt
+                );
+
+                result = Admin_c_DeleteAcls_result_responses_to_py(c_acl_result_responses,
+                                                        c_acl_results_cnt);
+
+                break;
+        }
+
+        case RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT:
+        {
+                const  rd_kafka_ListConsumerGroups_result_t *c_list_consumer_groups_res;
+                const rd_kafka_ConsumerGroupListing_t **c_list_consumer_groups_valid_responses;
+                size_t c_list_consumer_groups_valid_cnt;
+                const rd_kafka_error_t **c_list_consumer_groups_errors_responses;
+                size_t c_list_consumer_groups_errors_cnt;
+
+                c_list_consumer_groups_res = rd_kafka_event_ListConsumerGroups_result(rkev);
+
+                c_list_consumer_groups_valid_responses =
+                        rd_kafka_ListConsumerGroups_result_valid(c_list_consumer_groups_res,
+                                                                 &c_list_consumer_groups_valid_cnt);
+                c_list_consumer_groups_errors_responses =
+                        rd_kafka_ListConsumerGroups_result_errors(c_list_consumer_groups_res,
+                                                                  &c_list_consumer_groups_errors_cnt);
+
+                result = Admin_c_ListConsumerGroupsResults_to_py(c_list_consumer_groups_valid_responses,
+                                                                 c_list_consumer_groups_valid_cnt,
+                                                                 c_list_consumer_groups_errors_responses,
+                                                                 c_list_consumer_groups_errors_cnt);
+
+                break;
+        }
+
+        case RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT:
+        {
+                const rd_kafka_DescribeConsumerGroups_result_t *c_describe_consumer_groups_res;
+                const rd_kafka_ConsumerGroupDescription_t **c_describe_consumer_groups_res_responses;
+                size_t c_describe_consumer_groups_res_cnt;
+
+                c_describe_consumer_groups_res = rd_kafka_event_DescribeConsumerGroups_result(rkev);
+
+                c_describe_consumer_groups_res_responses = rd_kafka_DescribeConsumerGroups_result_groups
+                                                           (c_describe_consumer_groups_res,
+                                                           &c_describe_consumer_groups_res_cnt);
+
+                result = Admin_c_DescribeConsumerGroupsResults_to_py(c_describe_consumer_groups_res_responses,
+                                                                     c_describe_consumer_groups_res_cnt);
+
+                break;
+        }
+
+        case RD_KAFKA_EVENT_DELETEGROUPS_RESULT:
+        {
+
+                const  rd_kafka_DeleteGroups_result_t *c_delete_groups_res;
+                const rd_kafka_group_result_t **c_delete_groups_res_responses;
+                size_t c_delete_groups_res_cnt;
+
+                c_delete_groups_res = rd_kafka_event_DeleteGroups_result(rkev);
+
+                c_delete_groups_res_responses =
+                        rd_kafka_DeleteConsumerGroupOffsets_result_groups(
+                            c_delete_groups_res,
+                            &c_delete_groups_res_cnt);
+
+                result = Admin_c_DeleteGroupResults_to_py(c_delete_groups_res_responses,
+                                                          c_delete_groups_res_cnt);
+
+                break;
+        }
+
+        case RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT:
+        {
+                const  rd_kafka_ListConsumerGroupOffsets_result_t *c_list_group_offset_res;
+                const rd_kafka_group_result_t **c_list_group_offset_res_responses;
+                size_t c_list_group_offset_res_cnt;
+
+                c_list_group_offset_res = rd_kafka_event_ListConsumerGroupOffsets_result(rkev);
+
+                c_list_group_offset_res_responses =
+                        rd_kafka_ListConsumerGroupOffsets_result_groups(
+                                c_list_group_offset_res,
+                                &c_list_group_offset_res_cnt);
+
+                result = Admin_c_GroupResults_to_py(c_list_group_offset_res_responses,
+                                                    c_list_group_offset_res_cnt);
+
+                break;
+        }
+
+        case RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT:
+        {
+                const  rd_kafka_AlterConsumerGroupOffsets_result_t *c_alter_group_offset_res;
+                const rd_kafka_group_result_t **c_alter_group_offset_res_responses;
+                size_t c_alter_group_offset_res_cnt;
+
+                c_alter_group_offset_res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
+
+                c_alter_group_offset_res_responses =
+                        rd_kafka_AlterConsumerGroupOffsets_result_groups(c_alter_group_offset_res,
+                                                                         &c_alter_group_offset_res_cnt);
+
+                result = Admin_c_GroupResults_to_py(c_alter_group_offset_res_responses,
+                                                    c_alter_group_offset_res_cnt);
+
+                break;
+        }
+
         default:
                 Py_DECREF(error); /* Py_None */
                 error = KafkaError_new0(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
@@ -1473,7 +3286,7 @@ static void Admin_background_event_cb (rd_kafka_t *rk, rd_kafka_event_t *rkev,
 
 static int Admin_init (PyObject *selfobj, PyObject *args, PyObject *kwargs) {
         Handle *self = (Handle *)selfobj;
-        char errstr[256];
+        char errstr[512];
         rd_kafka_conf_t *conf;
 
         if (self->rk) {
@@ -1571,7 +3384,3 @@ PyTypeObject AdminType = {
         0,                         /* tp_alloc */
         Admin_new                  /* tp_new */
 };
-
-
-
-
diff --git a/src/confluent_kafka/src/AdminTypes.c b/src/confluent_kafka/src/AdminTypes.c
index 8a0dfad..4012d51 100644
--- a/src/confluent_kafka/src/AdminTypes.c
+++ b/src/confluent_kafka/src/AdminTypes.c
@@ -74,11 +74,12 @@ static int NewTopic_init (PyObject *self0, PyObject *args,
                                "config",
                                NULL };
 
+        self->num_partitions = -1;
         self->replication_factor = -1;
         self->replica_assignment = NULL;
         self->config = NULL;
 
-        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "si|iOO", kws,
+        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|iiOO", kws,
                                          &topic, &self->num_partitions,
                                          &self->replication_factor,
                                          &self->replica_assignment,
@@ -86,6 +87,7 @@ static int NewTopic_init (PyObject *self0, PyObject *args,
                 return -1;
 
 
+
         if (self->config) {
                 if (!PyDict_Check(self->config)) {
                         PyErr_SetString(PyExc_TypeError,
@@ -125,7 +127,8 @@ static PyMemberDef NewTopic_members[] = {
         { "topic", T_STRING, offsetof(NewTopic, topic), READONLY,
           ":py:attribute:topic - Topic name (string)" },
         { "num_partitions", T_INT, offsetof(NewTopic, num_partitions), 0,
-          ":py:attribute: Number of partitions (int)" },
+          ":py:attribute: Number of partitions (int).\n"
+          "Or -1 if a replica_assignment is specified" },
         { "replication_factor", T_INT, offsetof(NewTopic, replication_factor),
           0,
           " :py:attribute: Replication factor (int).\n"
@@ -147,6 +150,11 @@ static PyMemberDef NewTopic_members[] = {
 
 
 static PyObject *NewTopic_str0 (NewTopic *self) {
+        if (self->num_partitions == -1) {
+                return cfl_PyUnistr(
+                _FromFormat("NewTopic(topic=%s)",
+                            self->topic));
+        }
         return cfl_PyUnistr(
                 _FromFormat("NewTopic(topic=%s,num_partitions=%d)",
                             self->topic, self->num_partitions));
@@ -202,7 +210,12 @@ NewTopic_richcompare (NewTopic *self, PyObject *o2, int op) {
 
 static long NewTopic_hash (NewTopic *self) {
         PyObject *topic = cfl_PyUnistr(_FromString(self->topic));
-        long r = PyObject_Hash(topic) ^ self->num_partitions;
+        long r;
+        if (self->num_partitions == -1) {
+                r = PyObject_Hash(topic);
+        } else {
+                r = PyObject_Hash(topic) ^ self->num_partitions;
+        }
         Py_DECREF(topic);
         return r;
 }
@@ -233,12 +246,12 @@ PyTypeObject NewTopicType = {
         "NewTopic specifies per-topic settings for passing to "
         "AdminClient.create_topics().\n"
         "\n"
-        ".. py:function:: NewTopic(topic, num_partitions, [replication_factor], [replica_assignment], [config])\n"
+        ".. py:function:: NewTopic(topic, [num_partitions], [replication_factor], [replica_assignment], [config])\n"
         "\n"
         "  Instantiate a NewTopic object.\n"
         "\n"
         "  :param string topic: Topic name\n"
-        "  :param int num_partitions: Number of partitions to create\n"
+        "  :param int num_partitions: Number of partitions to create, or -1 if replica_assignment is used.\n"
         "  :param int replication_factor: Replication factor of partitions, or -1 if replica_assignment is used.\n"
         "  :param list replica_assignment: List of lists with the replication assignment for each new partition.\n"
         "  :param dict config: Dict (str:str) of topic configuration. See http://kafka.apache.org/documentation.html#topicconfigs\n"
@@ -487,15 +500,7 @@ int AdminTypes_Ready (void) {
 }
 
 
-/**
- * @brief Add Admin types to module
- */
-void AdminTypes_AddObjects (PyObject *m) {
-        Py_INCREF(&NewTopicType);
-        PyModule_AddObject(m, "NewTopic", (PyObject *)&NewTopicType);
-        Py_INCREF(&NewPartitionsType);
-        PyModule_AddObject(m, "NewPartitions", (PyObject *)&NewPartitionsType);
-
+static void AdminTypes_AddObjectsConfigSource (PyObject *m) {
         /* rd_kafka_ConfigSource_t */
         PyModule_AddIntConstant(m, "CONFIG_SOURCE_UNKNOWN_CONFIG",
                                 RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG);
@@ -509,7 +514,10 @@ void AdminTypes_AddObjects (PyObject *m) {
                                 RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG);
         PyModule_AddIntConstant(m, "CONFIG_SOURCE_DEFAULT_CONFIG",
                                 RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG);
+}
 
+
+static void AdminTypes_AddObjectsResourceType (PyObject *m) {
         /* rd_kafka_ResourceType_t */
         PyModule_AddIntConstant(m, "RESOURCE_UNKNOWN", RD_KAFKA_RESOURCE_UNKNOWN);
         PyModule_AddIntConstant(m, "RESOURCE_ANY", RD_KAFKA_RESOURCE_ANY);
@@ -517,3 +525,64 @@ void AdminTypes_AddObjects (PyObject *m) {
         PyModule_AddIntConstant(m, "RESOURCE_GROUP", RD_KAFKA_RESOURCE_GROUP);
         PyModule_AddIntConstant(m, "RESOURCE_BROKER", RD_KAFKA_RESOURCE_BROKER);
 }
+
+static void AdminTypes_AddObjectsResourcePatternType (PyObject *m) {
+        /* rd_kafka_ResourcePatternType_t */
+        PyModule_AddIntConstant(m, "RESOURCE_PATTERN_UNKNOWN", RD_KAFKA_RESOURCE_PATTERN_UNKNOWN);
+        PyModule_AddIntConstant(m, "RESOURCE_PATTERN_ANY", RD_KAFKA_RESOURCE_PATTERN_ANY);
+        PyModule_AddIntConstant(m, "RESOURCE_PATTERN_MATCH", RD_KAFKA_RESOURCE_PATTERN_MATCH);
+        PyModule_AddIntConstant(m, "RESOURCE_PATTERN_LITERAL", RD_KAFKA_RESOURCE_PATTERN_LITERAL);
+        PyModule_AddIntConstant(m, "RESOURCE_PATTERN_PREFIXED", RD_KAFKA_RESOURCE_PATTERN_PREFIXED);
+}
+
+static void AdminTypes_AddObjectsAclOperation (PyObject *m) {
+        /* rd_kafka_AclOperation_t */
+        PyModule_AddIntConstant(m, "ACL_OPERATION_UNKNOWN", RD_KAFKA_ACL_OPERATION_UNKNOWN);
+        PyModule_AddIntConstant(m, "ACL_OPERATION_ANY", RD_KAFKA_ACL_OPERATION_ANY);
+        PyModule_AddIntConstant(m, "ACL_OPERATION_ALL", RD_KAFKA_ACL_OPERATION_ALL);
+        PyModule_AddIntConstant(m, "ACL_OPERATION_READ", RD_KAFKA_ACL_OPERATION_READ);
+        PyModule_AddIntConstant(m, "ACL_OPERATION_WRITE", RD_KAFKA_ACL_OPERATION_WRITE);
+        PyModule_AddIntConstant(m, "ACL_OPERATION_CREATE", RD_KAFKA_ACL_OPERATION_CREATE);
+        PyModule_AddIntConstant(m, "ACL_OPERATION_DELETE", RD_KAFKA_ACL_OPERATION_DELETE);
+        PyModule_AddIntConstant(m, "ACL_OPERATION_ALTER", RD_KAFKA_ACL_OPERATION_ALTER);
+        PyModule_AddIntConstant(m, "ACL_OPERATION_DESCRIBE", RD_KAFKA_ACL_OPERATION_DESCRIBE);
+        PyModule_AddIntConstant(m, "ACL_OPERATION_CLUSTER_ACTION", RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION);
+        PyModule_AddIntConstant(m, "ACL_OPERATION_DESCRIBE_CONFIGS", RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS);
+        PyModule_AddIntConstant(m, "ACL_OPERATION_ALTER_CONFIGS", RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS);
+        PyModule_AddIntConstant(m, "ACL_OPERATION_IDEMPOTENT_WRITE", RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE);
+}
+
+static void AdminTypes_AddObjectsAclPermissionType (PyObject *m) {
+        /* rd_kafka_AclPermissionType_t */
+        PyModule_AddIntConstant(m, "ACL_PERMISSION_TYPE_UNKNOWN", RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN);
+        PyModule_AddIntConstant(m, "ACL_PERMISSION_TYPE_ANY", RD_KAFKA_ACL_PERMISSION_TYPE_ANY);
+        PyModule_AddIntConstant(m, "ACL_PERMISSION_TYPE_DENY", RD_KAFKA_ACL_PERMISSION_TYPE_DENY);
+        PyModule_AddIntConstant(m, "ACL_PERMISSION_TYPE_ALLOW", RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW);
+}
+
+static void AdminTypes_AddObjectsConsumerGroupStates (PyObject *m) {
+        /* rd_kafka_consumer_group_state_t */
+        PyModule_AddIntConstant(m, "CONSUMER_GROUP_STATE_UNKNOWN", RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN);
+        PyModule_AddIntConstant(m, "CONSUMER_GROUP_STATE_PREPARING_REBALANCE", RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE);
+        PyModule_AddIntConstant(m, "CONSUMER_GROUP_STATE_COMPLETING_REBALANCE", RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE);
+        PyModule_AddIntConstant(m, "CONSUMER_GROUP_STATE_STABLE", RD_KAFKA_CONSUMER_GROUP_STATE_STABLE);
+        PyModule_AddIntConstant(m, "CONSUMER_GROUP_STATE_DEAD", RD_KAFKA_CONSUMER_GROUP_STATE_DEAD);
+        PyModule_AddIntConstant(m, "CONSUMER_GROUP_STATE_EMPTY", RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY);
+}
+
+/**
+ * @brief Add Admin types to module
+ */
+void AdminTypes_AddObjects (PyObject *m) {
+        Py_INCREF(&NewTopicType);
+        PyModule_AddObject(m, "NewTopic", (PyObject *)&NewTopicType);
+        Py_INCREF(&NewPartitionsType);
+        PyModule_AddObject(m, "NewPartitions", (PyObject *)&NewPartitionsType);
+
+        AdminTypes_AddObjectsConfigSource(m);
+        AdminTypes_AddObjectsResourceType(m);
+        AdminTypes_AddObjectsResourcePatternType(m);
+        AdminTypes_AddObjectsAclOperation(m);
+        AdminTypes_AddObjectsAclPermissionType(m);
+        AdminTypes_AddObjectsConsumerGroupStates(m);
+}
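
The new module constants surface in Python as enums such as ConsumerGroupState, AclOperation and ResourcePatternType. As a sketch, and assuming the states keyword of list_consumer_groups() in the 2.x API, the state constants can be used to filter group listings:

```python
from confluent_kafka import ConsumerGroupState
from confluent_kafka.admin import AdminClient

admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker

# The CONSUMER_GROUP_STATE_* constants registered above back the
# ConsumerGroupState enum used for filtering here (assumed 2.x behaviour).
future = admin.list_consumer_groups(
        states={ConsumerGroupState.STABLE, ConsumerGroupState.EMPTY})
for listing in future.result().valid:
    print(listing.group_id, listing.state)
```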
diff --git a/src/confluent_kafka/src/Consumer.c b/src/confluent_kafka/src/Consumer.c
index 0a8fe78..de574be 100644
--- a/src/confluent_kafka/src/Consumer.c
+++ b/src/confluent_kafka/src/Consumer.c
@@ -486,6 +486,7 @@ static PyObject *Consumer_commit (Handle *self, PyObject *args,
 	} else if (msg) {
 		Message *m;
                 PyObject *uo8;
+                rd_kafka_topic_partition_t *rktpar;
 
 		if (PyObject_Type((PyObject *)msg) !=
 		    (PyObject *)&MessageType) {
@@ -497,9 +498,12 @@ static PyObject *Consumer_commit (Handle *self, PyObject *args,
 		m = (Message *)msg;
 
 		c_offsets = rd_kafka_topic_partition_list_new(1);
-		rd_kafka_topic_partition_list_add(
-			c_offsets, cfl_PyUnistr_AsUTF8(m->topic, &uo8),
-			m->partition)->offset =m->offset + 1;
+		rktpar = rd_kafka_topic_partition_list_add(
+			        c_offsets, cfl_PyUnistr_AsUTF8(m->topic, &uo8),
+			        m->partition);
+                rktpar->offset = m->offset + 1;
+                rd_kafka_topic_partition_set_leader_epoch(rktpar,
+                        m->leader_epoch);
                 Py_XDECREF(uo8);
 
 	} else {
@@ -612,6 +616,7 @@ static PyObject *Consumer_store_offsets (Handle *self, PyObject *args,
 	} else {
 		Message *m;
 		PyObject *uo8;
+                rd_kafka_topic_partition_t *rktpar;
 
 		if (PyObject_Type((PyObject *)msg) !=
 		    (PyObject *)&MessageType) {
@@ -623,9 +628,12 @@ static PyObject *Consumer_store_offsets (Handle *self, PyObject *args,
 		m = (Message *)msg;
 
 		c_offsets = rd_kafka_topic_partition_list_new(1);
-		rd_kafka_topic_partition_list_add(
+		rktpar = rd_kafka_topic_partition_list_add(
 			c_offsets, cfl_PyUnistr_AsUTF8(m->topic, &uo8),
-			m->partition)->offset = m->offset + 1;
+			m->partition);
+                rktpar->offset = m->offset + 1;
+                rd_kafka_topic_partition_set_leader_epoch(rktpar,
+                        m->leader_epoch);
 		Py_XDECREF(uo8);
 	}
 
@@ -783,9 +791,11 @@ static PyObject *Consumer_resume (Handle *self, PyObject *args,
 static PyObject *Consumer_seek (Handle *self, PyObject *args, PyObject *kwargs) {
 
         TopicPartition *tp;
-        rd_kafka_resp_err_t err;
+        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
         static char *kws[] = { "partition", NULL };
-        rd_kafka_topic_t *rkt;
+        rd_kafka_topic_partition_list_t *seek_partitions;
+        rd_kafka_topic_partition_t *rktpar;
+        rd_kafka_error_t *error;
 
         if (!self->rk) {
                 PyErr_SetString(PyExc_RuntimeError, "Consumer closed");
@@ -803,21 +813,26 @@ static PyObject *Consumer_seek (Handle *self, PyObject *args, PyObject *kwargs)
                 return NULL;
         }
 
-        rkt = rd_kafka_topic_new(self->rk, tp->topic, NULL);
-        if (!rkt) {
-                cfl_PyErr_Format(rd_kafka_last_error(),
-                                 "Failed to get topic object for "
-                                 "topic \"%s\": %s",
-                                 tp->topic,
-                                 rd_kafka_err2str(rd_kafka_last_error()));
-                return NULL;
-        }
+        seek_partitions = rd_kafka_topic_partition_list_new(1);
+        rktpar = rd_kafka_topic_partition_list_add(seek_partitions,
+                        tp->topic, tp->partition);
+        rktpar->offset = tp->offset;
+        rd_kafka_topic_partition_set_leader_epoch(rktpar, tp->leader_epoch);
 
         Py_BEGIN_ALLOW_THREADS;
-        err = rd_kafka_seek(rkt, tp->partition, tp->offset, -1);
+        error = rd_kafka_seek_partitions(self->rk, seek_partitions, -1);
         Py_END_ALLOW_THREADS;
 
-        rd_kafka_topic_destroy(rkt);
+        if (error) {
+                err = rd_kafka_error_code(error);
+                rd_kafka_error_destroy(error);
+        }
+
+        if (!err && seek_partitions->elems[0].err) {
+                err = seek_partitions->elems[0].err;
+        }
+
+        rd_kafka_topic_partition_list_destroy(seek_partitions);
 
         if (err) {
                 cfl_PyErr_Format(err,
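
Consumer.seek() now goes through rd_kafka_seek_partitions(), so per-partition errors are propagated rather than only a topic-level failure. A usage sketch; topic, partition and broker are placeholders, and the partition must already be actively consumed when seeking:

```python
from confluent_kafka import Consumer, TopicPartition

consumer = Consumer({
    "bootstrap.servers": "localhost:9092",   # placeholder broker
    "group.id": "demo-group",
})
consumer.assign([TopicPartition("demo-topic", 0)])
consumer.poll(1.0)   # let the assignment become active before seeking

# Rewind partition 0 to offset 0; a per-partition error now surfaces as a
# KafkaException raised by seek().
consumer.seek(TopicPartition("demo-topic", 0, 0))
```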
@@ -970,8 +985,8 @@ static PyObject *Consumer_poll (Handle *self, PyObject *args,
 
         msgobj = Message_new0(self, rkm);
 #ifdef RD_KAFKA_V_HEADERS
-        // Have to detach headers outside Message_new0 because it declares the
-        // rk message as a const
+        /** Have to detach headers outside Message_new0 because it declares the
+          * rk message as a const */
         rd_kafka_message_detach_headers(rkm, &((Message *)msgobj)->c_headers);
 #endif
         rd_kafka_message_destroy(rkm);
@@ -980,6 +995,33 @@ static PyObject *Consumer_poll (Handle *self, PyObject *args,
 }
 
 
+static PyObject *Consumer_memberid (Handle *self, PyObject *args,
+                                    PyObject *kwargs) {
+        char *memberid;
+        PyObject *memberidobj;
+        if (!self->rk) {
+                PyErr_SetString(PyExc_RuntimeError,
+                                "Consumer closed");
+                return NULL;
+        }
+
+        memberid = rd_kafka_memberid(self->rk);
+
+        if (!memberid)
+                Py_RETURN_NONE;
+
+        if (!*memberid) {
+                rd_kafka_mem_free(self->rk, memberid);
+                Py_RETURN_NONE;
+        }
+
+        memberidobj = Py_BuildValue("s", memberid);
+        rd_kafka_mem_free(self->rk, memberid);
+
+        return memberidobj;
+}
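
The new memberid() binding returns the broker-assigned group member id, or None before the first rebalance has completed. A sketch; broker, group and topic names are placeholders:

```python
from confluent_kafka import Consumer

consumer = Consumer({
    "bootstrap.servers": "localhost:9092",   # placeholder broker
    "group.id": "demo-group",
})
consumer.subscribe(["demo-topic"])

# memberid() is None until the coordinator has assigned an id; poll() drives
# the join/rebalance handshake that produces it.
consumer.poll(1.0)
print("member id:", consumer.memberid())
consumer.close()
```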
+
+
 static PyObject *Consumer_consume (Handle *self, PyObject *args,
                                         PyObject *kwargs) {
         unsigned int num_messages = 1;
@@ -1035,8 +1077,8 @@ static PyObject *Consumer_consume (Handle *self, PyObject *args,
         for (i = 0; i < n; i++) {
                 PyObject *msgobj = Message_new0(self, rkmessages[i]);
 #ifdef RD_KAFKA_V_HEADERS
-                // Have to detach headers outside Message_new0 because it declares the
-                // rk message as a const
+                /** Have to detach headers outside Message_new0 because it declares the
+                  * rk message as a const */
                 rd_kafka_message_detach_headers(rkmessages[i], &((Message *)msgobj)->c_headers);
 #endif
                 PyList_SET_ITEM(msglist, i, msgobj);
@@ -1409,6 +1451,19 @@ static PyMethodDef Consumer_methods[] = {
           "  :raises: RuntimeError if called on a closed consumer\n"
           "\n"
         },
+        { "memberid", (PyCFunction)Consumer_memberid, METH_NOARGS,
+          ".. py:function:: memberid()\n"
+          "\n"
+          " Return this client's broker-assigned group member id.\n"
+          "\n"
+          " The member id is assigned by the group coordinator and"
+          " is propagated to the consumer during rebalance.\n"
+          "\n"
+          "  :returns: Member id string or None\n"
+          "  :rtype: string\n"
+          "  :raises: RuntimeError if called on a closed consumer\n"
+          "\n"
+        },
 	{ "close", (PyCFunction)Consumer_close, METH_NOARGS,
 	  "\n"
 	  "  Close down and terminate the Kafka Consumer.\n"
@@ -1437,6 +1492,9 @@ static PyMethodDef Consumer_methods[] = {
           "send_offsets_to_transaction() API.\n"
           "\n"
         },
+        { "set_sasl_credentials", (PyCFunction)set_sasl_credentials, METH_VARARGS|METH_KEYWORDS,
+           set_sasl_credentials_doc
+        },
 
 
 	{ NULL }
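
set_sasl_credentials() is registered on the Producer, Consumer and Admin handles alike, so SASL secrets can be rotated on a live client. A sketch assuming a SCRAM listener; all addresses and credentials are placeholders:

```python
from confluent_kafka import Consumer, Producer
from confluent_kafka.admin import AdminClient

conf = {
    "bootstrap.servers": "localhost:9093",   # placeholder SASL_SSL listener
    "security.protocol": "SASL_SSL",
    "sasl.mechanism": "SCRAM-SHA-256",
    "sasl.username": "initial-user",
    "sasl.password": "initial-secret",
}

producer = Producer(conf)
consumer = Consumer({**conf, "group.id": "demo-group"})
admin = AdminClient(conf)

# Updates the credentials used for subsequent broker authentication attempts
# without recreating the client objects.
for client in (producer, consumer, admin):
    client.set_sasl_credentials("rotated-user", "rotated-secret")
```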
diff --git a/src/confluent_kafka/src/Metadata.c b/src/confluent_kafka/src/Metadata.c
index e35461d..31e1db9 100644
--- a/src/confluent_kafka/src/Metadata.c
+++ b/src/confluent_kafka/src/Metadata.c
@@ -595,6 +595,11 @@ list_groups (Handle *self, PyObject *args, PyObject *kwargs) {
         double tmout = -1.0f;
         static char *kws[] = {"group", "timeout", NULL};
 
+        PyErr_WarnEx(PyExc_DeprecationWarning,
+                     "list_groups() is deprecated, use list_consumer_groups() "
+                     "and describe_consumer_groups() instead.",
+                     2);
+
         if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|zd", kws,
                                          &group, &tmout))
                 return NULL;
@@ -625,6 +630,9 @@ end:
 }
 
 const char list_groups_doc[] = PyDoc_STR(
+        ".. deprecated:: 2.0.2"
+        "   Use :func:`list_consumer_groups` and `describe_consumer_groups` instead."
+        "\n"
         ".. py:function:: list_groups([group=None], [timeout=-1])\n"
         "\n"
         " Request Group Metadata from cluster.\n"
diff --git a/src/confluent_kafka/src/Producer.c b/src/confluent_kafka/src/Producer.c
index 2026593..b6a51f5 100644
--- a/src/confluent_kafka/src/Producer.c
+++ b/src/confluent_kafka/src/Producer.c
@@ -542,7 +542,7 @@ static void *Producer_purge (Handle *self, PyObject *args,
                 return NULL;
         if (in_queue)
                 purge_strategy = RD_KAFKA_PURGE_F_QUEUE;
-        if (in_flight) 
+        if (in_flight)
                 purge_strategy |= RD_KAFKA_PURGE_F_INFLIGHT;
         if (blocking)
                 purge_strategy |= RD_KAFKA_PURGE_F_NON_BLOCKING;
@@ -584,7 +584,7 @@ static PyMethodDef Producer_methods[] = {
 	  "failed delivery\n"
           "  :param int timestamp: Message timestamp (CreateTime) in milliseconds since epoch UTC (requires librdkafka >= v0.9.4, api.version.request=true, and broker >= 0.10.0.0). Default value is current time.\n"
 	  "\n"
-          "  :param headers dict|list: Message headers to set on the message. The header key must be a string while the value must be binary, unicode or None. Accepts a list of (key,value) or a dict. (Requires librdkafka >= v0.11.4 and broker version >= 0.11.0.0)\n"
+          "  :param dict|list headers: Message headers to set on the message. The header key must be a string while the value must be binary, unicode or None. Accepts a list of (key,value) or a dict. (Requires librdkafka >= v0.11.4 and broker version >= 0.11.0.0)\n"
 	  "  :rtype: None\n"
 	  "  :raises BufferError: if the internal producer message queue is "
 	  "full (``queue.buffering.max.messages`` exceeded)\n"
@@ -665,7 +665,7 @@ static PyMethodDef Producer_methods[] = {
           "\n"
           "  Upon successful return from this function the application has to\n"
           "  perform at least one of the following operations within \n"
-          "  `transactional.timeout.ms` to avoid timing out the transaction\n"
+          "  `transaction.timeout.ms` to avoid timing out the transaction\n"
           "  on the broker:\n"
           "  * produce() (et.al)\n"
           "  * send_offsets_to_transaction()\n"
@@ -811,6 +811,9 @@ static PyMethodDef Producer_methods[] = {
           "           Treat any other error as a fatal error.\n"
           "\n"
         },
+        { "set_sasl_credentials", (PyCFunction)set_sasl_credentials, METH_VARARGS|METH_KEYWORDS,
+           set_sasl_credentials_doc
+        },
         { NULL }
 };
 
@@ -824,6 +827,23 @@ static PySequenceMethods Producer_seq_methods = {
 	(lenfunc)Producer__len__ /* sq_length */
 };
 
+static int Producer__bool__ (Handle *self) {
+        return 1;
+}
+
+static PyNumberMethods Producer_num_methods = {
+     0, // nb_add
+     0, // nb_subtract
+     0, // nb_multiply
+     0, // nb_remainder
+     0, // nb_divmod
+     0, // nb_power
+     0, // nb_negative
+     0, // nb_positive
+     0, // nb_absolute
+     (inquiry)Producer__bool__ // nb_bool
+};
+
 
 static int Producer_init (PyObject *selfobj, PyObject *args, PyObject *kwargs) {
         Handle *self = (Handle *)selfobj;
@@ -879,8 +899,8 @@ PyTypeObject ProducerType = {
 	0,                         /*tp_setattr*/
 	0,                         /*tp_compare*/
 	0,                         /*tp_repr*/
-	0,                         /*tp_as_number*/
-	&Producer_seq_methods,  /*tp_as_sequence*/
+	&Producer_num_methods,     /*tp_as_number*/
+	&Producer_seq_methods,     /*tp_as_sequence*/
 	0,                         /*tp_as_mapping*/
 	0,                         /*tp_hash */
 	0,                         /*tp_call*/
@@ -899,8 +919,9 @@ PyTypeObject ProducerType = {
         "  Create a new Producer instance using the provided configuration dict.\n"
         "\n"
         "\n"
-        ".. py:function:: len()\n"
+        ".. py:function:: __len__(self)\n"
         "\n"
+	"  Producer implements __len__ that can be used as len(producer) to obtain number of messages waiting.\n"
         "  :returns: Number of messages and Kafka protocol requests waiting to be delivered to broker.\n"
         "  :rtype: int\n"
         "\n", /*tp_doc*/
@@ -922,7 +943,3 @@ PyTypeObject ProducerType = {
 	0,                         /* tp_alloc */
 	Producer_new           /* tp_new */
 };
-
-
-
-
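
The nb_bool slot added above makes a Producer instance always truthy, so code such as
"if producer:" no longer depends on the queue length reported by __len__. A brief
illustrative sketch (broker address and topic are placeholders):

    from confluent_kafka import Producer

    producer = Producer({"bootstrap.servers": "localhost:9092"})  # placeholder broker

    producer.produce("example-topic", value=b"payload")
    print(len(producer))   # messages and protocol requests still queued for delivery
    print(bool(producer))  # always True now, even when len(producer) == 0
    producer.flush()
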
diff --git a/src/confluent_kafka/src/confluent_kafka.c b/src/confluent_kafka/src/confluent_kafka.c
index bcd2939..4787404 100644
--- a/src/confluent_kafka/src/confluent_kafka.c
+++ b/src/confluent_kafka/src/confluent_kafka.c
@@ -258,13 +258,13 @@ static void KafkaError_init (KafkaError *self,
                 self->str = NULL;
 }
 
-static int KafkaError_init0 (PyObject *selfobj, PyObject *args, 
+static int KafkaError_init0 (PyObject *selfobj, PyObject *args,
                              PyObject *kwargs) {
         KafkaError *self = (KafkaError *)selfobj;
         int code;
         int fatal = 0, retriable = 0, txn_requires_abort = 0;
         const char *reason = NULL;
-        static char *kws[] = { "error", "reason", "fatal", 
+        static char *kws[] = { "error", "reason", "fatal",
                                "retriable", "txn_requires_abort", NULL };
 
         if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|ziii", kws, &code,
@@ -476,6 +476,13 @@ static PyObject *Message_offset (Message *self, PyObject *ignore) {
 		Py_RETURN_NONE;
 }
 
+static PyObject *Message_leader_epoch (Message *self, PyObject *ignore) {
+	if (self->leader_epoch >= 0)
+		return cfl_PyInt_FromInt(self->leader_epoch);
+	else
+		Py_RETURN_NONE;
+}
+
 
 static PyObject *Message_timestamp (Message *self, PyObject *ignore) {
 	return Py_BuildValue("iL",
@@ -571,6 +578,11 @@ static PyMethodDef Message_methods[] = {
 	  "  :rtype: int or None\n"
 	  "\n"
 	},
+        { "leader_epoch", (PyCFunction)Message_leader_epoch, METH_NOARGS,
+	  "  :returns: message offset leader epoch or None if not available.\n"
+	  "  :rtype: int or None\n"
+	  "\n"
+	},
 	{ "timestamp", (PyCFunction)Message_timestamp, METH_NOARGS,
           "Retrieve timestamp type and timestamp from message.\n"
           "The timestamp type is one of:\n\n"
@@ -743,7 +755,7 @@ PyTypeObject MessageType = {
 	0,		           /* tp_weaklistoffset */
 	0,		           /* tp_iter */
 	0,		           /* tp_iternext */
-	Message_methods,       /* tp_methods */
+	Message_methods,           /* tp_methods */
 	0,                         /* tp_members */
 	0,                         /* tp_getset */
 	0,                         /* tp_base */
@@ -784,6 +796,7 @@ PyObject *Message_new0 (const Handle *handle, const rd_kafka_message_t *rkm) {
 
 	self->partition = rkm->partition;
 	self->offset = rkm->offset;
+        self->leader_epoch = rd_kafka_message_leader_epoch(rkm);
 
 	self->timestamp = rd_kafka_message_timestamp(rkm, &self->tstype);
 
@@ -816,15 +829,32 @@ static int TopicPartition_clear (TopicPartition *self) {
 		Py_DECREF(self->error);
 		self->error = NULL;
 	}
+	if (self->metadata) {
+		free(self->metadata);
+		self->metadata = NULL;
+	}
 	return 0;
 }
 
 static void TopicPartition_setup (TopicPartition *self, const char *topic,
 				  int partition, long long offset,
+                                  int32_t leader_epoch,
+				  const char *metadata,
 				  rd_kafka_resp_err_t err) {
 	self->topic = strdup(topic);
 	self->partition = partition;
 	self->offset = offset;
+
+        if (leader_epoch < 0)
+                leader_epoch = -1;
+        self->leader_epoch = leader_epoch;
+
+	if (metadata != NULL) {
+		self->metadata = strdup(metadata);
+	} else {
+		self->metadata = NULL;
+	}
+
 	self->error = KafkaError_new_or_None(err, NULL);
 }
 
@@ -842,19 +872,27 @@ static int TopicPartition_init (PyObject *self, PyObject *args,
 				      PyObject *kwargs) {
 	const char *topic;
 	int partition = RD_KAFKA_PARTITION_UA;
+        int32_t leader_epoch = -1;
 	long long offset = RD_KAFKA_OFFSET_INVALID;
+	const char *metadata = NULL;
+
 	static char *kws[] = { "topic",
 			       "partition",
 			       "offset",
+			       "metadata",
+                               "leader_epoch",
 			       NULL };
 
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|iL", kws,
-					 &topic, &partition, &offset))
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|iLsi", kws,
+					 &topic, &partition, &offset,
+					 &metadata,
+                                         &leader_epoch)) {
 		return -1;
+	}
 
 	TopicPartition_setup((TopicPartition *)self,
-			     topic, partition, offset, 0);
-
+			     topic, partition, offset,
+                             leader_epoch, metadata, 0);
 	return 0;
 }
 
@@ -874,6 +912,13 @@ static int TopicPartition_traverse (TopicPartition *self,
 	return 0;
 }
 
+static PyObject *TopicPartition_get_leader_epoch (TopicPartition *tp, void *closure) {
+        if (tp->leader_epoch >= 0) {
+                return cfl_PyInt_FromInt(tp->leader_epoch);
+        }
+        Py_RETURN_NONE;
+}
+
 
 static PyMemberDef TopicPartition_members[] = {
         { "topic", T_STRING, offsetof(TopicPartition, topic), READONLY,
@@ -889,11 +934,28 @@ static PyMemberDef TopicPartition_members[] = {
           " :py:const:`OFFSET_STORED`,"
           " :py:const:`OFFSET_INVALID`\n"
         },
+        {"metadata", T_STRING, offsetof(TopicPartition, metadata), READONLY,
+         "attribute metadata: Optional application metadata committed with the "
+         "offset (string)"},
         { "error", T_OBJECT, offsetof(TopicPartition, error), READONLY,
           ":attribute error: Indicates an error (with :py:class:`KafkaError`) unless None." },
         { NULL }
 };
 
+static PyGetSetDef TopicPartition_getters_and_setters[] = {
+        {
+          /* name */
+          "leader_epoch",
+          (getter) TopicPartition_get_leader_epoch,
+          NULL,
+          /* doc */
+          ":attribute leader_epoch: Offset leader epoch (int), or None",
+          /* closure */
+          NULL
+        },
+        { NULL }
+};
+
 
 static PyObject *TopicPartition_str0 (TopicPartition *self) {
         PyObject *errstr = NULL;
@@ -901,8 +963,15 @@ static PyObject *TopicPartition_str0 (TopicPartition *self) {
         const char *c_errstr = NULL;
 	PyObject *ret;
 	char offset_str[40];
+        char leader_epoch_str[12];
 
 	snprintf(offset_str, sizeof(offset_str), "%"CFL_PRId64"", self->offset);
+        if (self->leader_epoch >= 0)
+                snprintf(leader_epoch_str, sizeof(leader_epoch_str),
+                        "%"CFL_PRId32"", self->leader_epoch);
+        else
+                snprintf(leader_epoch_str, sizeof(leader_epoch_str),
+                        "None");
 
         if (self->error != Py_None) {
                 errstr = cfl_PyObject_Unistr(self->error);
@@ -911,9 +980,10 @@ static PyObject *TopicPartition_str0 (TopicPartition *self) {
 
 	ret = cfl_PyUnistr(
 		_FromFormat("TopicPartition{topic=%s,partition=%"CFL_PRId32
-			    ",offset=%s,error=%s}",
+			    ",offset=%s,leader_epoch=%s,error=%s}",
 			    self->topic, self->partition,
 			    offset_str,
+                            leader_epoch_str,
 			    c_errstr ? c_errstr : "None"));
         Py_XDECREF(errstr8);
         Py_XDECREF(errstr);
@@ -1005,47 +1075,53 @@ PyTypeObject TopicPartitionType = {
 	"It is typically used to provide a list of topics or partitions for "
 	"various operations, such as :py:func:`Consumer.assign()`.\n"
 	"\n"
-	".. py:function:: TopicPartition(topic, [partition], [offset])\n"
+	".. py:function:: TopicPartition(topic, [partition], [offset],"
+        " [metadata], [leader_epoch])\n"
 	"\n"
 	"  Instantiate a TopicPartition object.\n"
 	"\n"
 	"  :param string topic: Topic name\n"
 	"  :param int partition: Partition id\n"
 	"  :param int offset: Initial partition offset\n"
+        "  :param string metadata: Offset metadata\n"
+        "  :param int leader_epoch: Offset leader epoch\n"
 	"  :rtype: TopicPartition\n"
 	"\n"
 	"\n", /*tp_doc*/
 	(traverseproc)TopicPartition_traverse, /* tp_traverse */
 	(inquiry)TopicPartition_clear,       /* tp_clear */
 	(richcmpfunc)TopicPartition_richcompare, /* tp_richcompare */
-	0,		           /* tp_weaklistoffset */
-	0,		           /* tp_iter */
-	0,		           /* tp_iternext */
-	0,                         /* tp_methods */
-	TopicPartition_members,/* tp_members */
-	0,                         /* tp_getset */
-	0,                         /* tp_base */
-	0,                         /* tp_dict */
-	0,                         /* tp_descr_get */
-	0,                         /* tp_descr_set */
-	0,                         /* tp_dictoffset */
-	TopicPartition_init,       /* tp_init */
-	0,                         /* tp_alloc */
-	TopicPartition_new         /* tp_new */
+	0,		                    /* tp_weaklistoffset */
+	0,		                    /* tp_iter */
+	0,		                    /* tp_iternext */
+	0,                                  /* tp_methods */
+	TopicPartition_members,             /* tp_members */
+	TopicPartition_getters_and_setters, /* tp_getset */
+	0,                                  /* tp_base */
+	0,                                  /* tp_dict */
+	0,                                  /* tp_descr_get */
+	0,                                  /* tp_descr_set */
+	0,                                  /* tp_dictoffset */
+	TopicPartition_init,                /* tp_init */
+	0,                                  /* tp_alloc */
+	TopicPartition_new                  /* tp_new */
 };
 
 /**
  * @brief Internal factory to create a TopicPartition object.
  */
 static PyObject *TopicPartition_new0 (const char *topic, int partition,
-				      long long offset,
+				      long long offset, int32_t leader_epoch,
+                                      const char *metadata,
 				      rd_kafka_resp_err_t err) {
 	TopicPartition *self;
 
 	self = (TopicPartition *)TopicPartitionType.tp_new(
 		&TopicPartitionType, NULL, NULL);
 
-	TopicPartition_setup(self, topic, partition, offset, err);
+	TopicPartition_setup(self, topic, partition,
+			     offset, leader_epoch,
+                             metadata, err);
 
 	return (PyObject *)self;
 }
@@ -1069,7 +1145,10 @@ PyObject *c_parts_to_py (const rd_kafka_topic_partition_list_t *c_parts) {
 		PyList_SET_ITEM(parts, i,
 				TopicPartition_new0(
 					rktpar->topic, rktpar->partition,
-					rktpar->offset, rktpar->err));
+					rktpar->offset,
+                                        rd_kafka_topic_partition_get_leader_epoch(rktpar),
+					rktpar->metadata,
+					rktpar->err));
 	}
 
 	return parts;
@@ -1094,6 +1173,7 @@ rd_kafka_topic_partition_list_t *py_to_c_parts (PyObject *plist) {
 	c_parts = rd_kafka_topic_partition_list_new((int)PyList_Size(plist));
 
 	for (i = 0 ; i < (size_t)PyList_Size(plist) ; i++) {
+		rd_kafka_topic_partition_t *rktpar;
 		TopicPartition *tp = (TopicPartition *)
 			PyList_GetItem(plist, i);
 
@@ -1106,10 +1186,19 @@ rd_kafka_topic_partition_list_t *py_to_c_parts (PyObject *plist) {
 			return NULL;
 		}
 
-		rd_kafka_topic_partition_list_add(c_parts,
-						  tp->topic,
-						  tp->partition)->offset =
-			tp->offset;
+		rktpar = rd_kafka_topic_partition_list_add(c_parts,
+							   tp->topic,
+							   tp->partition);
+		rktpar->offset = tp->offset;
+                rd_kafka_topic_partition_set_leader_epoch(rktpar,
+                        tp->leader_epoch);
+		if (tp->metadata != NULL) {
+			rktpar->metadata_size = strlen(tp->metadata) + 1;
+			rktpar->metadata = strdup(tp->metadata);
+		} else {
+			rktpar->metadata_size = 0;
+			rktpar->metadata = NULL;
+		}
 	}
 
 	return c_parts;
@@ -1288,7 +1377,7 @@ PyObject *c_headers_to_py (rd_kafka_headers_t *headers) {
 
     while (!rd_kafka_header_get_all(headers, idx++,
                                      &header_key, &header_value, &header_value_size)) {
-            // Create one (key, value) tuple for each header
+            /* Create one (key, value) tuple for each header */
             PyObject *header_tuple = PyTuple_New(2);
             PyTuple_SetItem(header_tuple, 0,
                 cfl_PyUnistr(_FromString(header_key))
@@ -1299,6 +1388,7 @@ PyObject *c_headers_to_py (rd_kafka_headers_t *headers) {
                         cfl_PyBin(_FromStringAndSize(header_value, header_value_size))
                     );
             } else {
+                Py_INCREF(Py_None);
                 PyTuple_SetItem(header_tuple, 1, Py_None);
             }
         PyList_SET_ITEM(header_list, idx-1, header_tuple);
@@ -1357,6 +1447,40 @@ rd_kafka_consumer_group_metadata_t *py_to_c_cgmd (PyObject *obj) {
         return cgmd;
 }
 
+PyObject *c_Node_to_py(const rd_kafka_Node_t *c_node) {
+        PyObject *node = NULL;
+        PyObject *Node_type = NULL;
+        PyObject *args = NULL;
+        PyObject *kwargs = NULL;
+
+        Node_type = cfl_PyObject_lookup("confluent_kafka",
+                                        "Node");
+        if (!Node_type) {
+                goto err;
+        }
+
+        kwargs = PyDict_New();
+
+        cfl_PyDict_SetInt(kwargs, "id", rd_kafka_Node_id(c_node));
+        cfl_PyDict_SetInt(kwargs, "port", rd_kafka_Node_port(c_node));
+        cfl_PyDict_SetString(kwargs, "host", rd_kafka_Node_host(c_node));
+
+        args = PyTuple_New(0);
+
+        node = PyObject_Call(Node_type, args, kwargs);
+
+        Py_DECREF(Node_type);
+        Py_DECREF(args);
+        Py_DECREF(kwargs);
+        return node;
+
+err:
+        Py_XDECREF(Node_type);
+        Py_XDECREF(args);
+        Py_XDECREF(kwargs);
+        return NULL;
+}
+
 
 /****************************************************************************
  *
@@ -1521,6 +1645,62 @@ static void log_cb (const rd_kafka_t *rk, int level,
         CallState_resume(cs);
 }
 
+/**
+ * @brief Translate Python \p key and \p value to C types and set on
+ *        provided \p extensions char* array at the provided index.
+ *
+ * @returns 1 on success or 0 if an exception was raised.
+ */
+static int py_extensions_to_c (char **extensions, Py_ssize_t idx,
+                               PyObject *key, PyObject *value) {
+        PyObject *ks, *ks8, *vo8 = NULL;
+        const char *k;
+        const char *v;
+        Py_ssize_t ksize = 0;
+        Py_ssize_t vsize = 0;
+
+        if (!(ks = cfl_PyObject_Unistr(key))) {
+                PyErr_SetString(PyExc_TypeError,
+                                "expected extension key to be unicode "
+                                "string");
+                return 0;
+        }
+
+        k = cfl_PyUnistr_AsUTF8(ks, &ks8);
+        ksize = (Py_ssize_t)strlen(k);
+
+        if (cfl_PyUnistr(_Check(value))) {
+                /* Unicode string, translate to utf-8. */
+                v = cfl_PyUnistr_AsUTF8(value, &vo8);
+                if (!v) {
+                        Py_DECREF(ks);
+                        Py_XDECREF(ks8);
+                        return 0;
+                }
+                vsize = (Py_ssize_t)strlen(v);
+        } else {
+                PyErr_Format(PyExc_TypeError,
+                             "expected extension value to be "
+                             "unicode string, not %s",
+                             ((PyTypeObject *)PyObject_Type(value))->
+                             tp_name);
+                Py_DECREF(ks);
+                Py_XDECREF(ks8);
+                return 0;
+        }
+
+        extensions[idx] = (char*)malloc(ksize + 1);
+        strcpy(extensions[idx], k);
+        extensions[idx + 1] = (char*)malloc(vsize + 1);
+        strcpy(extensions[idx + 1], v);
+
+        Py_DECREF(ks);
+        Py_XDECREF(ks8);
+        Py_XDECREF(vo8);
+
+        return 1;
+}
+
 static void oauth_cb (rd_kafka_t *rk, const char *oauthbearer_config,
                       void *opaque) {
         Handle *h = opaque;
@@ -1528,6 +1708,10 @@ static void oauth_cb (rd_kafka_t *rk, const char *oauthbearer_config,
         CallState *cs;
         const char *token;
         double expiry;
+        const char *principal = "";
+        PyObject *extensions = NULL;
+        char **rd_extensions = NULL;
+        Py_ssize_t rd_extensions_size = 0;
         char err_msg[2048];
         rd_kafka_resp_err_t err_code;
 
@@ -1538,26 +1722,58 @@ static void oauth_cb (rd_kafka_t *rk, const char *oauthbearer_config,
         Py_DECREF(eo);
 
         if (!result) {
-                goto err;
+                goto fail;
         }
-        if (!PyArg_ParseTuple(result, "sd", &token, &expiry)) {
+        if (!PyArg_ParseTuple(result, "sd|sO!", &token, &expiry, &principal, &PyDict_Type, &extensions)) {
                 Py_DECREF(result);
-                PyErr_Format(PyExc_TypeError,
+                PyErr_SetString(PyExc_TypeError,
                              "expect returned value from oauth_cb "
-                             "to be (token_str, expiry_time) tuple");
+                             "to be (token_str, expiry_time[, principal, extensions]) tuple");
                 goto err;
         }
+
+        if (extensions) {
+                int len = (int)PyDict_Size(extensions);
+                rd_extensions = (char **)malloc(2 * len * sizeof(char *));
+                Py_ssize_t pos = 0;
+                PyObject *ko, *vo;
+                while (PyDict_Next(extensions, &pos, &ko, &vo)) {
+                        if (!py_extensions_to_c(rd_extensions, rd_extensions_size, ko, vo)) {
+                                Py_DECREF(result);
+                                free(rd_extensions);
+                                goto err;
+                        }
+                        rd_extensions_size = rd_extensions_size + 2;
+                }
+        }
+
         err_code = rd_kafka_oauthbearer_set_token(h->rk, token,
                                                   (int64_t)(expiry * 1000),
-                                                  "", NULL, 0, err_msg,
+                                                  principal, (const char **)rd_extensions, rd_extensions_size, err_msg,
                                                   sizeof(err_msg));
         Py_DECREF(result);
-        if (err_code) {
+        if (rd_extensions) {
+                int i;
+                for(i = 0; i < rd_extensions_size; i++) {
+                        free(rd_extensions[i]);
+                }
+                free(rd_extensions);
+        }
+
+        if (err_code != RD_KAFKA_RESP_ERR_NO_ERROR) {
                 PyErr_Format(PyExc_ValueError, "%s", err_msg);
-                goto err;
+                goto fail;
         }
         goto done;
 
+fail:
+        err_code = rd_kafka_oauthbearer_set_token_failure(h->rk, "OAuth callback raised exception");
+        if (err_code != RD_KAFKA_RESP_ERR_NO_ERROR) {
+                PyErr_SetString(PyExc_ValueError, "Failed to set token failure");
+                goto err;
+        }
+        PyErr_Clear();
+        goto done;
  err:
         CallState_crash(cs);
         rd_kafka_yield(h->rk);
@@ -1848,7 +2064,7 @@ rd_kafka_conf_t *common_conf_setup (rd_kafka_type_t ktype,
                 PyDict_Update(confdict, kwargs);
         }
 
-        if (ktype == RD_KAFKA_CONSUMER && 
+        if (ktype == RD_KAFKA_CONSUMER &&
                 !PyDict_GetItemString(confdict, "group.id")) {
 
                 PyErr_SetString(PyExc_ValueError,
@@ -2225,6 +2441,11 @@ void cfl_PyDict_SetInt (PyObject *dict, const char *name, int val) {
         Py_DECREF(vo);
 }
 
+void cfl_PyDict_SetLong (PyObject *dict, const char *name, long val) {
+        PyObject *vo = cfl_PyLong_FromLong(val);
+        PyDict_SetItemString(dict, name, vo);
+        Py_DECREF(vo);
+}
 
 int cfl_PyObject_SetString (PyObject *o, const char *name, const char *val) {
         PyObject *vo = cfl_PyUnistr(_FromString(val));
@@ -2254,7 +2475,7 @@ int cfl_PyObject_SetInt (PyObject *o, const char *name, int val) {
  */
 int cfl_PyObject_GetAttr (PyObject *object, const char *attr_name,
                           PyObject **valp, const PyTypeObject *py_type,
-                          int required) {
+                          int required, int allow_None) {
         PyObject *o;
 
         o = PyObject_GetAttrString(object, attr_name);
@@ -2269,7 +2490,7 @@ int cfl_PyObject_GetAttr (PyObject *object, const char *attr_name,
                 return 0;
         }
 
-        if (py_type && Py_TYPE(o) != py_type) {
+        if (!(allow_None && o == Py_None) && py_type && Py_TYPE(o) != py_type) {
                 Py_DECREF(o);
                 PyErr_Format(PyExc_TypeError,
                              "Expected .%s to be %s type, not %s",
@@ -2300,7 +2521,7 @@ int cfl_PyObject_GetInt (PyObject *object, const char *attr_name, int *valp,
 #else
                                   &PyInt_Type,
 #endif
-                                  required))
+                                  required, 0))
                 return 0;
 
         if (!o) {
@@ -2336,17 +2557,17 @@ int cfl_PyBool_get (PyObject *object, const char *name, int *valp) {
         return 1;
 }
 
-
 /**
  * @brief Get attribute \p attr_name from \p object and make sure it is
- *        a string type.
+ *        a string type, or None if \p allow_None is 1.
  *
  * @returns 1 if \p valp was updated with a newly allocated copy of either the
- *          object value (UTF8), or \p defval.
+ *          object value (UTF8) or \p defval, or NULL if the attribute is None;
  *          0 if an exception was raised.
  */
 int cfl_PyObject_GetString (PyObject *object, const char *attr_name,
-                            char **valp, const char *defval, int required) {
+                            char **valp, const char *defval, int required,
+                            int allow_None) {
         PyObject *o, *uo, *uop;
 
         if (!cfl_PyObject_GetAttr(object, attr_name, &o,
@@ -2358,7 +2579,7 @@ int cfl_PyObject_GetString (PyObject *object, const char *attr_name,
                                    *           proper conversion below. */
                                   NULL,
 #endif
-                                  required))
+                                  required, allow_None))
                 return 0;
 
         if (!o) {
@@ -2366,6 +2587,12 @@ int cfl_PyObject_GetString (PyObject *object, const char *attr_name,
                 return 1;
         }
 
+        if (o == Py_None) {
+                Py_DECREF(o);
+                *valp = NULL;
+                return 1;
+        }
+
         if (!(uo = cfl_PyObject_Unistr(o))) {
                 Py_DECREF(o);
                 PyErr_Format(PyExc_TypeError,
@@ -2411,6 +2638,55 @@ PyObject *cfl_int32_array_to_py_list (const int32_t *arr, size_t cnt) {
 }
 
 
+/****************************************************************************
+ *
+ *
+ * Methods common across all types of clients.
+ *
+ *
+ *
+ *
+ ****************************************************************************/
+
+const char set_sasl_credentials_doc[] = PyDoc_STR(
+        ".. py:function:: set_sasl_credentials(username, password)\n"
+        "\n"
+        "  Sets the SASL credentials used for this client.\n"
+        "  These credentials will overwrite the old ones, and will be used the next time the client needs to authenticate.\n"
+        "  This method will not disconnect existing broker connections that have been established with the old credentials.\n"
+        "  This method is applicable only to SASL PLAIN and SCRAM mechanisms.\n");
+
+
+PyObject *set_sasl_credentials(Handle *self, PyObject *args, PyObject *kwargs) {
+        const char *username = NULL;
+        const char *password = NULL;
+        rd_kafka_error_t* error;
+        CallState cs;
+        static char *kws[] = {"username", "password", NULL};
+
+        if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ss", kws,
+                                         &username, &password)) {
+                return NULL;
+        }
+
+        CallState_begin(self, &cs);
+        error = rd_kafka_sasl_set_credentials(self->rk, username, password);
+
+        if (!CallState_end(self, &cs)) {
+                if (error) /* Ignore error in favour of callstate exception */
+                        rd_kafka_error_destroy(error);
+                return NULL;
+        }
+
+        if (error) {
+                cfl_PyErr_from_error_destroy(error);
+                return NULL;
+        }
+
+        Py_RETURN_NONE;
+}
+
+
 /****************************************************************************
  *
  *
@@ -2523,7 +2799,7 @@ static char *KafkaError_add_errs (PyObject *dict, const char *origdoc) {
 
 	_PRINT("\n");
 
-	return doc; // FIXME: leak
+	return doc; /* FIXME: leak */
 }
 
 
@@ -2541,7 +2817,11 @@ static struct PyModuleDef cimpl_moduledef = {
 static PyObject *_init_cimpl (void) {
 	PyObject *m;
 
+/* PyEval_InitThreads became deprecated in Python 3.9 and will be removed in Python 3.11.
+ * Prior to Python 3.7, this call was required to initialize the GIL. */
+#if PY_VERSION_HEX < 0x03090000
         PyEval_InitThreads();
+#endif
 
 	if (PyType_Ready(&KafkaErrorType) < 0)
 		return NULL;
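
The oauth_cb changes above let the callback return an optional principal and an
extensions dict in addition to the token and expiry. A hedged sketch of such a
callback; the token value, principal and extension names are placeholders.

    import time
    from confluent_kafka import Producer

    def refresh_oauth_token(oauth_config):
        # Fetching a real token is application specific; this value is a placeholder.
        token = "example-opaque-token"
        expiry = time.time() + 3600           # absolute expiry time, in seconds
        principal = "example-principal"       # optional, new in this release
        extensions = {"traceId": "abc123"}    # optional SASL/OAUTHBEARER extensions
        return token, expiry, principal, extensions

    producer = Producer({
        "bootstrap.servers": "localhost:9092",   # placeholder broker
        "security.protocol": "SASL_PLAINTEXT",
        "sasl.mechanism": "OAUTHBEARER",
        "oauth_cb": refresh_oauth_token,
    })
    producer.poll(0)  # the callback is invoked whenever a token is needed
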
diff --git a/src/confluent_kafka/src/confluent_kafka.h b/src/confluent_kafka/src/confluent_kafka.h
index a070d23..b432f55 100644
--- a/src/confluent_kafka/src/confluent_kafka.h
+++ b/src/confluent_kafka/src/confluent_kafka.h
@@ -42,8 +42,8 @@
  *  0xMMmmRRPP
  *  MM=major, mm=minor, RR=revision, PP=patchlevel (not used)
  */
-#define CFL_VERSION     0x01070000
-#define CFL_VERSION_STR "1.7.0"
+#define CFL_VERSION     0x02010100
+#define CFL_VERSION_STR "2.1.1rc1"
 
 /**
  * Minimum required librdkafka version. This is checked both during
@@ -51,19 +51,19 @@
  * Make sure to keep the MIN_RD_KAFKA_VERSION, MIN_VER_ERRSTR and #error
  * defines and strings in sync.
  */
-#define MIN_RD_KAFKA_VERSION 0x01060000
+#define MIN_RD_KAFKA_VERSION 0x020100ff
 
 #ifdef __APPLE__
-#define MIN_VER_ERRSTR "confluent-kafka-python requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
+#define MIN_VER_ERRSTR "confluent-kafka-python requires librdkafka v2.1.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
 #else
-#define MIN_VER_ERRSTR "confluent-kafka-python requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
+#define MIN_VER_ERRSTR "confluent-kafka-python requires librdkafka v2.1.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
 #endif
 
 #if RD_KAFKA_VERSION < MIN_RD_KAFKA_VERSION
 #ifdef __APPLE__
-#error "confluent-kafka-python requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
+#error "confluent-kafka-python requires librdkafka v2.1.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
 #else
-#error "confluent-kafka-python requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
+#error "confluent-kafka-python requires librdkafka v2.1.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
 #endif
 #endif
 
@@ -319,20 +319,25 @@ void CallState_crash (CallState *cs);
 #define cfl_PyInt_FromInt(v) PyInt_FromLong(v)
 #endif
 
+#define cfl_PyLong_Check(o) PyLong_Check(o)
+#define cfl_PyLong_AsLong(o) (int)PyLong_AsLong(o)
+#define cfl_PyLong_FromLong(v) PyLong_FromLong(v)
 
 PyObject *cfl_PyObject_lookup (const char *modulename, const char *typename);
 
 void cfl_PyDict_SetString (PyObject *dict, const char *name, const char *val);
 void cfl_PyDict_SetInt (PyObject *dict, const char *name, int val);
+void cfl_PyDict_SetLong (PyObject *dict, const char *name, long val);
 int cfl_PyObject_SetString (PyObject *o, const char *name, const char *val);
 int cfl_PyObject_SetInt (PyObject *o, const char *name, int val);
 int cfl_PyObject_GetAttr (PyObject *object, const char *attr_name,
                           PyObject **valp, const PyTypeObject *py_type,
-                          int required);
+                          int required, int allow_None);
 int cfl_PyObject_GetInt (PyObject *object, const char *attr_name, int *valp,
                          int defval, int required);
 int cfl_PyObject_GetString (PyObject *object, const char *attr_name,
-                            char **valp, const char *defval, int required);
+                            char **valp, const char *defval, int required,
+                            int allow_None);
 int cfl_PyBool_get (PyObject *object, const char *name, int *valp);
 
 PyObject *cfl_int32_array_to_py_list (const int32_t *arr, size_t cnt);
@@ -351,6 +356,8 @@ typedef struct {
 	char *topic;
 	int   partition;
 	int64_t offset;
+	int32_t leader_epoch;
+	char *metadata;
 	PyObject *error;
 } TopicPartition;
 
@@ -376,13 +383,16 @@ rd_kafka_conf_t *common_conf_setup (rd_kafka_type_t ktype,
 				    PyObject *args,
 				    PyObject *kwargs);
 PyObject *c_parts_to_py (const rd_kafka_topic_partition_list_t *c_parts);
+PyObject *c_Node_to_py(const rd_kafka_Node_t *c_node);
 rd_kafka_topic_partition_list_t *py_to_c_parts (PyObject *plist);
 PyObject *list_topics (Handle *self, PyObject *args, PyObject *kwargs);
 PyObject *list_groups (Handle *self, PyObject *args, PyObject *kwargs);
+PyObject *set_sasl_credentials(Handle *self, PyObject *args, PyObject *kwargs);
 
 
 extern const char list_topics_doc[];
 extern const char list_groups_doc[];
+extern const char set_sasl_credentials_doc[];
 
 
 #ifdef RD_KAFKA_V_HEADERS
@@ -419,6 +429,7 @@ typedef struct {
 	PyObject *error;
 	int32_t partition;
 	int64_t offset;
+	int32_t leader_epoch;
 	int64_t timestamp;
 	rd_kafka_timestamp_type_t tstype;
         int64_t latency;  /**< Producer: time it took to produce message */
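
The new TopicPartition fields declared above (leader_epoch, metadata) are exposed
through the constructor and as attributes. A brief sketch; topic name, offset and
metadata values are placeholders.

    from confluent_kafka import TopicPartition

    tp = TopicPartition("example-topic", partition=0, offset=42,
                        metadata="checkpoint-7", leader_epoch=3)
    print(tp.metadata)      # 'checkpoint-7'
    print(tp.leader_epoch)  # 3, or None when no epoch is known
    print(tp)               # str() now includes leader_epoch
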
diff --git a/tests/README.md b/tests/README.md
index 4c40c8b..e3b2fe3 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -18,7 +18,7 @@ A python3 env suitable for running tests:
 
     $ python3 -m venv venv_test
     $ source venv_test/bin/activate
-    $ pip install -r test/requirements.txt
+    $ pip install -r tests/requirements.txt
     $ python setup.py build
     $ python setup.py install
 
@@ -60,7 +60,7 @@ The easiest way to arrange for this is:
 
 And also:
 
-    source ./tests/docker/.env
+    source ./tests/docker/.env.sh
 
 which sets environment variables referenced by `./tests/integration/testconf.json`.
 
diff --git a/tests/avro/adv_schema.avsc b/tests/avro/adv_schema.avsc
index a5f975f..29721aa 100644
--- a/tests/avro/adv_schema.avsc
+++ b/tests/avro/adv_schema.avsc
@@ -56,6 +56,17 @@
                 "type" : "map",
                 "values" : "basicPerson"
             }
+        },
+        {
+            "name": "timestamp",
+            "type": [
+                "null",
+                {
+                    "type": "long",
+                    "logicalType": "timestamp-millis"
+                }
+            ],
+            "default": null
         }
     ]
 }
diff --git a/tests/avro/data_gen.py b/tests/avro/data_gen.py
index 0ff4290..68e6751 100644
--- a/tests/avro/data_gen.py
+++ b/tests/avro/data_gen.py
@@ -22,6 +22,7 @@
 import os
 import os.path
 import random
+from datetime import datetime, timezone
 
 from avro import schema
 from avro.datafile import DataFileWriter
@@ -65,6 +66,7 @@ def create_adv_item(i):
     basic = create_basic_item(i)
     basic['family'] = dict(map(lambda bi: (bi['name'], bi), family))
     basic['friends'] = dict(map(lambda bi: (bi['name'], bi), friends))
+    basic['timestamp'] = datetime(1970, 1, 1, 0, 0, tzinfo=timezone.utc)
     return basic
 
 
diff --git a/tests/avro/test_cached_client.py b/tests/avro/test_cached_client.py
index 7d0182b..16b0f5d 100644
--- a/tests/avro/test_cached_client.py
+++ b/tests/avro/test_cached_client.py
@@ -32,7 +32,9 @@ class TestCacheSchemaRegistryClient(unittest.TestCase):
     def setUp(self):
         self.server = mock_registry.ServerThread(0)
         self.server.start()
-        self.client = CachedSchemaRegistryClient('http://127.0.0.1:' + str(self.server.server.server_port))
+        self.client = CachedSchemaRegistryClient({
+            'url': 'http://127.0.0.1:' + str(self.server.server.server_port),
+        })
 
     def tearDown(self):
         self.server.shutdown()
@@ -140,19 +142,25 @@ class TestCacheSchemaRegistryClient(unittest.TestCase):
 
     def test_cert_no_key(self):
         with self.assertRaises(ValueError):
-            self.client = CachedSchemaRegistryClient(url='https://127.0.0.1:65534',
-                                                     cert_location='/path/to/cert')
+            self.client = CachedSchemaRegistryClient({
+                'url': 'https://127.0.0.1:65534',
+                'ssl.certificate.location': '/path/to/cert',
+            })
 
     def test_cert_with_key(self):
-        self.client = CachedSchemaRegistryClient(url='https://127.0.0.1:65534',
-                                                 cert_location='/path/to/cert',
-                                                 key_location='/path/to/key')
+        self.client = CachedSchemaRegistryClient({
+            'url': 'https://127.0.0.1:65534',
+            'ssl.certificate.location': '/path/to/cert',
+            'ssl.key.location': '/path/to/key'
+        })
         self.assertTupleEqual(('/path/to/cert', '/path/to/key'), self.client._session.cert)
 
-    def test_cert_path(self):
-        self.client = CachedSchemaRegistryClient(url='https://127.0.0.1:65534',
-                                                 ca_location='/path/to/ca')
-        self.assertEqual('/path/to/ca', self.client._session.verify)
+    def test_key_no_cert(self):
+        with self.assertRaises(ValueError):
+            self.client = CachedSchemaRegistryClient({
+                'url': 'https://127.0.0.1:65534',
+                'ssl.key.location': '/path/to/key'
+            })
 
     def test_context(self):
         with self.client as c:
@@ -177,14 +185,15 @@ class TestCacheSchemaRegistryClient(unittest.TestCase):
 
     def test_invalid_type_url(self):
         with self.assertRaises(TypeError):
-            self.client = CachedSchemaRegistryClient(
-                url=1)
+            self.client = CachedSchemaRegistryClient({
+                'url': 1
+            })
 
     def test_invalid_type_url_dict(self):
         with self.assertRaises(TypeError):
             self.client = CachedSchemaRegistryClient({
                 "url": 1
-                })
+            })
 
     def test_invalid_url(self):
         with self.assertRaises(ValueError):
diff --git a/tests/docker/.env b/tests/docker/.env.sh
similarity index 83%
rename from tests/docker/.env
rename to tests/docker/.env.sh
index 14bca41..0847124 100644
--- a/tests/docker/.env
+++ b/tests/docker/.env.sh
@@ -13,3 +13,5 @@ export MY_SCHEMA_REGISTRY_SSL_URL_ENV=https://$(hostname -f):8082
 export MY_SCHEMA_REGISTRY_SSL_CA_LOCATION_ENV=$TLS/ca-cert
 export MY_SCHEMA_REGISTRY_SSL_CERTIFICATE_LOCATION_ENV=$TLS/client.pem
 export MY_SCHEMA_REGISTRY_SSL_KEY_LOCATION_ENV=$TLS/client.key
+export MY_SCHEMA_REGISTRY_SSL_KEY_WITH_PASSWORD_LOCATION_ENV=$TLS/client_with_password.key
+export MY_SCHEMA_REGISTRY_SSL_KEY_PASSWORD="abcdefgh"
\ No newline at end of file
diff --git a/tests/docker/bin/certify.sh b/tests/docker/bin/certify.sh
index e39e7c7..d753bb9 100755
--- a/tests/docker/bin/certify.sh
+++ b/tests/docker/bin/certify.sh
@@ -5,7 +5,7 @@ set -eu
 PY_DOCKER_BIN="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
 export PASS="abcdefgh"
 
-source ${PY_DOCKER_BIN}/../.env
+source ${PY_DOCKER_BIN}/../.env.sh
 
 mkdir -p ${TLS}
 
@@ -24,5 +24,6 @@ echo "Creating client cert..."
 ${PY_DOCKER_BIN}/gen-ssl-certs.sh client ${TLS}/ca-cert ${TLS}/ ${HOST} ${HOST}
 
 echo "Creating key ..."
+cp ${TLS}/client.key ${TLS}/client_with_password.key
 openssl rsa -in ${TLS}/client.key -out ${TLS}/client.key  -passin pass:${PASS}
 
diff --git a/tests/docker/bin/cluster_down.sh b/tests/docker/bin/cluster_down.sh
index 3d4bdc7..d985abf 100755
--- a/tests/docker/bin/cluster_down.sh
+++ b/tests/docker/bin/cluster_down.sh
@@ -3,7 +3,7 @@
 set -eu
 
 PY_DOCKER_BIN="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
-source ${PY_DOCKER_BIN}/../.env
+source ${PY_DOCKER_BIN}/../.env.sh
 
 echo "Destroying cluster.."
 docker-compose -f $PY_DOCKER_COMPOSE_FILE down -v --remove-orphans
diff --git a/tests/docker/bin/cluster_up.sh b/tests/docker/bin/cluster_up.sh
index 9da748c..2f19470 100755
--- a/tests/docker/bin/cluster_up.sh
+++ b/tests/docker/bin/cluster_up.sh
@@ -3,7 +3,7 @@
 set -eu
 
 PY_DOCKER_BIN="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
-source ${PY_DOCKER_BIN}/../.env
+source ${PY_DOCKER_BIN}/../.env.sh
 
 # Wait for http service listener to come up and start serving
 # $1 http service name
@@ -28,7 +28,7 @@ await_http() {
 }
 
 echo "Configuring Environment..."
-source ${PY_DOCKER_SOURCE}/.env
+source ${PY_DOCKER_SOURCE}/.env.sh
 
 echo "Generating SSL certs..."
 ${PY_DOCKER_BIN}/certify.sh
diff --git a/tests/docker/docker-compose.yaml b/tests/docker/docker-compose.yaml
index fc3b7b0..663eba3 100644
--- a/tests/docker/docker-compose.yaml
+++ b/tests/docker/docker-compose.yaml
@@ -18,8 +18,10 @@ services:
       KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
+      KAFKA_SUPER_USERS: "User:ANONYMOUS"
   schema-registry:
-    image: confluentinc/cp-schema-registry:5.0.0
+    image: confluentinc/cp-schema-registry
     depends_on:
     - zookeeper
     - kafka
@@ -40,7 +42,7 @@ services:
       SCHEMA_REGISTRY_SSL_TRUSTSTORE_PASSWORD: abcdefgh
       SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper:2181
   schema-registry-basic-auth:
-    image: confluentinc/cp-schema-registry:5.0.0
+    image: confluentinc/cp-schema-registry
     depends_on:
       - zookeeper
       - kafka
@@ -50,9 +52,9 @@ services:
     volumes:
       - ./conf:/conf
     environment:
-      SCHEMA_REGISTRY_HOST_NAME: schema-registry2
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry-basic-auth
       SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas2
-      SCHEMA_REGISTRY_SCHEMA_REGISTRY_ZK_NAMESPACE: schema_registry2
+      SCHEMA_REGISTRY_SCHEMA_REGISTRY_GROUP_ID: schema-registry-basic-auth
       SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka:9092
       SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8083, https://0.0.0.0:8084
       SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: https
diff --git a/tests/integration/admin/test_basic_operations.py b/tests/integration/admin/test_basic_operations.py
new file mode 100644
index 0000000..7c5145c
--- /dev/null
+++ b/tests/integration/admin/test_basic_operations.py
@@ -0,0 +1,381 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import confluent_kafka
+import struct
+import time
+from confluent_kafka import ConsumerGroupTopicPartitions, TopicPartition, ConsumerGroupState
+from confluent_kafka.admin import (NewPartitions, ConfigResource,
+                                   AclBinding, AclBindingFilter, ResourceType,
+                                   ResourcePatternType, AclOperation, AclPermissionType)
+from confluent_kafka.error import ConsumeError
+
+topic_prefix = "test-topic"
+
+
+# Shared between producer and consumer tests and used to verify
+# that consumed headers are what was actually produced.
+produce_headers = [('foo1', 'bar'),
+                   ('foo1', 'bar2'),
+                   ('foo2', b'1'),
+                   (u'Jämtland', u'Härjedalen'),  # automatically utf-8 encoded
+                   ('nullheader', None),
+                   ('empty', ''),
+                   ('foobin', struct.pack('hhl', 10, 20, 30))]
+
+
+def verify_commit_result(err, partitions):
+    assert err is not None
+
+
+def verify_admin_acls(admin_client,
+                      topic,
+                      group):
+
+    #
+    # Add three ACLs
+    #
+    acl_binding_1 = AclBinding(ResourceType.TOPIC, topic, ResourcePatternType.LITERAL,
+                               "User:test-user-1", "*", AclOperation.READ, AclPermissionType.ALLOW)
+    acl_binding_2 = AclBinding(ResourceType.TOPIC, topic, ResourcePatternType.PREFIXED,
+                               "User:test-user-2", "*", AclOperation.WRITE, AclPermissionType.DENY)
+    acl_binding_3 = AclBinding(ResourceType.GROUP, group, ResourcePatternType.PREFIXED,
+                               "User:test-user-2", "*", AclOperation.ALL, AclPermissionType.ALLOW)
+
+    fs = admin_client.create_acls([acl_binding_1, acl_binding_2, acl_binding_3])
+    for acl_binding, f in fs.items():
+        f.result()  # trigger exception if there was an error
+
+    acl_binding_filter1 = AclBindingFilter(ResourceType.ANY, None, ResourcePatternType.ANY,
+                                           None, None, AclOperation.ANY, AclPermissionType.ANY)
+    acl_binding_filter2 = AclBindingFilter(ResourceType.ANY, None, ResourcePatternType.PREFIXED,
+                                           None, None, AclOperation.ANY, AclPermissionType.ANY)
+    acl_binding_filter3 = AclBindingFilter(ResourceType.TOPIC, None, ResourcePatternType.ANY,
+                                           None, None, AclOperation.ANY, AclPermissionType.ANY)
+    acl_binding_filter4 = AclBindingFilter(ResourceType.GROUP, None, ResourcePatternType.ANY,
+                                           None, None, AclOperation.ANY, AclPermissionType.ANY)
+
+    expected_acl_bindings = [acl_binding_1, acl_binding_2, acl_binding_3]
+    acl_bindings = admin_client.describe_acls(acl_binding_filter1).result()
+    assert sorted(acl_bindings) == sorted(expected_acl_bindings), \
+        "ACL bindings don't match, actual: {} expected: {}".format(acl_bindings,
+                                                                   expected_acl_bindings)
+
+    #
+    # Delete the ACLs with PREFIXED
+    #
+    expected_acl_bindings = [acl_binding_2, acl_binding_3]
+    fs = admin_client.delete_acls([acl_binding_filter2])
+    deleted_acl_bindings = sorted(fs[acl_binding_filter2].result())
+    assert deleted_acl_bindings == expected_acl_bindings, \
+        "Deleted ACL bindings don't match, actual {} expected {}".format(deleted_acl_bindings,
+                                                                         expected_acl_bindings)
+
+    #
+    # Delete the ACLs with TOPIC and GROUP
+    #
+    expected_acl_bindings = [[acl_binding_1], []]
+    delete_acl_binding_filters = [acl_binding_filter3, acl_binding_filter4]
+    fs = admin_client.delete_acls(delete_acl_binding_filters)
+    for acl_binding, expected in zip(delete_acl_binding_filters, expected_acl_bindings):
+        deleted_acl_bindings = sorted(fs[acl_binding].result())
+        assert deleted_acl_bindings == expected, \
+            "Deleted ACL bindings don't match, actual {} expected {}".format(deleted_acl_bindings,
+                                                                             expected)
+    #
+    # All the ACLs should have been deleted
+    #
+    expected_acl_bindings = []
+    acl_bindings = admin_client.describe_acls(acl_binding_filter1).result()
+    assert acl_bindings == expected_acl_bindings, \
+        "ACL bindings don't match, actual: {} expected: {}".format(acl_bindings,
+                                                                   expected_acl_bindings)
+
+
+def verify_topic_metadata(client, exp_topics, *args, **kwargs):
+    """
+    Verify that exp_topics (dict<topicname,partcnt>) is reported in metadata.
+    Will retry and wait for some time to let changes propagate.
+
+    Non-controller brokers may return the previous partition count for some
+    time before being updated, in this case simply retry.
+    """
+
+    for retry in range(0, 3):
+        do_retry = 0
+
+        md = client.list_topics(*args, **kwargs)
+
+        for exptopic, exppartcnt in exp_topics.items():
+            if exptopic not in md.topics:
+                print("Topic {} not yet reported in metadata: retrying".format(exptopic))
+                do_retry += 1
+                continue
+
+            if len(md.topics[exptopic].partitions) < exppartcnt:
+                print("Topic {} partition count not yet updated ({} != expected {}): retrying".format(
+                    exptopic, len(md.topics[exptopic].partitions), exppartcnt))
+                do_retry += 1
+                continue
+
+            assert len(md.topics[exptopic].partitions) == exppartcnt, \
+                "Expected {} partitions for topic {}, not {}".format(
+                    exppartcnt, exptopic, md.topics[exptopic].partitions)
+
+        if do_retry == 0:
+            return  # All topics okay.
+
+        time.sleep(1)
+
+
+def verify_consumer_group_offsets_operations(client, our_topic, group_id):
+
+    # List Consumer Group Offsets check with just group name
+    request = ConsumerGroupTopicPartitions(group_id)
+    fs = client.list_consumer_group_offsets([request])
+    f = fs[group_id]
+    res = f.result()
+    assert isinstance(res, ConsumerGroupTopicPartitions)
+    assert res.group_id == group_id
+    assert len(res.topic_partitions) == 2
+    is_any_message_consumed = False
+    for topic_partition in res.topic_partitions:
+        assert topic_partition.topic == our_topic
+        if topic_partition.offset > 0:
+            is_any_message_consumed = True
+    assert is_any_message_consumed
+
+    # Alter Consumer Group Offsets check
+    alter_group_topic_partitions = list(map(lambda topic_partition: TopicPartition(topic_partition.topic,
+                                                                                   topic_partition.partition,
+                                                                                   0),
+                                            res.topic_partitions))
+    alter_group_topic_partition_request = ConsumerGroupTopicPartitions(group_id,
+                                                                       alter_group_topic_partitions)
+    afs = client.alter_consumer_group_offsets([alter_group_topic_partition_request])
+    af = afs[group_id]
+    ares = af.result()
+    assert isinstance(ares, ConsumerGroupTopicPartitions)
+    assert ares.group_id == group_id
+    assert len(ares.topic_partitions) == 2
+    for topic_partition in ares.topic_partitions:
+        assert topic_partition.topic == our_topic
+        assert topic_partition.offset == 0
+
+    # List Consumer Group Offsets check with group name and partitions
+    list_group_topic_partitions = list(map(lambda topic_partition: TopicPartition(topic_partition.topic,
+                                                                                  topic_partition.partition),
+                                           ares.topic_partitions))
+    list_group_topic_partition_request = ConsumerGroupTopicPartitions(group_id,
+                                                                      list_group_topic_partitions)
+    lfs = client.list_consumer_group_offsets([list_group_topic_partition_request])
+    lf = lfs[group_id]
+    lres = lf.result()
+
+    assert isinstance(lres, ConsumerGroupTopicPartitions)
+    assert lres.group_id == group_id
+    assert len(lres.topic_partitions) == 2
+    for topic_partition in lres.topic_partitions:
+        assert topic_partition.topic == our_topic
+        assert topic_partition.offset == 0
+
+
+def test_basic_operations(kafka_cluster):
+    num_partitions = 2
+    topic_config = {"compression.type": "gzip"}
+
+    #
+    # First iteration: validate our_topic creation.
+    # Second iteration: create topic.
+    #
+    for validate in (True, False):
+        our_topic = kafka_cluster.create_topic(topic_prefix,
+                                               {
+                                                   "num_partitions": num_partitions,
+                                                   "config": topic_config,
+                                                   "replication_factor": 1,
+                                               },
+                                               validate_only=validate
+                                               )
+
+    admin_client = kafka_cluster.admin()
+
+    #
+    # Find the topic in list_topics
+    #
+    verify_topic_metadata(admin_client, {our_topic: num_partitions})
+    verify_topic_metadata(admin_client, {our_topic: num_partitions}, topic=our_topic)
+    verify_topic_metadata(admin_client, {our_topic: num_partitions}, our_topic)
+
+    #
+    # Increase the partition count
+    #
+    num_partitions += 3
+    fs = admin_client.create_partitions([NewPartitions(our_topic,
+                                                       new_total_count=num_partitions)],
+                                        operation_timeout=10.0)
+
+    for topic2, f in fs.items():
+        f.result()  # trigger exception if there was an error
+
+    #
+    # Verify with list_topics.
+    #
+    verify_topic_metadata(admin_client, {our_topic: num_partitions})
+
+    #
+    # Verify with list_groups.
+    #
+
+    # Produce some messages
+    p = kafka_cluster.producer()
+    p.produce(our_topic, 'Hello Python!', headers=produce_headers)
+    p.produce(our_topic, key='Just a key and headers', headers=produce_headers)
+    p.flush()
+
+    def consume_messages(group_id, num_messages=None):
+        # Consume messages
+        conf = {'group.id': group_id,
+                'session.timeout.ms': 6000,
+                'enable.auto.commit': False,
+                'on_commit': verify_commit_result,
+                'auto.offset.reset': 'earliest',
+                'enable.partition.eof': True}
+        c = kafka_cluster.consumer(conf)
+        c.subscribe([our_topic])
+        eof_reached = dict()
+        read_messages = 0
+        msg = None
+        while True:
+            try:
+                msg = c.poll()
+                if msg is None:
+                    raise Exception('Got timeout from poll() without a timeout set: %s' % msg)
+                # Commit offset
+                c.commit(msg, asynchronous=False)
+                read_messages += 1
+                if num_messages is not None and read_messages == num_messages:
+                    print('Read all the required messages: exiting')
+                    break
+            except ConsumeError as e:
+                if msg is not None and e.code == confluent_kafka.KafkaError._PARTITION_EOF:
+                    print('Reached end of %s [%d] at offset %d' % (
+                          msg.topic(), msg.partition(), msg.offset()))
+                    eof_reached[(msg.topic(), msg.partition())] = True
+                    if len(eof_reached) == len(c.assignment()):
+                        print('EOF reached for all assigned partitions: exiting')
+                        break
+                else:
+                    print('Consumer error: %s: ignoring' % str(e))
+                    break
+        c.close()
+
+    group1 = 'test-group-1'
+    group2 = 'test-group-2'
+    acls_topic = our_topic + "-acls"
+    acls_group = "test-group-acls"
+    consume_messages(group1, 2)
+    consume_messages(group2, 2)
+
+    # list_groups without group argument
+    groups = set(group.id for group in admin_client.list_groups(timeout=10))
+    assert group1 in groups, "Consumer group {} not found".format(group1)
+    assert group2 in groups, "Consumer group {} not found".format(group2)
+    # list_groups with group argument
+    groups = set(group.id for group in admin_client.list_groups(group1))
+    assert group1 in groups, "Consumer group {} not found".format(group1)
+    groups = set(group.id for group in admin_client.list_groups(group2))
+    assert group2 in groups, "Consumer group {} not found".format(group2)
+
+    # List Consumer Groups new API test
+    future = admin_client.list_consumer_groups(request_timeout=10)
+    result = future.result()
+    group_ids = [group.group_id for group in result.valid]
+    assert group1 in group_ids, "Consumer group {} not found".format(group1)
+    assert group2 in group_ids, "Consumer group {} not found".format(group2)
+
+    future = admin_client.list_consumer_groups(request_timeout=10, states={ConsumerGroupState.STABLE})
+    result = future.result()
+    assert isinstance(result.valid, list)
+    assert not result.valid
+
+    # Describe Consumer Groups API test
+    futureMap = admin_client.describe_consumer_groups([group1, group2], request_timeout=10)
+    for group_id, future in futureMap.items():
+        g = future.result()
+        assert group_id == g.group_id
+        assert g.is_simple_consumer_group is False
+        assert g.state == ConsumerGroupState.EMPTY
+
+    def verify_config(expconfig, configs):
+        """
+        Verify that the config key/value pairs in expconfig are present
+        and match the corresponding ConfigEntry in configs.
+        """
+        for key, expvalue in expconfig.items():
+            entry = configs.get(key, None)
+            assert entry is not None, "Config {} not found in returned configs".format(key)
+
+            assert entry.value == str(expvalue), \
+                "Config {} with value {} does not match expected value {}".format(key, entry, expvalue)
+
+    #
+    # Get current topic config
+    #
+    resource = ConfigResource(ResourceType.TOPIC, our_topic)
+    fs = admin_client.describe_configs([resource])
+    configs = fs[resource].result()  # will raise exception on failure
+
+    # Verify config matches our expectations
+    verify_config(topic_config, configs)
+
+    #
+    # Now change the config.
+    #
+    topic_config["file.delete.delay.ms"] = 12345
+    topic_config["compression.type"] = "snappy"
+
+    for key, value in topic_config.items():
+        resource.set_config(key, value)
+
+    fs = admin_client.alter_configs([resource])
+    fs[resource].result()  # will raise exception on failure
+
+    #
+    # Read the config back again and verify.
+    #
+    fs = admin_client.describe_configs([resource])
+    configs = fs[resource].result()  # will raise exception on failure
+
+    # Verify config matches our expectations
+    verify_config(topic_config, configs)
+
+    # Verify Consumer Offset Operations
+    verify_consumer_group_offsets_operations(admin_client, our_topic, group1)
+
+    # Delete groups
+    fs = admin_client.delete_consumer_groups([group1, group2], request_timeout=10)
+    fs[group1].result()  # will raise exception on failure
+    fs[group2].result()  # will raise exception on failure
+
+    #
+    # Delete the topic
+    #
+    fs = admin_client.delete_topics([our_topic])
+    fs[our_topic].result()  # will raise exception on failure
+    print("Topic {} marked for deletion".format(our_topic))
+
+    # Verify ACL operations
+    verify_admin_acls(admin_client, acls_topic, acls_group)
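
The block above is the first integration coverage for the futures-based consumer-group Admin APIs added in this release (list_consumer_groups, describe_consumer_groups, delete_consumer_groups). A minimal standalone sketch of the same call pattern, assuming a reachable broker at localhost:9092 and that ConsumerGroupState is importable from confluent_kafka as in the 2.x client:

    from confluent_kafka import ConsumerGroupState
    from confluent_kafka.admin import AdminClient

    admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # assumed address

    # list_consumer_groups() returns a single future; its result carries .valid/.errors.
    listing = admin.list_consumer_groups(
        request_timeout=10, states={ConsumerGroupState.STABLE}).result()
    group_ids = [g.group_id for g in listing.valid]

    # describe/delete both return a dict of futures keyed by group id.
    for gid, fut in admin.describe_consumer_groups(group_ids, request_timeout=10).items():
        desc = fut.result()
        print(gid, desc.state, desc.is_simple_consumer_group)

    for gid, fut in admin.delete_consumer_groups(group_ids, request_timeout=10).items():
        fut.result()  # raises on failure, e.g. for groups that are still active
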
diff --git a/tests/integration/cluster_fixture.py b/tests/integration/cluster_fixture.py
index f99bb8b..832d134 100644
--- a/tests/integration/cluster_fixture.py
+++ b/tests/integration/cluster_fixture.py
@@ -140,7 +140,12 @@ class KafkaClusterFixture(object):
 
         return DeserializingConsumer(consumer_conf)
 
-    def create_topic(self, prefix, conf=None):
+    def admin(self):
+        if self._admin is None:
+            self._admin = AdminClient(self.client_conf())
+        return self._admin
+
+    def create_topic(self, prefix, conf=None, **create_topic_kwargs):
         """
         Creates a new topic with this cluster.
 
@@ -149,12 +154,10 @@ class KafkaClusterFixture(object):
         :returns: The topic's name
         :rtype: str
         """
-        if self._admin is None:
-            self._admin = AdminClient(self.client_conf())
-
         name = prefix + "-" + str(uuid1())
-        future_topic = self._admin.create_topics([NewTopic(name,
-                                                           **self._topic_conf(conf))])
+        future_topic = self.admin().create_topics([NewTopic(name,
+                                                            **self._topic_conf(conf))],
+                                                  **create_topic_kwargs)
 
         future_topic.get(name).result()
         return name
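
The fixture change above memoizes the AdminClient behind a new admin() accessor and forwards arbitrary keyword arguments from create_topic() to AdminClient.create_topics(). A hypothetical usage sketch (the test name and timeout value are illustrative, not part of this diff):

    def test_create_with_timeout(kafka_cluster):
        # operation_timeout is passed straight through to AdminClient.create_topics().
        topic = kafka_cluster.create_topic("quick-topic", operation_timeout=10.0)

        # The same cached AdminClient can be reused for metadata lookups.
        md = kafka_cluster.admin().list_topics(topic=topic)
        assert topic in md.topics
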
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index d57da47..fdb51ba 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -26,12 +26,14 @@ from tests.integration.cluster_fixture import ByoFixture
 work_dir = os.path.dirname(os.path.realpath(__file__))
 
 
-def create_trivup_cluster():
-    return TrivupFixture({'with_sr': True,
-                          'debug': True,
-                          'cp_version': 'latest',
-                          'broker_conf': ['transaction.state.log.replication.factor=1',
-                                          'transaction.state.log.min.isr=1']})
+def create_trivup_cluster(conf={}):
+    trivup_fixture_conf = {'with_sr': True,
+                           'debug': True,
+                           'cp_version': 'latest',
+                           'broker_conf': ['transaction.state.log.replication.factor=1',
+                                           'transaction.state.log.min.isr=1']}
+    trivup_fixture_conf.update(conf)
+    return TrivupFixture(trivup_fixture_conf)
 
 
 def create_byo_cluster(conf):
@@ -41,8 +43,11 @@ def create_byo_cluster(conf):
     return ByoFixture(conf)
 
 
-@pytest.fixture(scope="package")
-def kafka_cluster():
+def kafka_cluster_fixture(
+    brokers_env="BROKERS",
+    sr_url_env="SR_URL",
+    trivup_cluster_conf={}
+):
     """
     If BROKERS environment variable is set to a CSV list of bootstrap servers
     an existing cluster is used.
@@ -52,22 +57,28 @@ def kafka_cluster():
     If BROKERS is not set a TrivUp cluster is created and used.
     """
 
-    bootstraps = os.environ.get("BROKERS", "")
+    bootstraps = os.environ.get(brokers_env, "")
     if bootstraps != "":
         conf = {"bootstrap.servers": bootstraps}
-        sr_url = os.environ.get("SR_URL", "")
+        sr_url = os.environ.get(sr_url_env, "")
         if sr_url != "":
             conf["schema.registry.url"] = sr_url
         print("Using ByoFixture with config from env variables: ", conf)
         cluster = create_byo_cluster(conf)
     else:
-        cluster = create_trivup_cluster()
+        cluster = create_trivup_cluster(trivup_cluster_conf)
     try:
         yield cluster
     finally:
         cluster.stop()
 
 
+@pytest.fixture(scope="package")
+def kafka_cluster():
+    for fixture in kafka_cluster_fixture():
+        yield fixture
+
+
 @pytest.fixture()
 def load_file():
     def get_handle(name):
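
Splitting the package-scoped fixture into a reusable kafka_cluster_fixture() generator lets other conftest modules stand up clusters driven by different environment variables or a tweaked Trivup configuration. A hedged sketch of what a second fixture could look like (the env-var names and broker option are illustrative, not part of this diff):

    @pytest.fixture(scope="package")
    def kafka_sasl_cluster():
        # Hypothetical second cluster: separate env vars plus an extra broker setting
        # (note: passing broker_conf here replaces the default list via dict.update).
        for fixture in kafka_cluster_fixture(
                brokers_env="SASL_BROKERS",
                sr_url_env="SASL_SR_URL",
                trivup_cluster_conf={'broker_conf': ['sasl.enabled.mechanisms=PLAIN']}):
            yield fixture
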
diff --git a/tests/integration/consumer/test_consumer_memberid.py b/tests/integration/consumer/test_consumer_memberid.py
new file mode 100644
index 0000000..cf00270
--- /dev/null
+++ b/tests/integration/consumer/test_consumer_memberid.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2022 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+
+def test_consumer_memberid(kafka_cluster):
+    """
+    Test consumer memberid.
+    """
+
+    consumer_conf = {'group.id': 'test'}
+
+    topic = "testmemberid"
+
+    kafka_cluster.create_topic(topic)
+
+    consumer = kafka_cluster.consumer(consumer_conf)
+
+    assert consumer is not None
+    assert consumer.memberid() is None
+    kafka_cluster.seed_topic(topic, value_source=[b'memberid'])
+
+    consumer.subscribe([topic])
+    msg = consumer.poll(10)
+    assert msg is not None
+    assert msg.value() == b'memberid'
+    memberid = consumer.memberid()
+    print("Member Id is -----> " + memberid)
+    assert isinstance(memberid, str)
+    assert len(memberid) > 0
+    consumer.close()
+
+    with pytest.raises(RuntimeError) as error_info:
+        consumer.memberid()
+    assert error_info.value.args[0] == "Consumer closed"
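
Consumer.memberid() is new in this release: it returns None before the consumer has joined a group, the broker-assigned member id once a join has completed, and raises RuntimeError("Consumer closed") after close(), which is exactly what the test above asserts. A minimal sketch, assuming a broker at localhost:9092 and an existing topic:

    from confluent_kafka import Consumer

    c = Consumer({'bootstrap.servers': 'localhost:9092',  # assumed address
                  'group.id': 'memberid-demo'})
    assert c.memberid() is None       # no group join has happened yet
    c.subscribe(['some-topic'])       # hypothetical topic name
    c.poll(10)                        # first poll triggers the group join
    print(c.memberid())               # member id string once the join has completed
    c.close()
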
diff --git a/tests/integration/consumer/test_consumer_topicpartition_metadata.py b/tests/integration/consumer/test_consumer_topicpartition_metadata.py
new file mode 100644
index 0000000..4c01c1d
--- /dev/null
+++ b/tests/integration/consumer/test_consumer_topicpartition_metadata.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2022 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from confluent_kafka import TopicPartition
+
+
+def commit_and_check(consumer, topic, metadata):
+    if metadata is None:
+        consumer.commit(offsets=[TopicPartition(topic, 0, 1)], asynchronous=False)
+    else:
+        consumer.commit(offsets=[TopicPartition(topic, 0, 1, metadata)], asynchronous=False)
+
+    offsets = consumer.committed([TopicPartition(topic, 0)], timeout=100)
+    assert len(offsets) == 1
+    assert offsets[0].metadata == metadata
+
+
+def test_consumer_topicpartition_metadata(kafka_cluster):
+    topic = kafka_cluster.create_topic("test_topicpartition")
+    consumer_conf = {'group.id': 'pytest'}
+
+    c = kafka_cluster.consumer(consumer_conf)
+
+    # Commit without any metadata.
+    metadata = None
+    commit_and_check(c, topic, metadata)
+
+    # Commit with only ASCII metadata.
+    metadata = 'hello world'
+    commit_and_check(c, topic, metadata)
+
+    # Commit with Unicode characters in metadata.
+    metadata = 'नमस्ते दुनिया'
+    commit_and_check(c, topic, metadata)
+
+    # Commit with empty string as metadata.
+    metadata = ''
+    commit_and_check(c, topic, metadata)
+
+    # Commit with invalid metadata (with null byte in the middle).
+    metadata = 'xyz\x00abc'
+    try:
+        commit_and_check(c, topic, metadata)
+        # We should never reach this point, since the prior statement should throw.
+        assert False
+    except ValueError as ve:
+        assert 'embedded null character' in str(ve)
+
+    c.close()
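
The metadata passed as the fourth TopicPartition argument is stored broker-side along with the committed offset, so any consumer in the same group can read it back through committed(). A small sketch under that assumption (broker address, topic and group are placeholders; the topic is assumed to exist):

    from confluent_kafka import Consumer, TopicPartition

    conf = {'bootstrap.servers': 'localhost:9092', 'group.id': 'metadata-demo'}

    writer = Consumer(conf)
    writer.commit(offsets=[TopicPartition('some-topic', 0, 1, 'checkpoint-42')],
                  asynchronous=False)
    writer.close()

    reader = Consumer(conf)
    tp, = reader.committed([TopicPartition('some-topic', 0)], timeout=10)
    assert tp.metadata == 'checkpoint-42'
    reader.close()
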
diff --git a/tests/integration/integration_test.py b/tests/integration/integration_test.py
index dc6caeb..e4ae6de 100755
--- a/tests/integration/integration_test.py
+++ b/tests/integration/integration_test.py
@@ -21,7 +21,6 @@
 """ Test script for confluent_kafka module """
 
 import confluent_kafka
-from confluent_kafka import admin
 import os
 import time
 import uuid
@@ -29,6 +28,7 @@ import sys
 import json
 import gc
 import struct
+import re
 
 try:
     # Memory tracker
@@ -569,7 +569,8 @@ def verify_consumer_seek(c, seek_to_msg):
 
     tp = confluent_kafka.TopicPartition(seek_to_msg.topic(),
                                         seek_to_msg.partition(),
-                                        seek_to_msg.offset())
+                                        seek_to_msg.offset(),
+                                        leader_epoch=seek_to_msg.leader_epoch())
     print('seek: Seeking to %s' % tp)
     c.seek(tp)
 
@@ -583,9 +584,14 @@ def verify_consumer_seek(c, seek_to_msg):
         if msg.topic() != seek_to_msg.topic() or msg.partition() != seek_to_msg.partition():
             continue
 
-        print('seek: message at offset %d' % msg.offset())
-        assert msg.offset() == seek_to_msg.offset(), \
-            'expected message at offset %d, not %d' % (seek_to_msg.offset(), msg.offset())
+        print('seek: message at offset %d (epoch %d)' %
+              (msg.offset(), msg.leader_epoch()))
+        assert msg.offset() == seek_to_msg.offset() and \
+               msg.leader_epoch() == seek_to_msg.leader_epoch(), \
+               ('expected message at offset %d (epoch %d), ' % (seek_to_msg.offset(),
+                                                                seek_to_msg.leader_epoch())) + \
+               ('not %d (epoch %d)' % (msg.offset(),
+                                       msg.leader_epoch()))
         break
 
 
@@ -796,13 +802,17 @@ def verify_avro_basic_auth(mode_conf):
     if mode_conf is None:
         abort_on_missing_configuration('avro-basic-auth')
 
-    url = {
-        'schema.registry.basic.auth.credentials.source': 'URL'
+    url = mode_conf.get('schema.registry.url')
+    credentials = mode_conf.get('schema.registry.basic.auth.user.info')
+
+    url_conf = {
+        'schema.registry.basic.auth.credentials.source': 'URL',
+        'schema.registry.url': str(re.sub("(^https?://)", f"\\1{credentials}@", url))
     }
 
     user_info = {
         'schema.registry.basic.auth.credentials.source': 'USER_INFO',
-        'schema.registry.basic.auth.user.info': mode_conf.get('schema.registry.basic.auth.user.info')
+        'schema.registry.basic.auth.user.info': credentials
     }
 
     sasl_inherit = {
@@ -812,10 +822,10 @@ def verify_avro_basic_auth(mode_conf):
     }
 
     base_conf = {
-            'bootstrap.servers': bootstrap_servers,
-            'error_cb': error_cb,
-            'schema.registry.url': schema_registry_url
-            }
+        'bootstrap.servers': bootstrap_servers,
+        'error_cb': error_cb,
+        'schema.registry.url': url
+    }
 
     consumer_conf = dict({'group.id': generate_group_id(),
                           'session.timeout.ms': 6000,
@@ -830,7 +840,7 @@ def verify_avro_basic_auth(mode_conf):
     run_avro_loop(dict(base_conf, **sasl_inherit), dict(consumer_conf, **sasl_inherit))
 
     print('-' * 10, 'Verifying basic auth source URL', '-' * 10)
-    run_avro_loop(dict(base_conf, **url), dict(consumer_conf, **url))
+    run_avro_loop(dict(base_conf, **url_conf), dict(consumer_conf, **url_conf))
 
 
 def run_avro_loop(producer_conf, consumer_conf):
@@ -1029,7 +1039,7 @@ def verify_stats_cb():
     c.close()
 
 
-def verify_topic_metadata(client, exp_topics):
+def verify_topic_metadata(client, exp_topics, *args, **kwargs):
     """
     Verify that exp_topics (dict<topicname,partcnt>) is reported in metadata.
     Will retry and wait for some time to let changes propagate.
@@ -1041,7 +1051,7 @@ def verify_topic_metadata(client, exp_topics):
     for retry in range(0, 3):
         do_retry = 0
 
-        md = client.list_topics()
+        md = client.list_topics(*args, **kwargs)
 
         for exptopic, exppartcnt in exp_topics.items():
             if exptopic not in md.topics:
@@ -1067,157 +1077,6 @@ def verify_topic_metadata(client, exp_topics):
     raise Exception("Timed out waiting for topics {} in metadata".format(exp_topics))
 
 
-def verify_admin():
-    """ Verify Admin API """
-
-    a = admin.AdminClient({'bootstrap.servers': bootstrap_servers})
-    our_topic = topic + '_admin_' + str(uuid.uuid4())
-    num_partitions = 2
-
-    topic_config = {"compression.type": "gzip"}
-
-    #
-    # First iteration: validate our_topic creation.
-    # Second iteration: create topic.
-    #
-    for validate in (True, False):
-        fs = a.create_topics([admin.NewTopic(our_topic,
-                                             num_partitions=num_partitions,
-                                             config=topic_config,
-                                             replication_factor=1)],
-                             validate_only=validate,
-                             operation_timeout=10.0)
-
-        for topic2, f in fs.items():
-            f.result()  # trigger exception if there was an error
-
-    #
-    # Find the topic in list_topics
-    #
-    verify_topic_metadata(a, {our_topic: num_partitions})
-
-    #
-    # Increase the partition count
-    #
-    num_partitions += 3
-    fs = a.create_partitions([admin.NewPartitions(our_topic,
-                                                  new_total_count=num_partitions)],
-                             operation_timeout=10.0)
-
-    for topic2, f in fs.items():
-        f.result()  # trigger exception if there was an error
-
-    #
-    # Verify with list_topics.
-    #
-    verify_topic_metadata(a, {our_topic: num_partitions})
-
-    #
-    # Verify with list_groups.
-    #
-
-    # Produce some messages
-    p = confluent_kafka.Producer({"bootstrap.servers": bootstrap_servers})
-    p.produce(our_topic, 'Hello Python!', headers=produce_headers)
-    p.produce(our_topic, key='Just a key and headers', headers=produce_headers)
-
-    def consume_messages(group_id):
-        # Consume messages
-        conf = {'bootstrap.servers': bootstrap_servers,
-                'group.id': group_id,
-                'session.timeout.ms': 6000,
-                'enable.auto.commit': False,
-                'on_commit': print_commit_result,
-                'error_cb': error_cb,
-                'auto.offset.reset': 'earliest',
-                'enable.partition.eof': True}
-        c = confluent_kafka.Consumer(conf)
-        c.subscribe([our_topic])
-        eof_reached = dict()
-        while True:
-            msg = c.poll()
-            if msg is None:
-                raise Exception('Got timeout from poll() without a timeout set: %s' % msg)
-
-            if msg.error():
-                if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:
-                    print('Reached end of %s [%d] at offset %d' % (
-                          msg.topic(), msg.partition(), msg.offset()))
-                    eof_reached[(msg.topic(), msg.partition())] = True
-                    if len(eof_reached) == len(c.assignment()):
-                        print('EOF reached for all assigned partitions: exiting')
-                        break
-                else:
-                    print('Consumer error: %s: ignoring' % msg.error())
-                    break
-            # Commit offset
-            c.commit(msg, asynchronous=False)
-
-    group1 = 'test-group-1'
-    group2 = 'test-group-2'
-    consume_messages(group1)
-    consume_messages(group2)
-    # list_groups without group argument
-    groups = set(group.id for group in a.list_groups(timeout=10))
-    assert group1 in groups, "Consumer group {} not found".format(group1)
-    assert group2 in groups, "Consumer group {} not found".format(group2)
-    # list_groups with group argument
-    groups = set(group.id for group in a.list_groups(group1))
-    assert group1 in groups, "Consumer group {} not found".format(group1)
-    groups = set(group.id for group in a.list_groups(group2))
-    assert group2 in groups, "Consumer group {} not found".format(group2)
-
-    def verify_config(expconfig, configs):
-        """
-        Verify that the config key,values in expconfig are found
-        and matches the ConfigEntry in configs.
-        """
-        for key, expvalue in expconfig.items():
-            entry = configs.get(key, None)
-            assert entry is not None, "Config {} not found in returned configs".format(key)
-
-            assert entry.value == str(expvalue), \
-                "Config {} with value {} does not match expected value {}".format(key, entry, expvalue)
-
-    #
-    # Get current topic config
-    #
-    resource = admin.ConfigResource(admin.RESOURCE_TOPIC, our_topic)
-    fs = a.describe_configs([resource])
-    configs = fs[resource].result()  # will raise exception on failure
-
-    # Verify config matches our expectations
-    verify_config(topic_config, configs)
-
-    #
-    # Now change the config.
-    #
-    topic_config["file.delete.delay.ms"] = 12345
-    topic_config["compression.type"] = "snappy"
-
-    for key, value in topic_config.items():
-        resource.set_config(key, value)
-
-    fs = a.alter_configs([resource])
-    fs[resource].result()  # will raise exception on failure
-
-    #
-    # Read the config back again and verify.
-    #
-    fs = a.describe_configs([resource])
-    configs = fs[resource].result()  # will raise exception on failure
-
-    # Verify config matches our expectations
-    verify_config(topic_config, configs)
-
-    #
-    # Delete the topic
-    #
-    fs = a.delete_topics([our_topic])
-    fs[our_topic].result()  # will raise exception on failure
-    print("Topic {} marked for deletion".format(our_topic))
-
-
 def verify_avro_explicit_read_schema():
     from confluent_kafka import avro
 
@@ -1273,10 +1132,10 @@ def verify_avro_explicit_read_schema():
         msgcount += 1
         # Avro schema projection should return the two fields not present in the writer schema
         try:
-            assert(msg.key().get('favorite_number') == 42)
-            assert(msg.key().get('favorite_color') == "purple")
-            assert(msg.value().get('favorite_number') == 42)
-            assert(msg.value().get('favorite_color') == "purple")
+            assert (msg.key().get('favorite_number') == 42)
+            assert (msg.key().get('favorite_color') == "purple")
+            assert (msg.value().get('favorite_number') == 42)
+            assert (msg.value().get('favorite_color') == "purple")
             print("success: schema projection worked for explicit reader schema")
         except KeyError:
             raise confluent_kafka.avro.SerializerError("Schema projection failed when setting reader schema.")
@@ -1401,15 +1260,21 @@ if __name__ == '__main__':
     if 'avro-https' in modes:
         print('=' * 30, 'Verifying AVRO with HTTPS', '=' * 30)
         verify_avro_https(testconf.get('avro-https', None))
+        key_with_password_conf = testconf.get("avro-https-key-with-password", None)
+        print('=' * 30, 'Verifying AVRO with HTTPS Flow with Password',
+              'Protected Private Key of Cached-Schema-Registry-Client', '=' * 30)
+        verify_avro_https(key_with_password_conf)
+        print('Verifying Error with Wrong Password of Password Protected Private Key of Cached-Schema-Registry-Client')
+        try:
+            key_with_password_conf['schema.registry.ssl.key.password'] += '->wrongpassword'
+            verify_avro_https(key_with_password_conf)
+        except Exception:
+            print("Wrong Password Gives Error -> Successful")
 
     if 'avro-basic-auth' in modes:
         print("=" * 30, 'Verifying AVRO with Basic Auth', '=' * 30)
         verify_avro_basic_auth(testconf.get('avro-basic-auth', None))
 
-    if 'admin' in modes:
-        print('=' * 30, 'Verifying Admin API', '=' * 30)
-        verify_admin()
-
     print('=' * 30, 'Done', '=' * 30)
 
     if with_pympler:
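
Two behavioural changes are folded into this file: verify_consumer_seek() now carries the message's leader_epoch through TopicPartition and asserts on it, and the basic-auth 'URL' credential source builds schema.registry.url by splicing the user:password pair from the test config into the configured URL instead of relying on a fixed address. The URL rewrite is a plain regex substitution; a standalone illustration with made-up values:

    import re

    url = "https://sr.example.com:8081"   # hypothetical registry URL
    credentials = "user:secret"           # hypothetical basic.auth.user.info value

    url_with_auth = re.sub("(^https?://)", f"\\1{credentials}@", url)
    print(url_with_auth)                  # https://user:secret@sr.example.com:8081
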
diff --git a/tests/integration/schema_registry/data/PublicTestProto.proto b/tests/integration/schema_registry/data/PublicTestProto.proto
deleted file mode 100644
index 2fb2ca4..0000000
--- a/tests/integration/schema_registry/data/PublicTestProto.proto
+++ /dev/null
@@ -1,6 +0,0 @@
-syntax = "proto3";
-
-import public "TestProto.proto";
-
-package tests.integration.serialization.data;
-
diff --git a/tests/integration/schema_registry/data/customer.json b/tests/integration/schema_registry/data/customer.json
new file mode 100644
index 0000000..7b9887f
--- /dev/null
+++ b/tests/integration/schema_registry/data/customer.json
@@ -0,0 +1,22 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "$id": "http://example.com/customer.schema.json",
+  "title": "Customer",
+  "description": "Customer data",
+  "type": "object",
+  "properties": {
+    "name": {
+      "description": "Customer name",
+      "type": "string"
+    },
+    "id": {
+      "description": "Customer id",
+      "type": "integer"
+    },
+    "email": {
+      "description": "Customer email",
+      "type": "string"
+    }
+  },
+  "required": [ "name", "id"]
+}
diff --git a/tests/integration/schema_registry/data/order.json b/tests/integration/schema_registry/data/order.json
new file mode 100644
index 0000000..5ba94c9
--- /dev/null
+++ b/tests/integration/schema_registry/data/order.json
@@ -0,0 +1,24 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "$id": "http://example.com/referencedproduct.schema.json",
+  "title": "Order",
+  "description": "Order",
+  "type": "object",
+  "properties": {
+    "order_details": {
+      "description": "Order Details",
+      "$ref": "http://example.com/order_details.schema.json"
+    },
+    "order_date": {
+      "description": "Order Date",
+      "type": "string",
+      "format": "date-time"
+    },
+    "product": {
+      "description": "Product",
+      "$ref": "http://example.com/product.schema.json"
+    }
+  },
+  "required": [
+    "order_details", "product"]
+}
diff --git a/tests/integration/schema_registry/data/order_details.json b/tests/integration/schema_registry/data/order_details.json
new file mode 100644
index 0000000..5fa933d
--- /dev/null
+++ b/tests/integration/schema_registry/data/order_details.json
@@ -0,0 +1,22 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "$id": "http://example.com/order_details.schema.json",
+  "title": "Order Details",
+  "description": "Order Details",
+  "type": "object",
+  "properties": {
+    "id": {
+      "description": "Order Id",
+      "type": "integer"
+    },
+    "customer": {
+      "description": "Customer",
+      "$ref": "http://example.com/customer.schema.json"
+    },
+    "payment_id": {
+      "description": "Payment Id",
+      "type": "string"
+    }
+  },
+  "required": [ "id", "customer"]
+}
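
customer.json, order_details.json and order.json form a $ref chain (order -> order_details -> customer, plus a product schema) that the JSON Schema reference tests register with Schema Registry. A hedged sketch of registering such a chain with the Python client, assuming a local registry and the subject names below (the 'product' subject is assumed to exist as well, since order.json references it):

    from confluent_kafka.schema_registry import (SchemaRegistryClient, Schema,
                                                 SchemaReference)

    sr = SchemaRegistryClient({'url': 'http://localhost:8081'})  # assumed address

    customer = Schema(open('customer.json').read(), 'JSON')
    sr.register_schema('customer', customer)

    order_details = Schema(
        open('order_details.json').read(), 'JSON',
        [SchemaReference('http://example.com/customer.schema.json', 'customer', 1)])
    sr.register_schema('order_details', order_details)

    order = Schema(
        open('order.json').read(), 'JSON',
        [SchemaReference('http://example.com/order_details.schema.json',
                         'order_details', 1),
         SchemaReference('http://example.com/product.schema.json', 'product', 1)])
    sr.register_schema('order', order)
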
diff --git a/tests/integration/schema_registry/data/DependencyTestProto.proto b/tests/integration/schema_registry/data/proto/DependencyTestProto.proto
similarity index 63%
rename from tests/integration/schema_registry/data/DependencyTestProto.proto
rename to tests/integration/schema_registry/data/proto/DependencyTestProto.proto
index 183dc39..693ba03 100644
--- a/tests/integration/schema_registry/data/DependencyTestProto.proto
+++ b/tests/integration/schema_registry/data/proto/DependencyTestProto.proto
@@ -1,7 +1,7 @@
 syntax = "proto3";
 
-import "NestedTestProto.proto";
-import "PublicTestProto.proto";
+import "tests/integration/schema_registry/data/proto/NestedTestProto.proto";
+import "tests/integration/schema_registry/data/proto/PublicTestProto.proto";
 
 package tests.integration.serialization.data;
 
diff --git a/tests/integration/schema_registry/data/proto/DependencyTestProto_pb2.py b/tests/integration/schema_registry/data/proto/DependencyTestProto_pb2.py
new file mode 100644
index 0000000..646cabb
--- /dev/null
+++ b/tests/integration/schema_registry/data/proto/DependencyTestProto_pb2.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: tests/integration/schema_registry/data/proto/DependencyTestProto.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import builder as _builder
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from tests.integration.schema_registry.data.proto import NestedTestProto_pb2 as tests_dot_integration_dot_schema__registry_dot_data_dot_proto_dot_NestedTestProto__pb2
+from tests.integration.schema_registry.data.proto import PublicTestProto_pb2 as tests_dot_integration_dot_schema__registry_dot_data_dot_proto_dot_PublicTestProto__pb2
+try:
+  tests_dot_integration_dot_schema__registry_dot_data_dot_proto_dot_TestProto__pb2 = tests_dot_integration_dot_schema__registry_dot_data_dot_proto_dot_PublicTestProto__pb2.tests_dot_integration_dot_schema__registry_dot_data_dot_proto_dot_TestProto__pb2
+except AttributeError:
+  tests_dot_integration_dot_schema__registry_dot_data_dot_proto_dot_TestProto__pb2 = tests_dot_integration_dot_schema__registry_dot_data_dot_proto_dot_PublicTestProto__pb2.tests.integration.schema_registry.data.proto.TestProto_pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nFtests/integration/schema_registry/data/proto/DependencyTestProto.proto\x12$tests.integration.serialization.data\x1a\x42tests/integration/schema_registry/data/proto/NestedTestProto.proto\x1a\x42tests/integration/schema_registry/data/proto/PublicTestProto.proto\"\x98\x01\n\x11\x44\x65pendencyMessage\x12K\n\x0enested_message\x18\x01 \x01(\x0b\x32\x33.tests.integration.serialization.data.NestedMessage\x12\x11\n\tis_active\x18\x02 \x01(\x08\x12#\n\rtest_messsage\x18\x03 \x01(\x0b\x32\x0c.TestMessageB.\n,io.confluent.kafka.serializers.protobuf.testb\x06proto3')
+
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tests.integration.schema_registry.data.proto.DependencyTestProto_pb2', globals())
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+  DESCRIPTOR._options = None
+  DESCRIPTOR._serialized_options = b'\n,io.confluent.kafka.serializers.protobuf.test'
+  _DEPENDENCYMESSAGE._serialized_start=249
+  _DEPENDENCYMESSAGE._serialized_end=401
+# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/data/Makefile b/tests/integration/schema_registry/data/proto/Makefile
similarity index 66%
rename from tests/integration/schema_registry/data/Makefile
rename to tests/integration/schema_registry/data/proto/Makefile
index d87eac0..131c29c 100644
--- a/tests/integration/schema_registry/data/Makefile
+++ b/tests/integration/schema_registry/data/proto/Makefile
@@ -1,7 +1,6 @@
 WORK_DIR:=$(strip $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))))
 PROTO_HOME=/usr/local/opt/include
-SRC_DIR=$(WORK_DIR)
-TARGET_DIR=$(WORK_DIR)/../gen
+SRC_DIR=$(realpath $(WORK_DIR)/../../../../..)
 
 PROTOS := common_proto.proto DependencyTestProto.proto exampleProtoCriteo.proto $\
          metadata_proto.proto NestedTestProto.proto PublicTestProto.proto $\
@@ -9,8 +8,8 @@ PROTOS := common_proto.proto DependencyTestProto.proto exampleProtoCriteo.proto
 
 compile: $(PROTOS)
 	for proto in $(PROTOS); do \
-		protoc -I=$(PROTO_HOME) -I=$(SRC_DIR) --python_out=$(TARGET_DIR) $$proto ; \
+		(cd $(SRC_DIR) && protoc -I=$(PROTO_HOME) -I=$(SRC_DIR) --python_out=$(SRC_DIR) tests/integration/schema_registry/data/proto/$$proto ;) \
 	done
 
 clean:
-	rm -f $(TARGET_DIR)/*_pb2.py
+	rm -f *_pb2.py
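
The Makefile change runs protoc from the repository root so the regenerated *_pb2.py modules land next to their .proto sources and import one another by the full tests.integration.schema_registry.data.proto package path. Roughly the same invocation expressed as a Python sketch (the checkout path is a placeholder):

    import subprocess

    REPO_ROOT = "/path/to/confluent-kafka-python"   # hypothetical checkout location
    PROTO = "tests/integration/schema_registry/data/proto/TestProto.proto"

    # Run from the repo root with the root on the include path, so the
    # fully-qualified imports inside the .proto files resolve correctly.
    subprocess.run(["protoc", "-I=" + REPO_ROOT, "--python_out=" + REPO_ROOT, PROTO],
                   cwd=REPO_ROOT, check=True)
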
diff --git a/tests/integration/schema_registry/data/NestedTestProto.proto b/tests/integration/schema_registry/data/proto/NestedTestProto.proto
similarity index 100%
rename from tests/integration/schema_registry/data/NestedTestProto.proto
rename to tests/integration/schema_registry/data/proto/NestedTestProto.proto
diff --git a/tests/integration/schema_registry/data/proto/NestedTestProto_pb2.py b/tests/integration/schema_registry/data/proto/NestedTestProto_pb2.py
new file mode 100644
index 0000000..1a7cda3
--- /dev/null
+++ b/tests/integration/schema_registry/data/proto/NestedTestProto_pb2.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: tests/integration/schema_registry/data/proto/NestedTestProto.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import builder as _builder
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nBtests/integration/schema_registry/data/proto/NestedTestProto.proto\x12$tests.integration.serialization.data\x1a\x1fgoogle/protobuf/timestamp.proto\"\x8c\x01\n\x06UserId\x12\x17\n\rkafka_user_id\x18\x01 \x01(\tH\x00\x12\x17\n\rother_user_id\x18\x02 \x01(\x05H\x00\x12\x45\n\nanother_id\x18\x03 \x01(\x0b\x32/.tests.integration.serialization.data.MessageIdH\x00\x42\t\n\x07user_id\"\x17\n\tMessageId\x12\n\n\x02id\x18\x01 \x01(\t\"R\n\x0b\x43omplexType\x12\x10\n\x06one_id\x18\x01 \x01(\tH\x00\x12\x12\n\x08other_id\x18\x02 \x01(\x05H\x00\x12\x11\n\tis_active\x18\x03 \x01(\x08\x42\n\n\x08some_val\"\xd0\x04\n\rNestedMessage\x12=\n\x07user_id\x18\x01 \x01(\x0b\x32,.tests.integration.serialization.data.UserId\x12\x11\n\tis_active\x18\x02 \x01(\x08\x12\x1a\n\x12\x65xperiments_active\x18\x03 \x03(\t\x12<\n\x06status\x18\x05 \x01(\x0e\x32,.tests.integration.serialization.data.Status\x12G\n\x0c\x63omplex_type\x18\x06 \x01(\x0b\x32\x31.tests.integration.serialization.data.ComplexType\x12R\n\x08map_type\x18\x07 \x03(\x0b\x32@.tests.integration.serialization.data.NestedMessage.MapTypeEntry\x12O\n\x05inner\x18\x08 \x01(\x0b\x32@.tests.integration.serialization.data.NestedMessage.InnerMessage\x1a.\n\x0cMapTypeEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a/\n\x0cInnerMessage\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x0f\n\x03ids\x18\x02 \x03(\x05\x42\x02\x10\x01\"(\n\tInnerEnum\x12\x08\n\x04ZERO\x10\x00\x12\r\n\tALSO_ZERO\x10\x00\x1a\x02\x10\x01J\x04\x08\x0e\x10\x0fJ\x04\x08\x0f\x10\x10J\x04\x08\t\x10\x0cR\x03\x66ooR\x03\x62\x61r*\"\n\x06Status\x12\n\n\x06\x41\x43TIVE\x10\x00\x12\x0c\n\x08INACTIVE\x10\x01\x42\x41\n,io.confluent.kafka.serializers.protobuf.testB\x0fNestedTestProtoP\x00\x62\x06proto3')
+
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tests.integration.schema_registry.data.proto.NestedTestProto_pb2', globals())
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+  DESCRIPTOR._options = None
+  DESCRIPTOR._serialized_options = b'\n,io.confluent.kafka.serializers.protobuf.testB\017NestedTestProtoP\000'
+  _NESTEDMESSAGE_MAPTYPEENTRY._options = None
+  _NESTEDMESSAGE_MAPTYPEENTRY._serialized_options = b'8\001'
+  _NESTEDMESSAGE_INNERMESSAGE.fields_by_name['ids']._options = None
+  _NESTEDMESSAGE_INNERMESSAGE.fields_by_name['ids']._serialized_options = b'\020\001'
+  _NESTEDMESSAGE_INNERENUM._options = None
+  _NESTEDMESSAGE_INNERENUM._serialized_options = b'\020\001'
+  _STATUS._serialized_start=988
+  _STATUS._serialized_end=1022
+  _USERID._serialized_start=142
+  _USERID._serialized_end=282
+  _MESSAGEID._serialized_start=284
+  _MESSAGEID._serialized_end=307
+  _COMPLEXTYPE._serialized_start=309
+  _COMPLEXTYPE._serialized_end=391
+  _NESTEDMESSAGE._serialized_start=394
+  _NESTEDMESSAGE._serialized_end=986
+  _NESTEDMESSAGE_MAPTYPEENTRY._serialized_start=821
+  _NESTEDMESSAGE_MAPTYPEENTRY._serialized_end=867
+  _NESTEDMESSAGE_INNERMESSAGE._serialized_start=869
+  _NESTEDMESSAGE_INNERMESSAGE._serialized_end=916
+  _NESTEDMESSAGE_INNERENUM._serialized_start=918
+  _NESTEDMESSAGE_INNERENUM._serialized_end=958
+# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/data/proto/PublicTestProto.proto b/tests/integration/schema_registry/data/proto/PublicTestProto.proto
new file mode 100644
index 0000000..a1ef5a6
--- /dev/null
+++ b/tests/integration/schema_registry/data/proto/PublicTestProto.proto
@@ -0,0 +1,6 @@
+syntax = "proto3";
+
+import public "tests/integration/schema_registry/data/proto/TestProto.proto";
+
+package tests.integration.serialization.data;
+
diff --git a/tests/integration/schema_registry/data/proto/PublicTestProto_pb2.py b/tests/integration/schema_registry/data/proto/PublicTestProto_pb2.py
new file mode 100644
index 0000000..fe7d8ff
--- /dev/null
+++ b/tests/integration/schema_registry/data/proto/PublicTestProto_pb2.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: tests/integration/schema_registry/data/proto/PublicTestProto.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import builder as _builder
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from tests.integration.schema_registry.data.proto import TestProto_pb2 as tests_dot_integration_dot_schema__registry_dot_data_dot_proto_dot_TestProto__pb2
+
+from tests.integration.schema_registry.data.proto.TestProto_pb2 import *
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nBtests/integration/schema_registry/data/proto/PublicTestProto.proto\x12$tests.integration.serialization.data\x1a<tests/integration/schema_registry/data/proto/TestProto.protoP\x00\x62\x06proto3')
+
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tests.integration.schema_registry.data.proto.PublicTestProto_pb2', globals())
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+  DESCRIPTOR._options = None
+# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/data/SInt32Value.proto b/tests/integration/schema_registry/data/proto/SInt32Value.proto
similarity index 100%
rename from tests/integration/schema_registry/data/SInt32Value.proto
rename to tests/integration/schema_registry/data/proto/SInt32Value.proto
diff --git a/tests/integration/schema_registry/data/proto/SInt32Value_pb2.py b/tests/integration/schema_registry/data/proto/SInt32Value_pb2.py
new file mode 100644
index 0000000..ff40b80
--- /dev/null
+++ b/tests/integration/schema_registry/data/proto/SInt32Value_pb2.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: tests/integration/schema_registry/data/proto/SInt32Value.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import builder as _builder
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n>tests/integration/schema_registry/data/proto/SInt32Value.proto\"\x1c\n\x0bSInt32Value\x12\r\n\x05value\x18\x01 \x01(\x11\x42.\n,io.confluent.kafka.serializers.protobuf.testb\x06proto3')
+
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tests.integration.schema_registry.data.proto.SInt32Value_pb2', globals())
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+  DESCRIPTOR._options = None
+  DESCRIPTOR._serialized_options = b'\n,io.confluent.kafka.serializers.protobuf.test'
+  _SINT32VALUE._serialized_start=66
+  _SINT32VALUE._serialized_end=94
+# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/data/SInt64Value.proto b/tests/integration/schema_registry/data/proto/SInt64Value.proto
similarity index 100%
rename from tests/integration/schema_registry/data/SInt64Value.proto
rename to tests/integration/schema_registry/data/proto/SInt64Value.proto
diff --git a/tests/integration/schema_registry/data/proto/SInt64Value_pb2.py b/tests/integration/schema_registry/data/proto/SInt64Value_pb2.py
new file mode 100644
index 0000000..75be7ff
--- /dev/null
+++ b/tests/integration/schema_registry/data/proto/SInt64Value_pb2.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: tests/integration/schema_registry/data/proto/SInt64Value.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import builder as _builder
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n>tests/integration/schema_registry/data/proto/SInt64Value.proto\"\x1c\n\x0bSInt64Value\x12\r\n\x05value\x18\x01 \x01(\x12\x42.\n,io.confluent.kafka.serializers.protobuf.testb\x06proto3')
+
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tests.integration.schema_registry.data.proto.SInt64Value_pb2', globals())
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+  DESCRIPTOR._options = None
+  DESCRIPTOR._serialized_options = b'\n,io.confluent.kafka.serializers.protobuf.test'
+  _SINT64VALUE._serialized_start=66
+  _SINT64VALUE._serialized_end=94
+# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/data/TestProto.proto b/tests/integration/schema_registry/data/proto/TestProto.proto
similarity index 100%
rename from tests/integration/schema_registry/data/TestProto.proto
rename to tests/integration/schema_registry/data/proto/TestProto.proto
diff --git a/tests/integration/schema_registry/data/proto/TestProto_pb2.py b/tests/integration/schema_registry/data/proto/TestProto_pb2.py
new file mode 100644
index 0000000..04e6e67
--- /dev/null
+++ b/tests/integration/schema_registry/data/proto/TestProto_pb2.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: tests/integration/schema_registry/data/proto/TestProto.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import builder as _builder
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n<tests/integration/schema_registry/data/proto/TestProto.proto\x1a google/protobuf/descriptor.proto\"\xc8\x02\n\x0bTestMessage\x12\x13\n\x0btest_string\x18\x01 \x01(\t\x12\x11\n\ttest_bool\x18\x02 \x01(\x08\x12\x12\n\ntest_bytes\x18\x03 \x01(\x0c\x12\x13\n\x0btest_double\x18\x04 \x01(\x01\x12\x12\n\ntest_float\x18\x05 \x01(\x02\x12\x14\n\x0ctest_fixed32\x18\x06 \x01(\x07\x12\x14\n\x0ctest_fixed64\x18\x07 \x01(\x06\x12\x12\n\ntest_int32\x18\x08 \x01(\x05\x12\x12\n\ntest_int64\x18\t \x01(\x03\x12\x15\n\rtest_sfixed32\x18\n \x01(\x0f\x12\x15\n\rtest_sfixed64\x18\x0b \x01(\x10\x12\x13\n\x0btest_sint32\x18\x0c \x01(\x11\x12\x13\n\x0btest_sint64\x18\r \x01(\x12\x12\x13\n\x0btest_uint32\x18\x0e \x01(\r\x12\x13\n\x0btest_uint64\x18\x0f \x01(\x04\x42\x41\n,io.confluent.kafka.serializers.protobuf.testB\x11TestMessageProtosb\x06proto3')
+
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tests.integration.schema_registry.data.proto.TestProto_pb2', globals())
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+  DESCRIPTOR._options = None
+  DESCRIPTOR._serialized_options = b'\n,io.confluent.kafka.serializers.protobuf.testB\021TestMessageProtos'
+  _TESTMESSAGE._serialized_start=99
+  _TESTMESSAGE._serialized_end=427
+# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/data/TimestampValue.proto b/tests/integration/schema_registry/data/proto/TimestampValue.proto
similarity index 100%
rename from tests/integration/schema_registry/data/TimestampValue.proto
rename to tests/integration/schema_registry/data/proto/TimestampValue.proto
diff --git a/tests/integration/schema_registry/data/UInt32Value.proto b/tests/integration/schema_registry/data/proto/UInt32Value.proto
similarity index 100%
rename from tests/integration/schema_registry/data/UInt32Value.proto
rename to tests/integration/schema_registry/data/proto/UInt32Value.proto
diff --git a/tests/integration/schema_registry/data/common_proto.proto b/tests/integration/schema_registry/data/proto/common_proto.proto
similarity index 92%
rename from tests/integration/schema_registry/data/common_proto.proto
rename to tests/integration/schema_registry/data/proto/common_proto.proto
index d28bf04..dd71eb5 100644
--- a/tests/integration/schema_registry/data/common_proto.proto
+++ b/tests/integration/schema_registry/data/proto/common_proto.proto
@@ -3,7 +3,7 @@ syntax = "proto3";
 package Criteo.Glup;
 option java_package = "com.criteo.glup";
 
-import "metadata_proto.proto";
+import "tests/integration/schema_registry/data/proto/metadata_proto.proto";
 
 /* Describes if an event or campaign is an appinstall or normal
  * retargeting one
diff --git a/tests/integration/schema_registry/data/proto/common_proto_pb2.py b/tests/integration/schema_registry/data/proto/common_proto_pb2.py
new file mode 100644
index 0000000..dcc2f19
--- /dev/null
+++ b/tests/integration/schema_registry/data/proto/common_proto_pb2.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: tests/integration/schema_registry/data/proto/common_proto.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import builder as _builder
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from tests.integration.schema_registry.data.proto import metadata_proto_pb2 as tests_dot_integration_dot_schema__registry_dot_data_dot_proto_dot_metadata__proto__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n?tests/integration/schema_registry/data/proto/common_proto.proto\x12\x0b\x43riteo.Glup\x1a\x41tests/integration/schema_registry/data/proto/metadata_proto.proto\"\xda\x01\n\x07\x43onsent\x12 \n\x18identification_forbidden\x18\x01 \x01(\x08\x12:\n\x06reason\x18\x02 \x01(\x0e\x32*.Criteo.Glup.IdentificationForbiddenReason\x12\x39\n\nset_fields\x18\xda\x86\x03 \x03(\x0b\x32#.Criteo.Glup.Consent.SetFieldsEntry\x1a\x30\n\x0eSetFieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01:\x04\x88\xb5\x18\x01*/\n\x16MarketingObjectiveType\x12\x08\n\x04Sale\x10\x00\x12\x0b\n\x07Install\x10\x01*\xd0\x01\n\x1dIdentificationForbiddenReason\x12\x0c\n\x08NoReason\x10\x00\x12\x1b\n\x17\x45xplicitConsentRequired\x10\x01\x12\x10\n\x0cOptoutCookie\x10\x02\x12\x13\n\x0f\x43toOptoutCookie\x10\x03\x12\x15\n\x11LimitedAdTracking\x10\x04\x12\x0e\n\nHstsOptout\x10\x05\x12\x14\n\x10\x44oNotTrackHeader\x10\x06\x12\r\n\tOoOCookie\x10\x07\x12\x11\n\rPendingOptout\x10\x08\x42\x11\n\x0f\x63om.criteo.glupb\x06proto3')
+
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tests.integration.schema_registry.data.proto.common_proto_pb2', globals())
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+  DESCRIPTOR._options = None
+  DESCRIPTOR._serialized_options = b'\n\017com.criteo.glup'
+  _CONSENT_SETFIELDSENTRY._options = None
+  _CONSENT_SETFIELDSENTRY._serialized_options = b'8\001'
+  _CONSENT._options = None
+  _CONSENT._serialized_options = b'\210\265\030\001'
+  _MARKETINGOBJECTIVETYPE._serialized_start=368
+  _MARKETINGOBJECTIVETYPE._serialized_end=415
+  _IDENTIFICATIONFORBIDDENREASON._serialized_start=418
+  _IDENTIFICATIONFORBIDDENREASON._serialized_end=626
+  _CONSENT._serialized_start=148
+  _CONSENT._serialized_end=366
+  _CONSENT_SETFIELDSENTRY._serialized_start=312
+  _CONSENT_SETFIELDSENTRY._serialized_end=360
+# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/data/exampleProtoCriteo.proto b/tests/integration/schema_registry/data/proto/exampleProtoCriteo.proto
similarity index 92%
rename from tests/integration/schema_registry/data/exampleProtoCriteo.proto
rename to tests/integration/schema_registry/data/proto/exampleProtoCriteo.proto
index 34db212..c00929e 100644
--- a/tests/integration/schema_registry/data/exampleProtoCriteo.proto
+++ b/tests/integration/schema_registry/data/proto/exampleProtoCriteo.proto
@@ -2,8 +2,8 @@ syntax = "proto3";
 package Criteo.Glup;
 option java_package = "com.criteo.glup";
 
-import "metadata_proto.proto";
-import "common_proto.proto";
+import "tests/integration/schema_registry/data/proto/metadata_proto.proto";
+import "tests/integration/schema_registry/data/proto/common_proto.proto";
 
 message ClickCas {
   option (contains_nullable_fields) = true;
diff --git a/tests/integration/schema_registry/data/proto/exampleProtoCriteo_pb2.py b/tests/integration/schema_registry/data/proto/exampleProtoCriteo_pb2.py
new file mode 100644
index 0000000..ff245a7
--- /dev/null
+++ b/tests/integration/schema_registry/data/proto/exampleProtoCriteo_pb2.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: tests/integration/schema_registry/data/proto/exampleProtoCriteo.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import builder as _builder
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from tests.integration.schema_registry.data.proto import metadata_proto_pb2 as tests_dot_integration_dot_schema__registry_dot_data_dot_proto_dot_metadata__proto__pb2
+from tests.integration.schema_registry.data.proto import common_proto_pb2 as tests_dot_integration_dot_schema__registry_dot_data_dot_proto_dot_common__proto__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nEtests/integration/schema_registry/data/proto/exampleProtoCriteo.proto\x12\x0b\x43riteo.Glup\x1a\x41tests/integration/schema_registry/data/proto/metadata_proto.proto\x1a?tests/integration/schema_registry/data/proto/common_proto.proto\"\x9b\x06\n\x08\x43lickCas\x12(\n\x0bglup_origin\x18\x01 \x01(\x0b\x32\x13.Criteo.Glup.Origin\x12)\n\tpartition\x18\x02 \x01(\x0b\x32\x16.Criteo.Glup.Partition\x12\x0b\n\x03uid\x18\x05 \x01(\t\x12:\n\nset_fields\x18\xda\x86\x03 \x03(\x0b\x32$.Criteo.Glup.ClickCas.SetFieldsEntry\x12R\n\x0f\x63ontrol_message\x18\xff\xff\x7f \x03(\x0b\x32%.Criteo.Glup.ControlMessage.WatermarkB\x10\x92\xb5\x18\x0c\n\n__metadata\x1a\x30\n\x0eSetFieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01:\xc9\x03\x88\xb5\x18\x01\x82\xb5\x18\x04:\x02\x10\x01\x82\xb5\x18\x12\n\x10\n\x0eglup_click_cas\x82\xb5\x18\xea\x01*\xe7\x01\n\tclick_cas\x12G\n,/glup/datasets/click_cas/data/full/JSON_PAIL\x10\x02@dJ\x13\x46\x45\x44\x45RATED_JSON_PAIL\x12U\n3/glup/datasets/click_cas/data/full/PROTOBUF_PARQUET\x10\x04@2J\x1a\x46\x45\x44\x45RATED_PROTOBUF_PARQUET\x18\x04\"&com.criteo.glup.ClickCasProto$ClickCas2\x0b\x65nginejoins@\x01H\x86\x03\x82\xb5\x18\xb3\x01\x12\xb0\x01\x1a\xad\x01\n\x0b\x65nginejoins\x12\tclick_cas \x04Z9\x12\x30\n\x0eglup_click_cas\"\tclick_cas*\x13\x46\x45\x44\x45RATED_JSON_PAIL\xd2\x0f\x04\x08\x02\x10\x06ZP2G\n\tclick_cas\x12\tclick_cas*\x13\x46\x45\x44\x45RATED_JSON_PAIL2\x1a\x46\x45\x44\x45RATED_PROTOBUF_PARQUET\xd2\x0f\x04\x08\x02\x10\x06\x62\x04R\x02\x18\x04J\x04\x08\x46\x10JJ\x04\x08K\x10LR\x08obsoleteR\tobsolete2B\x11\n\x0f\x63om.criteo.glupb\x06proto3')
+
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tests.integration.schema_registry.data.proto.exampleProtoCriteo_pb2', globals())
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+  DESCRIPTOR._options = None
+  DESCRIPTOR._serialized_options = b'\n\017com.criteo.glup'
+  _CLICKCAS_SETFIELDSENTRY._options = None
+  _CLICKCAS_SETFIELDSENTRY._serialized_options = b'8\001'
+  _CLICKCAS.fields_by_name['control_message']._options = None
+  _CLICKCAS.fields_by_name['control_message']._serialized_options = b'\222\265\030\014\n\n__metadata'
+  _CLICKCAS._options = None
+  _CLICKCAS._serialized_options = b'\210\265\030\001\202\265\030\004:\002\020\001\202\265\030\022\n\020\n\016glup_click_cas\202\265\030\352\001*\347\001\n\tclick_cas\022G\n,/glup/datasets/click_cas/data/full/JSON_PAIL\020\002@dJ\023FEDERATED_JSON_PAIL\022U\n3/glup/datasets/click_cas/data/full/PROTOBUF_PARQUET\020\004@2J\032FEDERATED_PROTOBUF_PARQUET\030\004\"&com.criteo.glup.ClickCasProto$ClickCas2\013enginejoins@\001H\206\003\202\265\030\263\001\022\260\001\032\255\001\n\013enginejoins\022\tclick_cas \004Z9\0220\n\016glup_click_cas\"\tclick_cas*\023FEDERATED_JSON_PAIL\322\017\004\010\002\020\006ZP2G\n\tclick_cas\022\tclick_cas*\023FEDERATED_JSON_PAIL2\032FEDERATED_PROTOBUF_PARQUET\322\017\004\010\002\020\006b\004R\002\030\004'
+  _CLICKCAS._serialized_start=219
+  _CLICKCAS._serialized_end=1014
+  _CLICKCAS_SETFIELDSENTRY._serialized_start=473
+  _CLICKCAS_SETFIELDSENTRY._serialized_end=521
+# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/data/metadata_proto.proto b/tests/integration/schema_registry/data/proto/metadata_proto.proto
similarity index 100%
rename from tests/integration/schema_registry/data/metadata_proto.proto
rename to tests/integration/schema_registry/data/proto/metadata_proto.proto
diff --git a/tests/integration/schema_registry/data/proto/metadata_proto_pb2.py b/tests/integration/schema_registry/data/proto/metadata_proto_pb2.py
new file mode 100644
index 0000000..01ba42d
--- /dev/null
+++ b/tests/integration/schema_registry/data/proto/metadata_proto_pb2.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: tests/integration/schema_registry/data/proto/metadata_proto.proto
+"""Generated protocol buffer code."""
+from google.protobuf.internal import builder as _builder
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nAtests/integration/schema_registry/data/proto/metadata_proto.proto\x12\x0b\x43riteo.Glup\x1a google/protobuf/descriptor.proto\"$\n\x13KafkaMessageOptions\x12\r\n\x05topic\x18\x01 \x03(\t\"\x80\x02\n\x07\x44\x61taSet\x12\n\n\x02id\x18\x01 \x01(\t\x12*\n\x06\x66ormat\x18\x02 \x03(\x0b\x32\x1a.Criteo.Glup.DataSetFormat\x12\x36\n\x10partition_scheme\x18\x03 \x01(\x0e\x32\x1c.Criteo.Glup.PartitionScheme\x12\x12\n\njava_class\x18\x04 \x01(\t\x12\x11\n\tfor_tests\x18\x05 \x01(\x08\x12\r\n\x05owner\x18\x06 \x01(\t\x12\x0f\n\x07private\x18\x07 \x01(\x08\x12&\n\x04kind\x18\x08 \x01(\x0e\x32\x18.Criteo.Glup.DataSetKind\x12\x16\n\x0eretention_days\x18\t \x01(\x05\"x\n\x0c\x44\x61taSetChunk\x12)\n\tpartition\x18\x01 \x03(\x0b\x32\x16.Criteo.Glup.Partition\x12*\n\x06\x66ormat\x18\x02 \x01(\x0b\x32\x1a.Criteo.Glup.DataSetFormat\x12\x11\n\tdatasetId\x18\x03 \x01(\t\"\xe6\x02\n\rDataSetFormat\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x30\n\x0b\x66ile_format\x18\x02 \x01(\x0e\x32\x1b.Criteo.Glup.HDFSDataFormat\x12\x36\n\x10partition_scheme\x18\x03 \x01(\x0e\x32\x1c.Criteo.Glup.PartitionScheme\x12\x33\n\x0fstart_partition\x18\x04 \x01(\x0b\x32\x1a.Criteo.Glup.HDFSPartition\x12\x31\n\rend_partition\x18\x05 \x01(\x0b\x32\x1a.Criteo.Glup.HDFSPartition\x12\x16\n\x0eretention_days\x18\x07 \x01(\x05\x12\x10\n\x08priority\x18\x08 \x01(\x05\x12\r\n\x05label\x18\t \x01(\t\x12\x36\n\x10monitoring_level\x18\n \x01(\x0e\x32\x1c.Criteo.Glup.MonitoringLevelJ\x04\x08\x06\x10\x07\"\xce\x19\n\x0bHDFSOptions\x12\x36\n\x06import\x18\x03 \x03(\x0b\x32&.Criteo.Glup.HDFSOptions.ImportOptions\x1a\x86\x19\n\rImportOptions\x12\r\n\x05owner\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x32\n\x0cpartitioning\x18\x04 \x01(\x0e\x32\x1c.Criteo.Glup.PartitionScheme\x12+\n\x06\x66ormat\x18\x05 \x01(\x0e\x32\x1b.Criteo.Glup.HDFSDataFormat\x12\x0f\n\x07private\x18\x06 \x01(\x08\x12\x43\n\tgenerator\x18\x0b \x03(\x0b\x32\x30.Criteo.Glup.HDFSOptions.ImportOptions.Generator\x12\x39\n\x04view\x18\x0c \x03(\x0b\x32+.Criteo.Glup.HDFSOptions.ImportOptions.View\x1a\x90\x01\n\x04View\x12\x45\n\x04hive\x18\n \x01(\x0b\x32\x37.Criteo.Glup.HDFSOptions.ImportOptions.View.HiveOptions\x1a\x41\n\x0bHiveOptions\x12\x32\n\x0cpartitioning\x18\x03 \x01(\x0e\x32\x1c.Criteo.Glup.PartitionScheme\x1a\xd2\x15\n\tGenerator\x12V\n\ndataloader\x18\x01 \x01(\x0b\x32\x42.Criteo.Glup.HDFSOptions.ImportOptions.Generator.DataloaderOptions\x12V\n\nkafka2hdfs\x18\x02 \x01(\x0b\x32\x42.Criteo.Glup.HDFSOptions.ImportOptions.Generator.Kafka2HdfsOptions\x12J\n\x04sync\x18\x03 \x01(\x0b\x32<.Criteo.Glup.HDFSOptions.ImportOptions.Generator.SyncOptions\x12R\n\x08\x65xternal\x18\x04 \x01(\x0b\x32@.Criteo.Glup.HDFSOptions.ImportOptions.Generator.ExternalOptions\x12N\n\x06\x62\x61\x63kup\x18\x05 \x01(\x0b\x32>.Criteo.Glup.HDFSOptions.ImportOptions.Generator.BackupOptions\x12X\n\x0btranscoding\x18\x06 \x01(\x0b\x32\x43.Criteo.Glup.HDFSOptions.ImportOptions.Generator.TranscodingOptions\x12N\n\x06kacoha\x18\x07 \x01(\x0b\x32>.Criteo.Glup.HDFSOptions.ImportOptions.Generator.KaCoHaOptions\x12R\n\x0b\x64\x65\x64uplicate\x18\x08 \x01(\x0b\x32=.Criteo.Glup.HDFSOptions.ImportOptions.Generator.DedupOptions\x12P\n\x07sampler\x18\t \x01(\x0b\x32?.Criteo.Glup.HDFSOptions.ImportOptions.Generator.SamplerOptions\x12V\n\ncomparator\x18\n \x01(\x0b\x32\x42.Criteo.Glup.HDFSOptions.ImportOptions.Generator.ComparatorOptions\x12\"\n\x02to\x18\xfa\x01 \x03(\x0b\x32\x15.Criteo.Glup.Location\x12\x12\n\tnamespace\x18\xfb\x01 \x01(\t\x12\x13\n\nstart_date\x18\xfd\x01 \x01(\t\x12\x12\n\tstop_date\x18\xfe\x01 \x01(\t\x12\x12\n\tignore_cn\x18\xff\x01 \x01(\x08\x1a\x9a\x01\n\x0c\x44\x65\x64upOptions\x12\x18\n\x10input_dataset_id\x18\x01 \x01(\t\x12\x1a\n\x12input_format_label\x18\x02 \x01(\t\x12\x19\n\x11output_dataset_id\x18\x03 \x01(\t\x12\x1b\n\x13output_format_label\x18\x04 \x01(\t\x12\x1c\n\x14use_hippo_cuttle_job\x18\x05 \x01(\x08\x1au\n\x11Kafka2HdfsOptions\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65\x64uplicate\x18\x03 \x01(\x08\x12\x19\n\x11output_dataset_id\x18\x04 \x01(\t\x12\x1b\n\x13output_format_label\x18\x05 \x01(\tJ\x04\x08\x02\x10\x03\x1aK\n\x0cKacohaConfig\x12\x1b\n\x13partitions_per_task\x18\x01 \x01(\x05\x12\x1e\n\x16poll_buffer_size_bytes\x18\x02 \x01(\x05\x1a\x87\x01\n\x11KacohaConfigPerDc\x12#\n\x02\x64\x63\x18\x01 \x01(\x0e\x32\x17.Criteo.Glup.DataCenter\x12M\n\x06\x63onfig\x18\x02 \x01(\x0b\x32=.Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfig\x1a\x95\x02\n\rKaCoHaOptions\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x19\n\x11output_dataset_id\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65\x64uplicate\x18\x03 \x01(\x08\x12M\n\x06\x63onfig\x18\x04 \x01(\x0b\x32=.Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfig\x12\x1b\n\x13output_format_label\x18\x05 \x01(\t\x12Y\n\rconfig_per_dc\x18\x06 \x03(\x0b\x32\x42.Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfigPerDc\x1a<\n\x11\x44\x61taloaderOptions\x12\'\n\x08platform\x18\x01 \x03(\x0e\x32\x15.Criteo.Glup.Platform\x1a\xf1\x01\n\x0bSyncOptions\x12#\n\x04\x66rom\x18\x01 \x01(\x0b\x32\x15.Criteo.Glup.Location\x12\x18\n\x10source_namespace\x18\x03 \x01(\t\x12(\n\tplatforms\x18\x06 \x03(\x0e\x32\x15.Criteo.Glup.Platform\x12\x16\n\x0eis_backfilling\x18\x08 \x01(\x08\x12\x10\n\x08to_label\x18\t \x01(\t\x12\x15\n\rto_dataset_id\x18\n \x01(\t\x12\x18\n\x10with_backfilling\x18\x0b \x01(\x08\x12\x1e\n\x16is_scheduled_on_source\x18\x0c \x01(\x08\x1ax\n\rBackupOptions\x12#\n\x04\x66rom\x18\x01 \x01(\x0b\x32\x15.Criteo.Glup.Location\x12\x18\n\x10source_namespace\x18\x02 \x01(\t\x12(\n\tplatforms\x18\x03 \x03(\x0e\x32\x15.Criteo.Glup.Platform\x1a\x83\x02\n\x12TranscodingOptions\x12\x18\n\x10input_dataset_id\x18\x01 \x01(\t\x12\x19\n\x11output_dataset_id\x18\x02 \x01(\t\x12\x31\n\x0cinput_format\x18\x03 \x01(\x0e\x32\x1b.Criteo.Glup.HDFSDataFormat\x12\x32\n\routput_format\x18\x04 \x01(\x0e\x32\x1b.Criteo.Glup.HDFSDataFormat\x12\x1b\n\x13input_dataset_label\x18\x05 \x01(\t\x12\x1c\n\x14output_dataset_label\x18\x06 \x01(\t\x12\x16\n\x0eis_by_platform\x18\x07 \x01(\x08\x1a\x95\x01\n\x0eSamplerOptions\x12\x18\n\x10input_dataset_id\x18\x01 \x01(\t\x12\x1a\n\x12input_format_label\x18\x02 \x01(\t\x12\x19\n\x11output_dataset_id\x18\x03 \x01(\t\x12\x1b\n\x13output_format_label\x18\x04 \x01(\t\x12\x15\n\rsampling_rate\x18\x05 \x01(\x02\x1a\xa7\x01\n\x11\x43omparatorOptions\x12\x17\n\x0fleft_dataset_id\x18\x01 \x01(\t\x12\x19\n\x11left_format_label\x18\x02 \x01(\t\x12\x18\n\x10right_dataset_id\x18\x03 \x01(\t\x12\x1a\n\x12right_format_label\x18\x04 \x01(\t\x12\x10\n\x08hostname\x18\x05 \x01(\t\x12\x16\n\x0eignored_fields\x18\x06 \x01(\t\x1a\x11\n\x0f\x45xternalOptions\"9\n\x18ProducerTransportOptions\x12\x0e\n\x06syslog\x18\x01 \x01(\x08\x12\r\n\x05kafka\x18\x02 \x01(\x08\"8\n\x0fPropertyOptions\x12\x10\n\x08valuable\x18\x01 \x01(\x08\x12\x13\n\x0bhigh_volume\x18\x02 \x01(\x08\"\xcb\x02\n\x0bGlupOptions\x12/\n\x05kafka\x18\x01 \x01(\x0b\x32 .Criteo.Glup.KafkaMessageOptions\x12&\n\x04hdfs\x18\x02 \x01(\x0b\x32\x18.Criteo.Glup.HDFSOptions\x12\x14\n\x0csampling_pct\x18\x03 \x01(\r\x12\x1c\n\x14preprod_sampling_pct\x18\x04 \x01(\r\x12%\n\x07\x64\x61taset\x18\x05 \x03(\x0b\x32\x14.Criteo.Glup.DataSet\x12\x1c\n\x14message_sampling_pct\x18\x06 \x01(\r\x12\x38\n\tproducers\x18\x07 \x01(\x0b\x32%.Criteo.Glup.ProducerTransportOptions\x12\x30\n\nproperties\x18\x08 \x01(\x0b\x32\x1c.Criteo.Glup.PropertyOptions\"\xb1\x01\n\x10GlupFieldOptions\x12\x0f\n\x07sampled\x18\x01 \x01(\x08\x12\x14\n\x0csampling_key\x18\x02 \x01(\x08\x12\x30\n\x11\x64isabled_platform\x18\x03 \x03(\x0e\x32\x15.Criteo.Glup.Platform\x12\x18\n\x10should_clean_pii\x18\x04 \x01(\x08\x12\x18\n\x10pending_deletion\x18\x05 \x01(\x08\x12\x10\n\x08\x61\x64\x64\x65\x64_at\x18\x06 \x01(\t\")\n\x0bJsonMapping\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04skip\x18\x02 \x01(\x08\"4\n\tJsonAlias\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11use_enum_field_id\x18\x03 \x01(\x08\"\xb5\x02\n\x0f\x42\x61seGlupMessage\x12(\n\x0bglup_origin\x18\x01 \x01(\x0b\x32\x13.Criteo.Glup.Origin\x12)\n\tpartition\x18\x02 \x01(\x0b\x32\x16.Criteo.Glup.Partition\x12\x41\n\nset_fields\x18\xda\x86\x03 \x03(\x0b\x32+.Criteo.Glup.BaseGlupMessage.SetFieldsEntry\x12R\n\x0f\x63ontrol_message\x18\xff\xff\x7f \x03(\x0b\x32%.Criteo.Glup.ControlMessage.WatermarkB\x10\x92\xb5\x18\x0c\n\n__metadata\x1a\x30\n\x0eSetFieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01:\x04\x88\xb5\x18\x01\"\xf2\x01\n\x19\x46orwardedWatermarkMessage\x12\x1d\n\x15original_kafka_offset\x18\x05 \x01(\x03\x12\x11\n\ttimestamp\x18\x06 \x01(\x03\x12\x1d\n\x15\x63onsolidation_enabled\x18\x07 \x01(\x08\x12\x12\n\ndataset_id\x18\n \x01(\t\x12\x1c\n\x14\x64\x61taset_format_label\x18\x0b \x01(\t\x12R\n\x0f\x63ontrol_message\x18\xff\xff\x7f \x03(\x0b\x32%.Criteo.Glup.ControlMessage.WatermarkB\x10\x92\xb5\x18\x0c\n\n__metadata\"y\n\x08Location\x12%\n\x03\x65nv\x18\x01 \x01(\x0e\x32\x18.Criteo.Glup.Environment\x12#\n\x02\x64\x63\x18\x02 \x01(\x0e\x32\x17.Criteo.Glup.DataCenter\x12\r\n\x05label\x18\x03 \x01(\t\x12\x12\n\ndataset_id\x18\x04 \x01(\t\"\xa2\x01\n\x06Origin\x12+\n\ndatacenter\x18\x01 \x01(\x0e\x32\x17.Criteo.Glup.DataCenter\x12\x1a\n\x03ip4\x18\x02 \x01(\x07\x42\r\x8a\xb5\x18\t\n\x07host_ip\x12\x10\n\x08hostname\x18\x03 \x01(\t\x12\x1e\n\x0e\x63ontainer_task\x18\x04 \x01(\tB\x06\x8a\xb5\x18\x02\x10\x01\x12\x1d\n\rcontainer_app\x18\x05 \x01(\tB\x06\x8a\xb5\x18\x02\x10\x01\"\x89\x05\n\x0e\x43ontrolMessage\x12\x38\n\twatermark\x18\x01 \x01(\x0b\x32%.Criteo.Glup.ControlMessage.Watermark\x1a\x89\x01\n\x0fWatermarkOrigin\x12\x13\n\x0bkafka_topic\x18\x01 \x01(\t\x12+\n\ndatacenter\x18\x02 \x01(\x0e\x32\x17.Criteo.Glup.DataCenter\x12\x34\n\x07\x63luster\x18\x03 \x01(\x0e\x32#.Criteo.Glup.ControlMessage.Cluster\x1a\xe8\x02\n\tWatermark\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x10\n\x08hostname\x18\x02 \x01(\t\x12\x13\n\x0bkafka_topic\x18\x03 \x01(\t\x12\x11\n\tpartition\x18\x04 \x01(\x05\x12\x17\n\x0fpartition_count\x18\x05 \x01(\x05\x12\x14\n\x0cprocess_uuid\x18\x06 \x01(\x0c\x12\x0e\n\x06region\x18\x07 \x01(\t\x12*\n\x11timestamp_seconds\x18\x08 \x01(\x05\x42\x0f\x92\xb5\x18\x0b\n\ttimestamp\x12\x0f\n\x07\x63luster\x18\t \x01(\t\x12\x13\n\x0b\x65nvironment\x18\n \x01(\t\x12J\n\nset_fields\x18\xda\x86\x03 \x03(\x0b\x32\x34.Criteo.Glup.ControlMessage.Watermark.SetFieldsEntry\x1a\x30\n\x0eSetFieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01:\x04\x88\xb5\x18\x01\"F\n\x07\x43luster\x12\x17\n\x13UNSUPPORTED_CLUSTER\x10\x00\x12\t\n\x05LOCAL\x10\x02\x12\x0b\n\x07\x43\x45NTRAL\x10\x03\x12\n\n\x06STREAM\x10\x04\"\x99\x01\n\tPartition\x12*\n\x11timestamp_seconds\x18\x01 \x01(\x04\x42\x0f\x8a\xb5\x18\x0b\n\ttimestamp\x12,\n\rhost_platform\x18\x02 \x01(\x0e\x32\x15.Criteo.Glup.Platform\x12\x32\n\nevent_type\x18\x03 \x01(\x0e\x32\x16.Criteo.Glup.EventTypeB\x06\x8a\xb5\x18\x02\x10\x01\"\x93\x01\n\rHDFSPartition\x12\x19\n\x11timestamp_seconds\x18\x01 \x01(\x04\x12,\n\rhost_platform\x18\x02 \x01(\x0e\x32\x15.Criteo.Glup.Platform\x12*\n\nevent_type\x18\x03 \x01(\x0e\x32\x16.Criteo.Glup.EventType\x12\r\n\x05\x64\x65pth\x18\x04 \x01(\x05\"\xa5\x01\n\x07Hash128\x12\x15\n\rmost_sig_bits\x18\x01 \x01(\x06\x12\x16\n\x0eleast_sig_bits\x18\x02 \x01(\x06\x12\x39\n\nset_fields\x18\xda\x86\x03 \x03(\x0b\x32#.Criteo.Glup.Hash128.SetFieldsEntry\x1a\x30\n\x0eSetFieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01*~\n\x0fPartitionScheme\x12 \n\x1cUNSUPPORTED_PARTITION_SCHEME\x10\x00\x12\t\n\x05\x44\x41ILY\x10\x02\x12\n\n\x06HOURLY\x10\x03\x12\x13\n\x0fPLATFORM_HOURLY\x10\x04\x12\x1d\n\x19\x45VENTTYPE_PLATFORM_HOURLY\x10\x05*?\n\rMessageFormat\x12\x16\n\x12UNSUPPORTED_FORMAT\x10\x00\x12\x08\n\x04JSON\x10\x01\x12\x0c\n\x08PROTOBUF\x10\x02*d\n\x0eHDFSDataFormat\x12\x1b\n\x17UNSUPPORTED_DATA_FORMAT\x10\x00\x12\r\n\tJSON_PAIL\x10\x02\x12\x10\n\x0cPROTOBUF_SEQ\x10\x03\x12\x14\n\x10PROTOBUF_PARQUET\x10\x04*3\n\x0b\x44\x61taSetKind\x12\x14\n\x10UNSUPPORTED_KIND\x10\x00\x12\x0e\n\nTIMESERIES\x10\x01*\x9a\x01\n\x0fMonitoringLevel\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x15\n\x11REMOVE_MONITORING\x10\x01\x12\x1a\n\x16INFORMATIVE_MONITORING\x10\x02\x12\x15\n\x11\x43ONSENSUS_IGNORED\x10\x03\x12\x30\n,CONSENSUS_IGNORED_AND_INFORMATIVE_MONITORING\x10\x04*\x8b\x01\n\nDataCenter\x12\x1a\n\x16UNSUPPORTED_DATACENTER\x10\x00\x12\x07\n\x03\x41M5\x10\x02\x12\x07\n\x03HK5\x10\x03\x12\x07\n\x03NY8\x10\x04\x12\x07\n\x03PAR\x10\x05\x12\x07\n\x03PA4\x10\x06\x12\x07\n\x03SH5\x10\x07\x12\x07\n\x03SV6\x10\x08\x12\x07\n\x03TY5\x10\t\x12\x07\n\x03VA1\x10\n\x12\x07\n\x03\x41M6\x10\x0b\x12\x07\n\x03\x44\x41\x31\x10\x0c*A\n\x0b\x45nvironment\x12\x1b\n\x17UNSUPPORTED_ENVIRONMENT\x10\x00\x12\x0b\n\x07PREPROD\x10\x01\x12\x08\n\x04PROD\x10\x02*D\n\x08Platform\x12\x18\n\x14UNSUPPORTED_PLATFORM\x10\x00\x12\x06\n\x02\x45U\x10\x02\x12\x06\n\x02US\x10\x03\x12\x06\n\x02\x41S\x10\x04\x12\x06\n\x02\x43N\x10\x05*[\n\tEventType\x12\x1a\n\x16UNSUPPORTED_EVENT_TYPE\x10\x00\x12\x10\n\x0cItemPageView\x10\x02\x12\t\n\x05Sales\x10\x03\x12\n\n\x06\x42\x61sket\x10\x04\x12\t\n\x05Other\x10\x05*%\n\x05YesNo\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02NO\x10\x01\x12\x07\n\x03YES\x10\x02:I\n\x04glup\x12\x1f.google.protobuf.MessageOptions\x18\xd0\x86\x03 \x01(\x0b\x32\x18.Criteo.Glup.GlupOptions:C\n\x18\x63ontains_nullable_fields\x12\x1f.google.protobuf.MessageOptions\x18\xd1\x86\x03 \x01(\x08:Q\n\tglupfield\x12\x1d.google.protobuf.FieldOptions\x18\xd0\x86\x03 \x01(\x0b\x32\x1d.Criteo.Glup.GlupFieldOptions:O\n\x0cjson_mapping\x12\x1d.google.protobuf.FieldOptions\x18\xd1\x86\x03 \x01(\x0b\x32\x18.Criteo.Glup.JsonMapping:E\n\x04json\x12\x1d.google.protobuf.FieldOptions\x18\xd2\x86\x03 \x01(\x0b\x32\x16.Criteo.Glup.JsonAliasB\x11\n\x0f\x63om.criteo.glupb\x06proto3')
+
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tests.integration.schema_registry.data.proto.metadata_proto_pb2', globals())
+if _descriptor._USE_C_DESCRIPTORS == False:
+  google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(glup)
+  google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(contains_nullable_fields)
+  google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(glupfield)
+  google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(json_mapping)
+  google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(json)
+
+  DESCRIPTOR._options = None
+  DESCRIPTOR._serialized_options = b'\n\017com.criteo.glup'
+  _BASEGLUPMESSAGE_SETFIELDSENTRY._options = None
+  _BASEGLUPMESSAGE_SETFIELDSENTRY._serialized_options = b'8\001'
+  _BASEGLUPMESSAGE.fields_by_name['control_message']._options = None
+  _BASEGLUPMESSAGE.fields_by_name['control_message']._serialized_options = b'\222\265\030\014\n\n__metadata'
+  _BASEGLUPMESSAGE._options = None
+  _BASEGLUPMESSAGE._serialized_options = b'\210\265\030\001'
+  _FORWARDEDWATERMARKMESSAGE.fields_by_name['control_message']._options = None
+  _FORWARDEDWATERMARKMESSAGE.fields_by_name['control_message']._serialized_options = b'\222\265\030\014\n\n__metadata'
+  _ORIGIN.fields_by_name['ip4']._options = None
+  _ORIGIN.fields_by_name['ip4']._serialized_options = b'\212\265\030\t\n\007host_ip'
+  _ORIGIN.fields_by_name['container_task']._options = None
+  _ORIGIN.fields_by_name['container_task']._serialized_options = b'\212\265\030\002\020\001'
+  _ORIGIN.fields_by_name['container_app']._options = None
+  _ORIGIN.fields_by_name['container_app']._serialized_options = b'\212\265\030\002\020\001'
+  _CONTROLMESSAGE_WATERMARK_SETFIELDSENTRY._options = None
+  _CONTROLMESSAGE_WATERMARK_SETFIELDSENTRY._serialized_options = b'8\001'
+  _CONTROLMESSAGE_WATERMARK.fields_by_name['timestamp_seconds']._options = None
+  _CONTROLMESSAGE_WATERMARK.fields_by_name['timestamp_seconds']._serialized_options = b'\222\265\030\013\n\ttimestamp'
+  _CONTROLMESSAGE_WATERMARK._options = None
+  _CONTROLMESSAGE_WATERMARK._serialized_options = b'\210\265\030\001'
+  _PARTITION.fields_by_name['timestamp_seconds']._options = None
+  _PARTITION.fields_by_name['timestamp_seconds']._serialized_options = b'\212\265\030\013\n\ttimestamp'
+  _PARTITION.fields_by_name['event_type']._options = None
+  _PARTITION.fields_by_name['event_type']._serialized_options = b'\212\265\030\002\020\001'
+  _HASH128_SETFIELDSENTRY._options = None
+  _HASH128_SETFIELDSENTRY._serialized_options = b'8\001'
+  _PARTITIONSCHEME._serialized_start=6876
+  _PARTITIONSCHEME._serialized_end=7002
+  _MESSAGEFORMAT._serialized_start=7004
+  _MESSAGEFORMAT._serialized_end=7067
+  _HDFSDATAFORMAT._serialized_start=7069
+  _HDFSDATAFORMAT._serialized_end=7169
+  _DATASETKIND._serialized_start=7171
+  _DATASETKIND._serialized_end=7222
+  _MONITORINGLEVEL._serialized_start=7225
+  _MONITORINGLEVEL._serialized_end=7379
+  _DATACENTER._serialized_start=7382
+  _DATACENTER._serialized_end=7521
+  _ENVIRONMENT._serialized_start=7523
+  _ENVIRONMENT._serialized_end=7588
+  _PLATFORM._serialized_start=7590
+  _PLATFORM._serialized_end=7658
+  _EVENTTYPE._serialized_start=7660
+  _EVENTTYPE._serialized_end=7751
+  _YESNO._serialized_start=7753
+  _YESNO._serialized_end=7790
+  _KAFKAMESSAGEOPTIONS._serialized_start=116
+  _KAFKAMESSAGEOPTIONS._serialized_end=152
+  _DATASET._serialized_start=155
+  _DATASET._serialized_end=411
+  _DATASETCHUNK._serialized_start=413
+  _DATASETCHUNK._serialized_end=533
+  _DATASETFORMAT._serialized_start=536
+  _DATASETFORMAT._serialized_end=894
+  _HDFSOPTIONS._serialized_start=897
+  _HDFSOPTIONS._serialized_end=4175
+  _HDFSOPTIONS_IMPORTOPTIONS._serialized_start=969
+  _HDFSOPTIONS_IMPORTOPTIONS._serialized_end=4175
+  _HDFSOPTIONS_IMPORTOPTIONS_VIEW._serialized_start=1258
+  _HDFSOPTIONS_IMPORTOPTIONS_VIEW._serialized_end=1402
+  _HDFSOPTIONS_IMPORTOPTIONS_VIEW_HIVEOPTIONS._serialized_start=1337
+  _HDFSOPTIONS_IMPORTOPTIONS_VIEW_HIVEOPTIONS._serialized_end=1402
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR._serialized_start=1405
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR._serialized_end=4175
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DEDUPOPTIONS._serialized_start=2376
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DEDUPOPTIONS._serialized_end=2530
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KAFKA2HDFSOPTIONS._serialized_start=2532
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KAFKA2HDFSOPTIONS._serialized_end=2649
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIG._serialized_start=2651
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIG._serialized_end=2726
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIGPERDC._serialized_start=2729
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIGPERDC._serialized_end=2864
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHAOPTIONS._serialized_start=2867
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHAOPTIONS._serialized_end=3144
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DATALOADEROPTIONS._serialized_start=3146
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DATALOADEROPTIONS._serialized_end=3206
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SYNCOPTIONS._serialized_start=3209
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SYNCOPTIONS._serialized_end=3450
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_BACKUPOPTIONS._serialized_start=3452
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_BACKUPOPTIONS._serialized_end=3572
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_TRANSCODINGOPTIONS._serialized_start=3575
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_TRANSCODINGOPTIONS._serialized_end=3834
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SAMPLEROPTIONS._serialized_start=3837
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SAMPLEROPTIONS._serialized_end=3986
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_COMPARATOROPTIONS._serialized_start=3989
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_COMPARATOROPTIONS._serialized_end=4156
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_EXTERNALOPTIONS._serialized_start=4158
+  _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_EXTERNALOPTIONS._serialized_end=4175
+  _PRODUCERTRANSPORTOPTIONS._serialized_start=4177
+  _PRODUCERTRANSPORTOPTIONS._serialized_end=4234
+  _PROPERTYOPTIONS._serialized_start=4236
+  _PROPERTYOPTIONS._serialized_end=4292
+  _GLUPOPTIONS._serialized_start=4295
+  _GLUPOPTIONS._serialized_end=4626
+  _GLUPFIELDOPTIONS._serialized_start=4629
+  _GLUPFIELDOPTIONS._serialized_end=4806
+  _JSONMAPPING._serialized_start=4808
+  _JSONMAPPING._serialized_end=4849
+  _JSONALIAS._serialized_start=4851
+  _JSONALIAS._serialized_end=4903
+  _BASEGLUPMESSAGE._serialized_start=4906
+  _BASEGLUPMESSAGE._serialized_end=5215
+  _BASEGLUPMESSAGE_SETFIELDSENTRY._serialized_start=5161
+  _BASEGLUPMESSAGE_SETFIELDSENTRY._serialized_end=5209
+  _FORWARDEDWATERMARKMESSAGE._serialized_start=5218
+  _FORWARDEDWATERMARKMESSAGE._serialized_end=5460
+  _LOCATION._serialized_start=5462
+  _LOCATION._serialized_end=5583
+  _ORIGIN._serialized_start=5586
+  _ORIGIN._serialized_end=5748
+  _CONTROLMESSAGE._serialized_start=5751
+  _CONTROLMESSAGE._serialized_end=6400
+  _CONTROLMESSAGE_WATERMARKORIGIN._serialized_start=5828
+  _CONTROLMESSAGE_WATERMARKORIGIN._serialized_end=5965
+  _CONTROLMESSAGE_WATERMARK._serialized_start=5968
+  _CONTROLMESSAGE_WATERMARK._serialized_end=6328
+  _CONTROLMESSAGE_WATERMARK_SETFIELDSENTRY._serialized_start=5161
+  _CONTROLMESSAGE_WATERMARK_SETFIELDSENTRY._serialized_end=5209
+  _CONTROLMESSAGE_CLUSTER._serialized_start=6330
+  _CONTROLMESSAGE_CLUSTER._serialized_end=6400
+  _PARTITION._serialized_start=6403
+  _PARTITION._serialized_end=6556
+  _HDFSPARTITION._serialized_start=6559
+  _HDFSPARTITION._serialized_end=6706
+  _HASH128._serialized_start=6709
+  _HASH128._serialized_end=6874
+  _HASH128_SETFIELDSENTRY._serialized_start=5161
+  _HASH128_SETFIELDSENTRY._serialized_end=5209
+# @@protoc_insertion_point(module_scope)
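The regenerated module above uses the protobuf >= 3.20 code-generation style: AddSerializedFile() registers the compiled file descriptor and _builder.BuildTopDescriptorsAndMessages() injects the message classes into the module namespace, replacing the hand-built _descriptor.Descriptor graphs removed further down in this diff. A minimal usage sketch (illustrative only, not part of the test suite; it assumes the tests packages are importable from the repository root):

from tests.integration.schema_registry.data.proto import metadata_proto_pb2 as glup

# DataSet and the other message classes exist at import time, created by the
# builder calls above rather than by hand-written descriptor objects.
ds = glup.DataSet(id="clicks", owner="team-data", retention_days=30)
ds.format.add(path="/glup/clicks", file_format=glup.PROTOBUF_PARQUET)

payload = ds.SerializeToString()                 # standard protobuf wire format
assert glup.DataSet.FromString(payload) == ds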
diff --git a/tests/integration/schema_registry/data/union_schema.avsc b/tests/integration/schema_registry/data/union_schema.avsc
new file mode 100644
index 0000000..2f78a4a
--- /dev/null
+++ b/tests/integration/schema_registry/data/union_schema.avsc
@@ -0,0 +1,22 @@
+[
+    {
+        "name": "RecordOne",
+        "type": "record",
+        "fields": [
+            {
+                "name": "field_one",
+                "type": "string"
+            }
+        ]
+    },
+    {
+        "name": "RecordTwo",
+        "type": "record",
+        "fields": [
+            {
+                "name": "field_two",
+                "type": "int"
+            }
+        ]
+    }
+]
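union_schema.avsc declares a top-level Avro union of two records. A minimal sketch of how such a schema can be registered and wired into an AvroSerializer (the registry URL, topic, and subject names are illustrative assumptions, not values taken from the test suite):

from confluent_kafka.schema_registry import Schema, SchemaRegistryClient
from confluent_kafka.schema_registry.avro import AvroSerializer
from confluent_kafka.serialization import MessageField, SerializationContext

with open("tests/integration/schema_registry/data/union_schema.avsc") as f:
    schema_str = f.read()

client = SchemaRegistryClient({"url": "http://localhost:8081"})
client.register_schema("union_example-value", Schema(schema_str, "AVRO"))

serializer = AvroSerializer(client, schema_str)
ctx = SerializationContext("union_example", MessageField.VALUE)
# fastavro picks the union branch whose fields match the record;
# this payload serializes as RecordOne.
blob = serializer({"field_one": "hello"}, ctx)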
diff --git a/tests/integration/schema_registry/gen/DependencyTestProto_pb2.py b/tests/integration/schema_registry/gen/DependencyTestProto_pb2.py
deleted file mode 100644
index f592407..0000000
--- a/tests/integration/schema_registry/gen/DependencyTestProto_pb2.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: DependencyTestProto.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from . import NestedTestProto_pb2 as NestedTestProto__pb2
-from . import PublicTestProto_pb2 as PublicTestProto__pb2
-try:
-  TestProto__pb2 = PublicTestProto__pb2.TestProto__pb2
-except AttributeError:
-  TestProto__pb2 = PublicTestProto__pb2.TestProto_pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='DependencyTestProto.proto',
-  package='tests.integration.serialization.data',
-  syntax='proto3',
-  serialized_pb=_b('\n\x19\x44\x65pendencyTestProto.proto\x12$tests.integration.serialization.data\x1a\x15NestedTestProto.proto\x1a\x15PublicTestProto.proto\"\x98\x01\n\x11\x44\x65pendencyMessage\x12K\n\x0enested_message\x18\x01 \x01(\x0b\x32\x33.tests.integration.serialization.data.NestedMessage\x12\x11\n\tis_active\x18\x02 \x01(\x08\x12#\n\rtest_messsage\x18\x03 \x01(\x0b\x32\x0c.TestMessageB.\n,io.confluent.kafka.serializers.protobuf.testb\x06proto3')
-  ,
-  dependencies=[NestedTestProto__pb2.DESCRIPTOR,PublicTestProto__pb2.DESCRIPTOR,])
-
-
-
-
-_DEPENDENCYMESSAGE = _descriptor.Descriptor(
-  name='DependencyMessage',
-  full_name='tests.integration.serialization.data.DependencyMessage',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='nested_message', full_name='tests.integration.serialization.data.DependencyMessage.nested_message', index=0,
-      number=1, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='is_active', full_name='tests.integration.serialization.data.DependencyMessage.is_active', index=1,
-      number=2, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_messsage', full_name='tests.integration.serialization.data.DependencyMessage.test_messsage', index=2,
-      number=3, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=114,
-  serialized_end=266,
-)
-
-_DEPENDENCYMESSAGE.fields_by_name['nested_message'].message_type = NestedTestProto__pb2._NESTEDMESSAGE
-_DEPENDENCYMESSAGE.fields_by_name['test_messsage'].message_type = TestProto__pb2._TESTMESSAGE
-DESCRIPTOR.message_types_by_name['DependencyMessage'] = _DEPENDENCYMESSAGE
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-DependencyMessage = _reflection.GeneratedProtocolMessageType('DependencyMessage', (_message.Message,), dict(
-  DESCRIPTOR = _DEPENDENCYMESSAGE,
-  __module__ = 'DependencyTestProto_pb2'
-  # @@protoc_insertion_point(class_scope:tests.integration.serialization.data.DependencyMessage)
-  ))
-_sym_db.RegisterMessage(DependencyMessage)
-
-
-DESCRIPTOR.has_options = True
-DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n,io.confluent.kafka.serializers.protobuf.test'))
-# @@protoc_insertion_point(module_scope)
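The modules deleted under tests/integration/schema_registry/gen/ were generated with the pre-3.20 protobuf API (_descriptor.FileDescriptor, _reflection.GeneratedProtocolMessageType), which the protobuf 4.x runtime rejects by default; their replacements elsewhere in this diff use the _builder style shown above. A sketch of regenerating such a module with a current compiler, assuming grpcio-tools is installed (the paths reuse metadata_proto.proto from this diff):

import pkg_resources
from grpc_tools import protoc

proto_dir = "tests/integration/schema_registry/data/proto"
well_known = pkg_resources.resource_filename("grpc_tools", "_proto")  # bundled google/protobuf/*.proto

exit_code = protoc.main([
    "grpc_tools.protoc",            # argv[0] placeholder expected by protoc.main
    f"-I{proto_dir}",
    f"-I{well_known}",
    f"--python_out={proto_dir}",
    f"{proto_dir}/metadata_proto.proto",
])
assert exit_code == 0               # emits a _builder-based metadata_proto_pb2.py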
diff --git a/tests/integration/schema_registry/gen/NestedTestProto_pb2.py b/tests/integration/schema_registry/gen/NestedTestProto_pb2.py
deleted file mode 100644
index f58c7ff..0000000
--- a/tests/integration/schema_registry/gen/NestedTestProto_pb2.py
+++ /dev/null
@@ -1,436 +0,0 @@
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: NestedTestProto.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf.internal import enum_type_wrapper
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='NestedTestProto.proto',
-  package='tests.integration.serialization.data',
-  syntax='proto3',
-  serialized_pb=_b('\n\x15NestedTestProto.proto\x12$tests.integration.serialization.data\x1a\x1fgoogle/protobuf/timestamp.proto\"\x8c\x01\n\x06UserId\x12\x17\n\rkafka_user_id\x18\x01 \x01(\tH\x00\x12\x17\n\rother_user_id\x18\x02 \x01(\x05H\x00\x12\x45\n\nanother_id\x18\x03 \x01(\x0b\x32/.tests.integration.serialization.data.MessageIdH\x00\x42\t\n\x07user_id\"\x17\n\tMessageId\x12\n\n\x02id\x18\x01 \x01(\t\"R\n\x0b\x43omplexType\x12\x10\n\x06one_id\x18\x01 \x01(\tH\x00\x12\x12\n\x08other_id\x18\x02 \x01(\x05H\x00\x12\x11\n\tis_active\x18\x03 \x01(\x08\x42\n\n\x08some_val\"\xd0\x04\n\rNestedMessage\x12=\n\x07user_id\x18\x01 \x01(\x0b\x32,.tests.integration.serialization.data.UserId\x12\x11\n\tis_active\x18\x02 \x01(\x08\x12\x1a\n\x12\x65xperiments_active\x18\x03 \x03(\t\x12<\n\x06status\x18\x05 \x01(\x0e\x32,.tests.integration.serialization.data.Status\x12G\n\x0c\x63omplex_type\x18\x06 \x01(\x0b\x32\x31.tests.integration.serialization.data.ComplexType\x12R\n\x08map_type\x18\x07 \x03(\x0b\x32@.tests.integration.serialization.data.NestedMessage.MapTypeEntry\x12O\n\x05inner\x18\x08 \x01(\x0b\x32@.tests.integration.serialization.data.NestedMessage.InnerMessage\x1a.\n\x0cMapTypeEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a/\n\x0cInnerMessage\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x0f\n\x03ids\x18\x02 \x03(\x05\x42\x02\x10\x01\"(\n\tInnerEnum\x12\x08\n\x04ZERO\x10\x00\x12\r\n\tALSO_ZERO\x10\x00\x1a\x02\x10\x01J\x04\x08\x0e\x10\x0fJ\x04\x08\x0f\x10\x10J\x04\x08\t\x10\x0cR\x03\x66ooR\x03\x62\x61r*\"\n\x06Status\x12\n\n\x06\x41\x43TIVE\x10\x00\x12\x0c\n\x08INACTIVE\x10\x01\x42\x41\n,io.confluent.kafka.serializers.protobuf.testB\x0fNestedTestProtoP\x00\x62\x06proto3')
-  ,
-  dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
-
-_STATUS = _descriptor.EnumDescriptor(
-  name='Status',
-  full_name='tests.integration.serialization.data.Status',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='ACTIVE', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='INACTIVE', index=1, number=1,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=943,
-  serialized_end=977,
-)
-_sym_db.RegisterEnumDescriptor(_STATUS)
-
-Status = enum_type_wrapper.EnumTypeWrapper(_STATUS)
-ACTIVE = 0
-INACTIVE = 1
-
-
-_NESTEDMESSAGE_INNERENUM = _descriptor.EnumDescriptor(
-  name='InnerEnum',
-  full_name='tests.integration.serialization.data.NestedMessage.InnerEnum',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='ZERO', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='ALSO_ZERO', index=1, number=0,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=_descriptor._ParseOptions(descriptor_pb2.EnumOptions(), _b('\020\001')),
-  serialized_start=873,
-  serialized_end=913,
-)
-_sym_db.RegisterEnumDescriptor(_NESTEDMESSAGE_INNERENUM)
-
-
-_USERID = _descriptor.Descriptor(
-  name='UserId',
-  full_name='tests.integration.serialization.data.UserId',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='kafka_user_id', full_name='tests.integration.serialization.data.UserId.kafka_user_id', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='other_user_id', full_name='tests.integration.serialization.data.UserId.other_user_id', index=1,
-      number=2, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='another_id', full_name='tests.integration.serialization.data.UserId.another_id', index=2,
-      number=3, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-    _descriptor.OneofDescriptor(
-      name='user_id', full_name='tests.integration.serialization.data.UserId.user_id',
-      index=0, containing_type=None, fields=[]),
-  ],
-  serialized_start=97,
-  serialized_end=237,
-)
-
-
-_MESSAGEID = _descriptor.Descriptor(
-  name='MessageId',
-  full_name='tests.integration.serialization.data.MessageId',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='id', full_name='tests.integration.serialization.data.MessageId.id', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=239,
-  serialized_end=262,
-)
-
-
-_COMPLEXTYPE = _descriptor.Descriptor(
-  name='ComplexType',
-  full_name='tests.integration.serialization.data.ComplexType',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='one_id', full_name='tests.integration.serialization.data.ComplexType.one_id', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='other_id', full_name='tests.integration.serialization.data.ComplexType.other_id', index=1,
-      number=2, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='is_active', full_name='tests.integration.serialization.data.ComplexType.is_active', index=2,
-      number=3, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-    _descriptor.OneofDescriptor(
-      name='some_val', full_name='tests.integration.serialization.data.ComplexType.some_val',
-      index=0, containing_type=None, fields=[]),
-  ],
-  serialized_start=264,
-  serialized_end=346,
-)
-
-
-_NESTEDMESSAGE_MAPTYPEENTRY = _descriptor.Descriptor(
-  name='MapTypeEntry',
-  full_name='tests.integration.serialization.data.NestedMessage.MapTypeEntry',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='key', full_name='tests.integration.serialization.data.NestedMessage.MapTypeEntry.key', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='value', full_name='tests.integration.serialization.data.NestedMessage.MapTypeEntry.value', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=776,
-  serialized_end=822,
-)
-
-_NESTEDMESSAGE_INNERMESSAGE = _descriptor.Descriptor(
-  name='InnerMessage',
-  full_name='tests.integration.serialization.data.NestedMessage.InnerMessage',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='id', full_name='tests.integration.serialization.data.NestedMessage.InnerMessage.id', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, json_name='id', file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='ids', full_name='tests.integration.serialization.data.NestedMessage.InnerMessage.ids', index=1,
-      number=2, type=5, cpp_type=1, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')), file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=824,
-  serialized_end=871,
-)
-
-_NESTEDMESSAGE = _descriptor.Descriptor(
-  name='NestedMessage',
-  full_name='tests.integration.serialization.data.NestedMessage',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='user_id', full_name='tests.integration.serialization.data.NestedMessage.user_id', index=0,
-      number=1, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='is_active', full_name='tests.integration.serialization.data.NestedMessage.is_active', index=1,
-      number=2, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='experiments_active', full_name='tests.integration.serialization.data.NestedMessage.experiments_active', index=2,
-      number=3, type=9, cpp_type=9, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='status', full_name='tests.integration.serialization.data.NestedMessage.status', index=3,
-      number=5, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='complex_type', full_name='tests.integration.serialization.data.NestedMessage.complex_type', index=4,
-      number=6, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='map_type', full_name='tests.integration.serialization.data.NestedMessage.map_type', index=5,
-      number=7, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='inner', full_name='tests.integration.serialization.data.NestedMessage.inner', index=6,
-      number=8, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_NESTEDMESSAGE_MAPTYPEENTRY, _NESTEDMESSAGE_INNERMESSAGE, ],
-  enum_types=[
-    _NESTEDMESSAGE_INNERENUM,
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=349,
-  serialized_end=941,
-)
-
-_USERID.fields_by_name['another_id'].message_type = _MESSAGEID
-_USERID.oneofs_by_name['user_id'].fields.append(
-  _USERID.fields_by_name['kafka_user_id'])
-_USERID.fields_by_name['kafka_user_id'].containing_oneof = _USERID.oneofs_by_name['user_id']
-_USERID.oneofs_by_name['user_id'].fields.append(
-  _USERID.fields_by_name['other_user_id'])
-_USERID.fields_by_name['other_user_id'].containing_oneof = _USERID.oneofs_by_name['user_id']
-_USERID.oneofs_by_name['user_id'].fields.append(
-  _USERID.fields_by_name['another_id'])
-_USERID.fields_by_name['another_id'].containing_oneof = _USERID.oneofs_by_name['user_id']
-_COMPLEXTYPE.oneofs_by_name['some_val'].fields.append(
-  _COMPLEXTYPE.fields_by_name['one_id'])
-_COMPLEXTYPE.fields_by_name['one_id'].containing_oneof = _COMPLEXTYPE.oneofs_by_name['some_val']
-_COMPLEXTYPE.oneofs_by_name['some_val'].fields.append(
-  _COMPLEXTYPE.fields_by_name['other_id'])
-_COMPLEXTYPE.fields_by_name['other_id'].containing_oneof = _COMPLEXTYPE.oneofs_by_name['some_val']
-_NESTEDMESSAGE_MAPTYPEENTRY.containing_type = _NESTEDMESSAGE
-_NESTEDMESSAGE_INNERMESSAGE.containing_type = _NESTEDMESSAGE
-_NESTEDMESSAGE.fields_by_name['user_id'].message_type = _USERID
-_NESTEDMESSAGE.fields_by_name['status'].enum_type = _STATUS
-_NESTEDMESSAGE.fields_by_name['complex_type'].message_type = _COMPLEXTYPE
-_NESTEDMESSAGE.fields_by_name['map_type'].message_type = _NESTEDMESSAGE_MAPTYPEENTRY
-_NESTEDMESSAGE.fields_by_name['inner'].message_type = _NESTEDMESSAGE_INNERMESSAGE
-_NESTEDMESSAGE_INNERENUM.containing_type = _NESTEDMESSAGE
-DESCRIPTOR.message_types_by_name['UserId'] = _USERID
-DESCRIPTOR.message_types_by_name['MessageId'] = _MESSAGEID
-DESCRIPTOR.message_types_by_name['ComplexType'] = _COMPLEXTYPE
-DESCRIPTOR.message_types_by_name['NestedMessage'] = _NESTEDMESSAGE
-DESCRIPTOR.enum_types_by_name['Status'] = _STATUS
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-UserId = _reflection.GeneratedProtocolMessageType('UserId', (_message.Message,), dict(
-  DESCRIPTOR = _USERID,
-  __module__ = 'NestedTestProto_pb2'
-  # @@protoc_insertion_point(class_scope:tests.integration.serialization.data.UserId)
-  ))
-_sym_db.RegisterMessage(UserId)
-
-MessageId = _reflection.GeneratedProtocolMessageType('MessageId', (_message.Message,), dict(
-  DESCRIPTOR = _MESSAGEID,
-  __module__ = 'NestedTestProto_pb2'
-  # @@protoc_insertion_point(class_scope:tests.integration.serialization.data.MessageId)
-  ))
-_sym_db.RegisterMessage(MessageId)
-
-ComplexType = _reflection.GeneratedProtocolMessageType('ComplexType', (_message.Message,), dict(
-  DESCRIPTOR = _COMPLEXTYPE,
-  __module__ = 'NestedTestProto_pb2'
-  # @@protoc_insertion_point(class_scope:tests.integration.serialization.data.ComplexType)
-  ))
-_sym_db.RegisterMessage(ComplexType)
-
-NestedMessage = _reflection.GeneratedProtocolMessageType('NestedMessage', (_message.Message,), dict( 
-
-  MapTypeEntry = _reflection.GeneratedProtocolMessageType('MapTypeEntry', (_message.Message,), dict(
-    DESCRIPTOR = _NESTEDMESSAGE_MAPTYPEENTRY,
-    __module__ = 'NestedTestProto_pb2'
-    # @@protoc_insertion_point(class_scope:tests.integration.serialization.data.NestedMessage.MapTypeEntry)
-    ))
-  ,
-
-  InnerMessage = _reflection.GeneratedProtocolMessageType('InnerMessage', (_message.Message,), dict(
-    DESCRIPTOR = _NESTEDMESSAGE_INNERMESSAGE,
-    __module__ = 'NestedTestProto_pb2'
-    # @@protoc_insertion_point(class_scope:tests.integration.serialization.data.NestedMessage.InnerMessage)
-    ))
-  ,
-  DESCRIPTOR = _NESTEDMESSAGE,
-  __module__ = 'NestedTestProto_pb2'
-  # @@protoc_insertion_point(class_scope:tests.integration.serialization.data.NestedMessage)
-  ))
-_sym_db.RegisterMessage(NestedMessage)
-_sym_db.RegisterMessage(NestedMessage.MapTypeEntry)
-_sym_db.RegisterMessage(NestedMessage.InnerMessage)
-
-
-DESCRIPTOR.has_options = True
-DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n,io.confluent.kafka.serializers.protobuf.testB\017NestedTestProtoP\000'))
-_NESTEDMESSAGE_MAPTYPEENTRY.has_options = True
-_NESTEDMESSAGE_MAPTYPEENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
-_NESTEDMESSAGE_INNERMESSAGE.fields_by_name['ids'].has_options = True
-_NESTEDMESSAGE_INNERMESSAGE.fields_by_name['ids']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
-_NESTEDMESSAGE_INNERENUM.has_options = True
-_NESTEDMESSAGE_INNERENUM._options = _descriptor._ParseOptions(descriptor_pb2.EnumOptions(), _b('\020\001'))
-# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/gen/PublicTestProto_pb2.py b/tests/integration/schema_registry/gen/PublicTestProto_pb2.py
deleted file mode 100644
index 4d9c213..0000000
--- a/tests/integration/schema_registry/gen/PublicTestProto_pb2.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: PublicTestProto.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from . import TestProto_pb2 as TestProto__pb2
-
-from .TestProto_pb2 import *
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='PublicTestProto.proto',
-  package='tests.integration.serialization.data',
-  syntax='proto3',
-  serialized_pb=_b('\n\x15PublicTestProto.proto\x12$tests.integration.serialization.data\x1a\x0fTestProto.protoP\x00\x62\x06proto3')
-  ,
-  dependencies=[TestProto__pb2.DESCRIPTOR,],
-  public_dependencies=[TestProto__pb2.DESCRIPTOR,])
-
-
-
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-
-# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/gen/Sint32Value_pb2.py b/tests/integration/schema_registry/gen/Sint32Value_pb2.py
deleted file mode 100644
index cb36b46..0000000
--- a/tests/integration/schema_registry/gen/Sint32Value_pb2.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: Sint32Value.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='Sint32Value.proto',
-  package='',
-  syntax='proto3',
-  serialized_pb=_b('\n\x11Sint32Value.proto\"\x1c\n\x0bSInt32Value\x12\r\n\x05value\x18\x01 \x01(\x11\x42.\n,io.confluent.kafka.serializers.protobuf.testb\x06proto3')
-)
-
-
-
-
-_SINT32VALUE = _descriptor.Descriptor(
-  name='SInt32Value',
-  full_name='SInt32Value',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='value', full_name='SInt32Value.value', index=0,
-      number=1, type=17, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=21,
-  serialized_end=49,
-)
-
-DESCRIPTOR.message_types_by_name['SInt32Value'] = _SINT32VALUE
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-SInt32Value = _reflection.GeneratedProtocolMessageType('SInt32Value', (_message.Message,), dict(
-  DESCRIPTOR = _SINT32VALUE,
-  __module__ = 'Sint32Value_pb2'
-  # @@protoc_insertion_point(class_scope:SInt32Value)
-  ))
-_sym_db.RegisterMessage(SInt32Value)
-
-
-DESCRIPTOR.has_options = True
-DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n,io.confluent.kafka.serializers.protobuf.test'))
-# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/gen/Sint64Value_pb2.py b/tests/integration/schema_registry/gen/Sint64Value_pb2.py
deleted file mode 100644
index 446da76..0000000
--- a/tests/integration/schema_registry/gen/Sint64Value_pb2.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: Sint64Value.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='Sint64Value.proto',
-  package='',
-  syntax='proto3',
-  serialized_pb=_b('\n\x11Sint64Value.proto\"\x1c\n\x0bSInt64Value\x12\r\n\x05value\x18\x01 \x01(\x12\x42.\n,io.confluent.kafka.serializers.protobuf.testb\x06proto3')
-)
-
-
-
-
-_SINT64VALUE = _descriptor.Descriptor(
-  name='SInt64Value',
-  full_name='SInt64Value',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='value', full_name='SInt64Value.value', index=0,
-      number=1, type=18, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=21,
-  serialized_end=49,
-)
-
-DESCRIPTOR.message_types_by_name['SInt64Value'] = _SINT64VALUE
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-SInt64Value = _reflection.GeneratedProtocolMessageType('SInt64Value', (_message.Message,), dict(
-  DESCRIPTOR = _SINT64VALUE,
-  __module__ = 'Sint64Value_pb2'
-  # @@protoc_insertion_point(class_scope:SInt64Value)
-  ))
-_sym_db.RegisterMessage(SInt64Value)
-
-
-DESCRIPTOR.has_options = True
-DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n,io.confluent.kafka.serializers.protobuf.test'))
-# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/gen/TestProto_pb2.py b/tests/integration/schema_registry/gen/TestProto_pb2.py
deleted file mode 100644
index 1b86974..0000000
--- a/tests/integration/schema_registry/gen/TestProto_pb2.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: TestProto.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='TestProto.proto',
-  package='',
-  syntax='proto3',
-  serialized_pb=_b('\n\x0fTestProto.proto\x1a google/protobuf/descriptor.proto\"\xc8\x02\n\x0bTestMessage\x12\x13\n\x0btest_string\x18\x01 \x01(\t\x12\x11\n\ttest_bool\x18\x02 \x01(\x08\x12\x12\n\ntest_bytes\x18\x03 \x01(\x0c\x12\x13\n\x0btest_double\x18\x04 \x01(\x01\x12\x12\n\ntest_float\x18\x05 \x01(\x02\x12\x14\n\x0ctest_fixed32\x18\x06 \x01(\x07\x12\x14\n\x0ctest_fixed64\x18\x07 \x01(\x06\x12\x12\n\ntest_int32\x18\x08 \x01(\x05\x12\x12\n\ntest_int64\x18\t \x01(\x03\x12\x15\n\rtest_sfixed32\x18\n \x01(\x0f\x12\x15\n\rtest_sfixed64\x18\x0b \x01(\x10\x12\x13\n\x0btest_sint32\x18\x0c \x01(\x11\x12\x13\n\x0btest_sint64\x18\r \x01(\x12\x12\x13\n\x0btest_uint32\x18\x0e \x01(\r\x12\x13\n\x0btest_uint64\x18\x0f \x01(\x04\x42\x41\n,io.confluent.kafka.serializers.protobuf.testB\x11TestMessageProtosb\x06proto3')
-  ,
-  dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
-
-
-
-
-_TESTMESSAGE = _descriptor.Descriptor(
-  name='TestMessage',
-  full_name='TestMessage',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='test_string', full_name='TestMessage.test_string', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_bool', full_name='TestMessage.test_bool', index=1,
-      number=2, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_bytes', full_name='TestMessage.test_bytes', index=2,
-      number=3, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b(""),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_double', full_name='TestMessage.test_double', index=3,
-      number=4, type=1, cpp_type=5, label=1,
-      has_default_value=False, default_value=float(0),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_float', full_name='TestMessage.test_float', index=4,
-      number=5, type=2, cpp_type=6, label=1,
-      has_default_value=False, default_value=float(0),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_fixed32', full_name='TestMessage.test_fixed32', index=5,
-      number=6, type=7, cpp_type=3, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_fixed64', full_name='TestMessage.test_fixed64', index=6,
-      number=7, type=6, cpp_type=4, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_int32', full_name='TestMessage.test_int32', index=7,
-      number=8, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_int64', full_name='TestMessage.test_int64', index=8,
-      number=9, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_sfixed32', full_name='TestMessage.test_sfixed32', index=9,
-      number=10, type=15, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_sfixed64', full_name='TestMessage.test_sfixed64', index=10,
-      number=11, type=16, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_sint32', full_name='TestMessage.test_sint32', index=11,
-      number=12, type=17, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_sint64', full_name='TestMessage.test_sint64', index=12,
-      number=13, type=18, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_uint32', full_name='TestMessage.test_uint32', index=13,
-      number=14, type=13, cpp_type=3, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='test_uint64', full_name='TestMessage.test_uint64', index=14,
-      number=15, type=4, cpp_type=4, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=54,
-  serialized_end=382,
-)
-
-DESCRIPTOR.message_types_by_name['TestMessage'] = _TESTMESSAGE
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-TestMessage = _reflection.GeneratedProtocolMessageType('TestMessage', (_message.Message,), dict(
-  DESCRIPTOR = _TESTMESSAGE,
-  __module__ = 'TestProto_pb2'
-  # @@protoc_insertion_point(class_scope:TestMessage)
-  ))
-_sym_db.RegisterMessage(TestMessage)
-
-
-DESCRIPTOR.has_options = True
-DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n,io.confluent.kafka.serializers.protobuf.testB\021TestMessageProtos'))
-# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/gen/__init__.py b/tests/integration/schema_registry/gen/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tests/integration/schema_registry/gen/common_proto_pb2.py b/tests/integration/schema_registry/gen/common_proto_pb2.py
deleted file mode 100644
index 3a70ce3..0000000
--- a/tests/integration/schema_registry/gen/common_proto_pb2.py
+++ /dev/null
@@ -1,227 +0,0 @@
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: common_proto.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf.internal import enum_type_wrapper
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from . import metadata_proto_pb2 as metadata__proto__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='common_proto.proto',
-  package='Criteo.Glup',
-  syntax='proto3',
-  serialized_pb=_b('\n\x12\x63ommon_proto.proto\x12\x0b\x43riteo.Glup\x1a\x14metadata_proto.proto\"\xda\x01\n\x07\x43onsent\x12 \n\x18identification_forbidden\x18\x01 \x01(\x08\x12:\n\x06reason\x18\x02 \x01(\x0e\x32*.Criteo.Glup.IdentificationForbiddenReason\x12\x39\n\nset_fields\x18\xda\x86\x03 \x03(\x0b\x32#.Criteo.Glup.Consent.SetFieldsEntry\x1a\x30\n\x0eSetFieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01:\x04\x88\xb5\x18\x01*/\n\x16MarketingObjectiveType\x12\x08\n\x04Sale\x10\x00\x12\x0b\n\x07Install\x10\x01*\xd0\x01\n\x1dIdentificationForbiddenReason\x12\x0c\n\x08NoReason\x10\x00\x12\x1b\n\x17\x45xplicitConsentRequired\x10\x01\x12\x10\n\x0cOptoutCookie\x10\x02\x12\x13\n\x0f\x43toOptoutCookie\x10\x03\x12\x15\n\x11LimitedAdTracking\x10\x04\x12\x0e\n\nHstsOptout\x10\x05\x12\x14\n\x10\x44oNotTrackHeader\x10\x06\x12\r\n\tOoOCookie\x10\x07\x12\x11\n\rPendingOptout\x10\x08\x42\x11\n\x0f\x63om.criteo.glupb\x06proto3')
-  ,
-  dependencies=[metadata__proto__pb2.DESCRIPTOR,])
-
-_MARKETINGOBJECTIVETYPE = _descriptor.EnumDescriptor(
-  name='MarketingObjectiveType',
-  full_name='Criteo.Glup.MarketingObjectiveType',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='Sale', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='Install', index=1, number=1,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=278,
-  serialized_end=325,
-)
-_sym_db.RegisterEnumDescriptor(_MARKETINGOBJECTIVETYPE)
-
-MarketingObjectiveType = enum_type_wrapper.EnumTypeWrapper(_MARKETINGOBJECTIVETYPE)
-_IDENTIFICATIONFORBIDDENREASON = _descriptor.EnumDescriptor(
-  name='IdentificationForbiddenReason',
-  full_name='Criteo.Glup.IdentificationForbiddenReason',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='NoReason', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='ExplicitConsentRequired', index=1, number=1,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='OptoutCookie', index=2, number=2,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='CtoOptoutCookie', index=3, number=3,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='LimitedAdTracking', index=4, number=4,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='HstsOptout', index=5, number=5,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='DoNotTrackHeader', index=6, number=6,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='OoOCookie', index=7, number=7,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='PendingOptout', index=8, number=8,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=328,
-  serialized_end=536,
-)
-_sym_db.RegisterEnumDescriptor(_IDENTIFICATIONFORBIDDENREASON)
-
-IdentificationForbiddenReason = enum_type_wrapper.EnumTypeWrapper(_IDENTIFICATIONFORBIDDENREASON)
-Sale = 0
-Install = 1
-NoReason = 0
-ExplicitConsentRequired = 1
-OptoutCookie = 2
-CtoOptoutCookie = 3
-LimitedAdTracking = 4
-HstsOptout = 5
-DoNotTrackHeader = 6
-OoOCookie = 7
-PendingOptout = 8
-
-
-
-_CONSENT_SETFIELDSENTRY = _descriptor.Descriptor(
-  name='SetFieldsEntry',
-  full_name='Criteo.Glup.Consent.SetFieldsEntry',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='key', full_name='Criteo.Glup.Consent.SetFieldsEntry.key', index=0,
-      number=1, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='value', full_name='Criteo.Glup.Consent.SetFieldsEntry.value', index=1,
-      number=2, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=222,
-  serialized_end=270,
-)
-
-_CONSENT = _descriptor.Descriptor(
-  name='Consent',
-  full_name='Criteo.Glup.Consent',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='identification_forbidden', full_name='Criteo.Glup.Consent.identification_forbidden', index=0,
-      number=1, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='reason', full_name='Criteo.Glup.Consent.reason', index=1,
-      number=2, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='set_fields', full_name='Criteo.Glup.Consent.set_fields', index=2,
-      number=50010, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_CONSENT_SETFIELDSENTRY, ],
-  enum_types=[
-  ],
-  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\210\265\030\001')),
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=58,
-  serialized_end=276,
-)
-
-_CONSENT_SETFIELDSENTRY.containing_type = _CONSENT
-_CONSENT.fields_by_name['reason'].enum_type = _IDENTIFICATIONFORBIDDENREASON
-_CONSENT.fields_by_name['set_fields'].message_type = _CONSENT_SETFIELDSENTRY
-DESCRIPTOR.message_types_by_name['Consent'] = _CONSENT
-DESCRIPTOR.enum_types_by_name['MarketingObjectiveType'] = _MARKETINGOBJECTIVETYPE
-DESCRIPTOR.enum_types_by_name['IdentificationForbiddenReason'] = _IDENTIFICATIONFORBIDDENREASON
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-Consent = _reflection.GeneratedProtocolMessageType('Consent', (_message.Message,), dict(
-
-  SetFieldsEntry = _reflection.GeneratedProtocolMessageType('SetFieldsEntry', (_message.Message,), dict(
-    DESCRIPTOR = _CONSENT_SETFIELDSENTRY,
-    __module__ = 'common_proto_pb2'
-    # @@protoc_insertion_point(class_scope:Criteo.Glup.Consent.SetFieldsEntry)
-    ))
-  ,
-  DESCRIPTOR = _CONSENT,
-  __module__ = 'common_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.Consent)
-  ))
-_sym_db.RegisterMessage(Consent)
-_sym_db.RegisterMessage(Consent.SetFieldsEntry)
-
-
-DESCRIPTOR.has_options = True
-DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\017com.criteo.glup'))
-_CONSENT_SETFIELDSENTRY.has_options = True
-_CONSENT_SETFIELDSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
-_CONSENT.has_options = True
-_CONSENT._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\210\265\030\001'))
-# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/gen/exampleProtoCriteo_pb2.py b/tests/integration/schema_registry/gen/exampleProtoCriteo_pb2.py
deleted file mode 100644
index 6297047..0000000
--- a/tests/integration/schema_registry/gen/exampleProtoCriteo_pb2.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: exampleProtoCriteo.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from . import metadata_proto_pb2 as metadata__proto__pb2
-from . import common_proto_pb2 as common__proto__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='exampleProtoCriteo.proto',
-  package='Criteo.Glup',
-  syntax='proto3',
-  serialized_pb=_b('\n\x18\x65xampleProtoCriteo.proto\x12\x0b\x43riteo.Glup\x1a\x14metadata_proto.proto\x1a\x12\x63ommon_proto.proto\"\x9b\x06\n\x08\x43lickCas\x12(\n\x0bglup_origin\x18\x01 \x01(\x0b\x32\x13.Criteo.Glup.Origin\x12)\n\tpartition\x18\x02 \x01(\x0b\x32\x16.Criteo.Glup.Partition\x12\x0b\n\x03uid\x18\x05 \x01(\t\x12:\n\nset_fields\x18\xda\x86\x03 \x03(\x0b\x32$.Criteo.Glup.ClickCas.SetFieldsEntry\x12R\n\x0f\x63ontrol_message\x18\xff\xff\x7f \x03(\x0b\x32%.Criteo.Glup.ControlMessage.WatermarkB\x10\x92\xb5\x18\x0c\n\n__metadata\x1a\x30\n\x0eSetFieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01:\xc9\x03\x88\xb5\x18\x01\x82\xb5\x18\x04:\x02\x10\x01\x82\xb5\x18\x12\n\x10\n\x0eglup_click_cas\x82\xb5\x18\xea\x01*\xe7\x01\n\tclick_cas\x12G\n,/glup/datasets/click_cas/data/full/JSON_PAIL\x10\x02@dJ\x13\x46\x45\x44\x45RATED_JSON_PAIL\x12U\n3/glup/datasets/click_cas/data/full/PROTOBUF_PARQUET\x10\x04@2J\x1a\x46\x45\x44\x45RATED_PROTOBUF_PARQUET\x18\x04\"&com.criteo.glup.ClickCasProto$ClickCas2\x0b\x65nginejoins@\x01H\x86\x03\x82\xb5\x18\xb3\x01\x12\xb0\x01\x1a\xad\x01\n\x0b\x65nginejoins\x12\tclick_cas \x04Z9\x12\x30\n\x0eglup_click_cas\"\tclick_cas*\x13\x46\x45\x44\x45RATED_JSON_PAIL\xd2\x0f\x04\x08\x02\x10\x06ZP2G\n\tclick_cas\x12\tclick_cas*\x13\x46\x45\x44\x45RATED_JSON_PAIL2\x1a\x46\x45\x44\x45RATED_PROTOBUF_PARQUET\xd2\x0f\x04\x08\x02\x10\x06\x62\x04R\x02\x18\x04J\x04\x08\x46\x10JJ\x04\x08K\x10LR\x08obsoleteR\tobsolete2B\x11\n\x0f\x63om.criteo.glupb\x06proto3')
-  ,
-  dependencies=[metadata__proto__pb2.DESCRIPTOR,common__proto__pb2.DESCRIPTOR,])
-
-
-
-
-_CLICKCAS_SETFIELDSENTRY = _descriptor.Descriptor(
-  name='SetFieldsEntry',
-  full_name='Criteo.Glup.ClickCas.SetFieldsEntry',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='key', full_name='Criteo.Glup.ClickCas.SetFieldsEntry.key', index=0,
-      number=1, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='value', full_name='Criteo.Glup.ClickCas.SetFieldsEntry.value', index=1,
-      number=2, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=338,
-  serialized_end=386,
-)
-
-_CLICKCAS = _descriptor.Descriptor(
-  name='ClickCas',
-  full_name='Criteo.Glup.ClickCas',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='glup_origin', full_name='Criteo.Glup.ClickCas.glup_origin', index=0,
-      number=1, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='partition', full_name='Criteo.Glup.ClickCas.partition', index=1,
-      number=2, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='uid', full_name='Criteo.Glup.ClickCas.uid', index=2,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='set_fields', full_name='Criteo.Glup.ClickCas.set_fields', index=3,
-      number=50010, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='control_message', full_name='Criteo.Glup.ClickCas.control_message', index=4,
-      number=2097151, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222\265\030\014\n\n__metadata')), file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_CLICKCAS_SETFIELDSENTRY, ],
-  enum_types=[
-  ],
-  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\210\265\030\001\202\265\030\004:\002\020\001\202\265\030\022\n\020\n\016glup_click_cas\202\265\030\352\001*\347\001\n\tclick_cas\022G\n,/glup/datasets/click_cas/data/full/JSON_PAIL\020\002@dJ\023FEDERATED_JSON_PAIL\022U\n3/glup/datasets/click_cas/data/full/PROTOBUF_PARQUET\020\004@2J\032FEDERATED_PROTOBUF_PARQUET\030\004\"&com.criteo.glup.ClickCasProto$ClickCas2\013enginejoins@\001H\206\003\202\265\030\263\001\022\260\001\032\255\001\n\013enginejoins\022\tclick_cas \004Z9\0220\n\016glup_click_cas\"\tclick_cas*\023FEDERATED_JSON_PAIL\322\017\004\010\002\020\006ZP2G\n\tclick_cas\022\tclick_cas*\023FEDERATED_JSON_PAIL2\032FEDERATED_PROTOBUF_PARQUET\322\017\004\010\002\020\006b\004R\002\030\004')),
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=84,
-  serialized_end=879,
-)
-
-_CLICKCAS_SETFIELDSENTRY.containing_type = _CLICKCAS
-_CLICKCAS.fields_by_name['glup_origin'].message_type = metadata__proto__pb2._ORIGIN
-_CLICKCAS.fields_by_name['partition'].message_type = metadata__proto__pb2._PARTITION
-_CLICKCAS.fields_by_name['set_fields'].message_type = _CLICKCAS_SETFIELDSENTRY
-_CLICKCAS.fields_by_name['control_message'].message_type = metadata__proto__pb2._CONTROLMESSAGE_WATERMARK
-DESCRIPTOR.message_types_by_name['ClickCas'] = _CLICKCAS
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-ClickCas = _reflection.GeneratedProtocolMessageType('ClickCas', (_message.Message,), dict(
-
-  SetFieldsEntry = _reflection.GeneratedProtocolMessageType('SetFieldsEntry', (_message.Message,), dict(
-    DESCRIPTOR = _CLICKCAS_SETFIELDSENTRY,
-    __module__ = 'exampleProtoCriteo_pb2'
-    # @@protoc_insertion_point(class_scope:Criteo.Glup.ClickCas.SetFieldsEntry)
-    ))
-  ,
-  DESCRIPTOR = _CLICKCAS,
-  __module__ = 'exampleProtoCriteo_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.ClickCas)
-  ))
-_sym_db.RegisterMessage(ClickCas)
-_sym_db.RegisterMessage(ClickCas.SetFieldsEntry)
-
-
-DESCRIPTOR.has_options = True
-DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\017com.criteo.glup'))
-_CLICKCAS_SETFIELDSENTRY.has_options = True
-_CLICKCAS_SETFIELDSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
-_CLICKCAS.fields_by_name['control_message'].has_options = True
-_CLICKCAS.fields_by_name['control_message']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222\265\030\014\n\n__metadata'))
-_CLICKCAS.has_options = True
-_CLICKCAS._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\210\265\030\001\202\265\030\004:\002\020\001\202\265\030\022\n\020\n\016glup_click_cas\202\265\030\352\001*\347\001\n\tclick_cas\022G\n,/glup/datasets/click_cas/data/full/JSON_PAIL\020\002@dJ\023FEDERATED_JSON_PAIL\022U\n3/glup/datasets/click_cas/data/full/PROTOBUF_PARQUET\020\004@2J\032FEDERATED_PROTOBUF_PARQUET\030\004\"&com.criteo.glup.ClickCasProto$ClickCas2\013enginejoins@\001H\206\003\202\265\030\263\001\022\260\001\032\255\001\n\013enginejoins\022\tclick_cas \004Z9\0220\n\016glup_click_cas\"\tclick_cas*\023FEDERATED_JSON_PAIL\322\017\004\010\002\020\006ZP2G\n\tclick_cas\022\tclick_cas*\023FEDERATED_JSON_PAIL2\032FEDERATED_PROTOBUF_PARQUET\322\017\004\010\002\020\006b\004R\002\030\004'))
-# @@protoc_insertion_point(module_scope)
diff --git a/tests/integration/schema_registry/gen/metadata_proto_pb2.py b/tests/integration/schema_registry/gen/metadata_proto_pb2.py
deleted file mode 100644
index e269825..0000000
--- a/tests/integration/schema_registry/gen/metadata_proto_pb2.py
+++ /dev/null
@@ -1,3060 +0,0 @@
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: metadata_proto.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf.internal import enum_type_wrapper
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='metadata_proto.proto',
-  package='Criteo.Glup',
-  syntax='proto3',
-  serialized_pb=_b('\n\x14metadata_proto.proto\x12\x0b\x43riteo.Glup\x1a google/protobuf/descriptor.proto\"$\n\x13KafkaMessageOptions\x12\r\n\x05topic\x18\x01 \x03(\t\"\x80\x02\n\x07\x44\x61taSet\x12\n\n\x02id\x18\x01 \x01(\t\x12*\n\x06\x66ormat\x18\x02 \x03(\x0b\x32\x1a.Criteo.Glup.DataSetFormat\x12\x36\n\x10partition_scheme\x18\x03 \x01(\x0e\x32\x1c.Criteo.Glup.PartitionScheme\x12\x12\n\njava_class\x18\x04 \x01(\t\x12\x11\n\tfor_tests\x18\x05 \x01(\x08\x12\r\n\x05owner\x18\x06 \x01(\t\x12\x0f\n\x07private\x18\x07 \x01(\x08\x12&\n\x04kind\x18\x08 \x01(\x0e\x32\x18.Criteo.Glup.DataSetKind\x12\x16\n\x0eretention_days\x18\t \x01(\x05\"x\n\x0c\x44\x61taSetChunk\x12)\n\tpartition\x18\x01 \x03(\x0b\x32\x16.Criteo.Glup.Partition\x12*\n\x06\x66ormat\x18\x02 \x01(\x0b\x32\x1a.Criteo.Glup.DataSetFormat\x12\x11\n\tdatasetId\x18\x03 \x01(\t\"\xe6\x02\n\rDataSetFormat\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x30\n\x0b\x66ile_format\x18\x02 \x01(\x0e\x32\x1b.Criteo.Glup.HDFSDataFormat\x12\x36\n\x10partition_scheme\x18\x03 \x01(\x0e\x32\x1c.Criteo.Glup.PartitionScheme\x12\x33\n\x0fstart_partition\x18\x04 \x01(\x0b\x32\x1a.Criteo.Glup.HDFSPartition\x12\x31\n\rend_partition\x18\x05 \x01(\x0b\x32\x1a.Criteo.Glup.HDFSPartition\x12\x16\n\x0eretention_days\x18\x07 \x01(\x05\x12\x10\n\x08priority\x18\x08 \x01(\x05\x12\r\n\x05label\x18\t \x01(\t\x12\x36\n\x10monitoring_level\x18\n \x01(\x0e\x32\x1c.Criteo.Glup.MonitoringLevelJ\x04\x08\x06\x10\x07\"\xce\x19\n\x0bHDFSOptions\x12\x36\n\x06import\x18\x03 \x03(\x0b\x32&.Criteo.Glup.HDFSOptions.ImportOptions\x1a\x86\x19\n\rImportOptions\x12\r\n\x05owner\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x32\n\x0cpartitioning\x18\x04 \x01(\x0e\x32\x1c.Criteo.Glup.PartitionScheme\x12+\n\x06\x66ormat\x18\x05 \x01(\x0e\x32\x1b.Criteo.Glup.HDFSDataFormat\x12\x0f\n\x07private\x18\x06 \x01(\x08\x12\x43\n\tgenerator\x18\x0b \x03(\x0b\x32\x30.Criteo.Glup.HDFSOptions.ImportOptions.Generator\x12\x39\n\x04view\x18\x0c \x03(\x0b\x32+.Criteo.Glup.HDFSOptions.ImportOptions.View\x1a\x90\x01\n\x04View\x12\x45\n\x04hive\x18\n \x01(\x0b\x32\x37.Criteo.Glup.HDFSOptions.ImportOptions.View.HiveOptions\x1a\x41\n\x0bHiveOptions\x12\x32\n\x0cpartitioning\x18\x03 \x01(\x0e\x32\x1c.Criteo.Glup.PartitionScheme\x1a\xd2\x15\n\tGenerator\x12V\n\ndataloader\x18\x01 \x01(\x0b\x32\x42.Criteo.Glup.HDFSOptions.ImportOptions.Generator.DataloaderOptions\x12V\n\nkafka2hdfs\x18\x02 \x01(\x0b\x32\x42.Criteo.Glup.HDFSOptions.ImportOptions.Generator.Kafka2HdfsOptions\x12J\n\x04sync\x18\x03 \x01(\x0b\x32<.Criteo.Glup.HDFSOptions.ImportOptions.Generator.SyncOptions\x12R\n\x08\x65xternal\x18\x04 \x01(\x0b\x32@.Criteo.Glup.HDFSOptions.ImportOptions.Generator.ExternalOptions\x12N\n\x06\x62\x61\x63kup\x18\x05 \x01(\x0b\x32>.Criteo.Glup.HDFSOptions.ImportOptions.Generator.BackupOptions\x12X\n\x0btranscoding\x18\x06 \x01(\x0b\x32\x43.Criteo.Glup.HDFSOptions.ImportOptions.Generator.TranscodingOptions\x12N\n\x06kacoha\x18\x07 \x01(\x0b\x32>.Criteo.Glup.HDFSOptions.ImportOptions.Generator.KaCoHaOptions\x12R\n\x0b\x64\x65\x64uplicate\x18\x08 \x01(\x0b\x32=.Criteo.Glup.HDFSOptions.ImportOptions.Generator.DedupOptions\x12P\n\x07sampler\x18\t \x01(\x0b\x32?.Criteo.Glup.HDFSOptions.ImportOptions.Generator.SamplerOptions\x12V\n\ncomparator\x18\n \x01(\x0b\x32\x42.Criteo.Glup.HDFSOptions.ImportOptions.Generator.ComparatorOptions\x12\"\n\x02to\x18\xfa\x01 \x03(\x0b\x32\x15.Criteo.Glup.Location\x12\x12\n\tnamespace\x18\xfb\x01 \x01(\t\x12\x13\n\nstart_date\x18\xfd\x01 \x01(\t\x12\x12\n\tstop_date\x18\xfe\x01 \x01(\t\x12\x12\n\tignore_cn\x18\xff\x01 \x01(\x08\x1a\x9a\x01\n\x0c\x44\x65\x64upOptions\x12\x18\n\x10input_dataset_id\x18\x01 \x01(\t\x12\x1a\n\x12input_format_label\x18\x02 \x01(\t\x12\x19\n\x11output_dataset_id\x18\x03 \x01(\t\x12\x1b\n\x13output_format_label\x18\x04 \x01(\t\x12\x1c\n\x14use_hippo_cuttle_job\x18\x05 \x01(\x08\x1au\n\x11Kafka2HdfsOptions\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65\x64uplicate\x18\x03 \x01(\x08\x12\x19\n\x11output_dataset_id\x18\x04 \x01(\t\x12\x1b\n\x13output_format_label\x18\x05 \x01(\tJ\x04\x08\x02\x10\x03\x1aK\n\x0cKacohaConfig\x12\x1b\n\x13partitions_per_task\x18\x01 \x01(\x05\x12\x1e\n\x16poll_buffer_size_bytes\x18\x02 \x01(\x05\x1a\x87\x01\n\x11KacohaConfigPerDc\x12#\n\x02\x64\x63\x18\x01 \x01(\x0e\x32\x17.Criteo.Glup.DataCenter\x12M\n\x06\x63onfig\x18\x02 \x01(\x0b\x32=.Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfig\x1a\x95\x02\n\rKaCoHaOptions\x12\r\n\x05topic\x18\x01 \x01(\t\x12\x19\n\x11output_dataset_id\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65\x64uplicate\x18\x03 \x01(\x08\x12M\n\x06\x63onfig\x18\x04 \x01(\x0b\x32=.Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfig\x12\x1b\n\x13output_format_label\x18\x05 \x01(\t\x12Y\n\rconfig_per_dc\x18\x06 \x03(\x0b\x32\x42.Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfigPerDc\x1a<\n\x11\x44\x61taloaderOptions\x12\'\n\x08platform\x18\x01 \x03(\x0e\x32\x15.Criteo.Glup.Platform\x1a\xf1\x01\n\x0bSyncOptions\x12#\n\x04\x66rom\x18\x01 \x01(\x0b\x32\x15.Criteo.Glup.Location\x12\x18\n\x10source_namespace\x18\x03 \x01(\t\x12(\n\tplatforms\x18\x06 \x03(\x0e\x32\x15.Criteo.Glup.Platform\x12\x16\n\x0eis_backfilling\x18\x08 \x01(\x08\x12\x10\n\x08to_label\x18\t \x01(\t\x12\x15\n\rto_dataset_id\x18\n \x01(\t\x12\x18\n\x10with_backfilling\x18\x0b \x01(\x08\x12\x1e\n\x16is_scheduled_on_source\x18\x0c \x01(\x08\x1ax\n\rBackupOptions\x12#\n\x04\x66rom\x18\x01 \x01(\x0b\x32\x15.Criteo.Glup.Location\x12\x18\n\x10source_namespace\x18\x02 \x01(\t\x12(\n\tplatforms\x18\x03 \x03(\x0e\x32\x15.Criteo.Glup.Platform\x1a\x83\x02\n\x12TranscodingOptions\x12\x18\n\x10input_dataset_id\x18\x01 \x01(\t\x12\x19\n\x11output_dataset_id\x18\x02 \x01(\t\x12\x31\n\x0cinput_format\x18\x03 \x01(\x0e\x32\x1b.Criteo.Glup.HDFSDataFormat\x12\x32\n\routput_format\x18\x04 \x01(\x0e\x32\x1b.Criteo.Glup.HDFSDataFormat\x12\x1b\n\x13input_dataset_label\x18\x05 \x01(\t\x12\x1c\n\x14output_dataset_label\x18\x06 \x01(\t\x12\x16\n\x0eis_by_platform\x18\x07 \x01(\x08\x1a\x95\x01\n\x0eSamplerOptions\x12\x18\n\x10input_dataset_id\x18\x01 \x01(\t\x12\x1a\n\x12input_format_label\x18\x02 \x01(\t\x12\x19\n\x11output_dataset_id\x18\x03 \x01(\t\x12\x1b\n\x13output_format_label\x18\x04 \x01(\t\x12\x15\n\rsampling_rate\x18\x05 \x01(\x02\x1a\xa7\x01\n\x11\x43omparatorOptions\x12\x17\n\x0fleft_dataset_id\x18\x01 \x01(\t\x12\x19\n\x11left_format_label\x18\x02 \x01(\t\x12\x18\n\x10right_dataset_id\x18\x03 \x01(\t\x12\x1a\n\x12right_format_label\x18\x04 \x01(\t\x12\x10\n\x08hostname\x18\x05 \x01(\t\x12\x16\n\x0eignored_fields\x18\x06 \x01(\t\x1a\x11\n\x0f\x45xternalOptions\"9\n\x18ProducerTransportOptions\x12\x0e\n\x06syslog\x18\x01 \x01(\x08\x12\r\n\x05kafka\x18\x02 \x01(\x08\"8\n\x0fPropertyOptions\x12\x10\n\x08valuable\x18\x01 \x01(\x08\x12\x13\n\x0bhigh_volume\x18\x02 \x01(\x08\"\xcb\x02\n\x0bGlupOptions\x12/\n\x05kafka\x18\x01 \x01(\x0b\x32 .Criteo.Glup.KafkaMessageOptions\x12&\n\x04hdfs\x18\x02 \x01(\x0b\x32\x18.Criteo.Glup.HDFSOptions\x12\x14\n\x0csampling_pct\x18\x03 \x01(\r\x12\x1c\n\x14preprod_sampling_pct\x18\x04 \x01(\r\x12%\n\x07\x64\x61taset\x18\x05 \x03(\x0b\x32\x14.Criteo.Glup.DataSet\x12\x1c\n\x14message_sampling_pct\x18\x06 \x01(\r\x12\x38\n\tproducers\x18\x07 \x01(\x0b\x32%.Criteo.Glup.ProducerTransportOptions\x12\x30\n\nproperties\x18\x08 \x01(\x0b\x32\x1c.Criteo.Glup.PropertyOptions\"\xb1\x01\n\x10GlupFieldOptions\x12\x0f\n\x07sampled\x18\x01 \x01(\x08\x12\x14\n\x0csampling_key\x18\x02 \x01(\x08\x12\x30\n\x11\x64isabled_platform\x18\x03 \x03(\x0e\x32\x15.Criteo.Glup.Platform\x12\x18\n\x10should_clean_pii\x18\x04 \x01(\x08\x12\x18\n\x10pending_deletion\x18\x05 \x01(\x08\x12\x10\n\x08\x61\x64\x64\x65\x64_at\x18\x06 \x01(\t\")\n\x0bJsonMapping\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04skip\x18\x02 \x01(\x08\"4\n\tJsonAlias\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11use_enum_field_id\x18\x03 \x01(\x08\"\xb5\x02\n\x0f\x42\x61seGlupMessage\x12(\n\x0bglup_origin\x18\x01 \x01(\x0b\x32\x13.Criteo.Glup.Origin\x12)\n\tpartition\x18\x02 \x01(\x0b\x32\x16.Criteo.Glup.Partition\x12\x41\n\nset_fields\x18\xda\x86\x03 \x03(\x0b\x32+.Criteo.Glup.BaseGlupMessage.SetFieldsEntry\x12R\n\x0f\x63ontrol_message\x18\xff\xff\x7f \x03(\x0b\x32%.Criteo.Glup.ControlMessage.WatermarkB\x10\x92\xb5\x18\x0c\n\n__metadata\x1a\x30\n\x0eSetFieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01:\x04\x88\xb5\x18\x01\"\xf2\x01\n\x19\x46orwardedWatermarkMessage\x12\x1d\n\x15original_kafka_offset\x18\x05 \x01(\x03\x12\x11\n\ttimestamp\x18\x06 \x01(\x03\x12\x1d\n\x15\x63onsolidation_enabled\x18\x07 \x01(\x08\x12\x12\n\ndataset_id\x18\n \x01(\t\x12\x1c\n\x14\x64\x61taset_format_label\x18\x0b \x01(\t\x12R\n\x0f\x63ontrol_message\x18\xff\xff\x7f \x03(\x0b\x32%.Criteo.Glup.ControlMessage.WatermarkB\x10\x92\xb5\x18\x0c\n\n__metadata\"y\n\x08Location\x12%\n\x03\x65nv\x18\x01 \x01(\x0e\x32\x18.Criteo.Glup.Environment\x12#\n\x02\x64\x63\x18\x02 \x01(\x0e\x32\x17.Criteo.Glup.DataCenter\x12\r\n\x05label\x18\x03 \x01(\t\x12\x12\n\ndataset_id\x18\x04 \x01(\t\"\xa2\x01\n\x06Origin\x12+\n\ndatacenter\x18\x01 \x01(\x0e\x32\x17.Criteo.Glup.DataCenter\x12\x1a\n\x03ip4\x18\x02 \x01(\x07\x42\r\x8a\xb5\x18\t\n\x07host_ip\x12\x10\n\x08hostname\x18\x03 \x01(\t\x12\x1e\n\x0e\x63ontainer_task\x18\x04 \x01(\tB\x06\x8a\xb5\x18\x02\x10\x01\x12\x1d\n\rcontainer_app\x18\x05 \x01(\tB\x06\x8a\xb5\x18\x02\x10\x01\"\x89\x05\n\x0e\x43ontrolMessage\x12\x38\n\twatermark\x18\x01 \x01(\x0b\x32%.Criteo.Glup.ControlMessage.Watermark\x1a\x89\x01\n\x0fWatermarkOrigin\x12\x13\n\x0bkafka_topic\x18\x01 \x01(\t\x12+\n\ndatacenter\x18\x02 \x01(\x0e\x32\x17.Criteo.Glup.DataCenter\x12\x34\n\x07\x63luster\x18\x03 \x01(\x0e\x32#.Criteo.Glup.ControlMessage.Cluster\x1a\xe8\x02\n\tWatermark\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x10\n\x08hostname\x18\x02 \x01(\t\x12\x13\n\x0bkafka_topic\x18\x03 \x01(\t\x12\x11\n\tpartition\x18\x04 \x01(\x05\x12\x17\n\x0fpartition_count\x18\x05 \x01(\x05\x12\x14\n\x0cprocess_uuid\x18\x06 \x01(\x0c\x12\x0e\n\x06region\x18\x07 \x01(\t\x12*\n\x11timestamp_seconds\x18\x08 \x01(\x05\x42\x0f\x92\xb5\x18\x0b\n\ttimestamp\x12\x0f\n\x07\x63luster\x18\t \x01(\t\x12\x13\n\x0b\x65nvironment\x18\n \x01(\t\x12J\n\nset_fields\x18\xda\x86\x03 \x03(\x0b\x32\x34.Criteo.Glup.ControlMessage.Watermark.SetFieldsEntry\x1a\x30\n\x0eSetFieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01:\x04\x88\xb5\x18\x01\"F\n\x07\x43luster\x12\x17\n\x13UNSUPPORTED_CLUSTER\x10\x00\x12\t\n\x05LOCAL\x10\x02\x12\x0b\n\x07\x43\x45NTRAL\x10\x03\x12\n\n\x06STREAM\x10\x04\"\x99\x01\n\tPartition\x12*\n\x11timestamp_seconds\x18\x01 \x01(\x04\x42\x0f\x8a\xb5\x18\x0b\n\ttimestamp\x12,\n\rhost_platform\x18\x02 \x01(\x0e\x32\x15.Criteo.Glup.Platform\x12\x32\n\nevent_type\x18\x03 \x01(\x0e\x32\x16.Criteo.Glup.EventTypeB\x06\x8a\xb5\x18\x02\x10\x01\"\x93\x01\n\rHDFSPartition\x12\x19\n\x11timestamp_seconds\x18\x01 \x01(\x04\x12,\n\rhost_platform\x18\x02 \x01(\x0e\x32\x15.Criteo.Glup.Platform\x12*\n\nevent_type\x18\x03 \x01(\x0e\x32\x16.Criteo.Glup.EventType\x12\r\n\x05\x64\x65pth\x18\x04 \x01(\x05\"\xa5\x01\n\x07Hash128\x12\x15\n\rmost_sig_bits\x18\x01 \x01(\x06\x12\x16\n\x0eleast_sig_bits\x18\x02 \x01(\x06\x12\x39\n\nset_fields\x18\xda\x86\x03 \x03(\x0b\x32#.Criteo.Glup.Hash128.SetFieldsEntry\x1a\x30\n\x0eSetFieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01*~\n\x0fPartitionScheme\x12 \n\x1cUNSUPPORTED_PARTITION_SCHEME\x10\x00\x12\t\n\x05\x44\x41ILY\x10\x02\x12\n\n\x06HOURLY\x10\x03\x12\x13\n\x0fPLATFORM_HOURLY\x10\x04\x12\x1d\n\x19\x45VENTTYPE_PLATFORM_HOURLY\x10\x05*?\n\rMessageFormat\x12\x16\n\x12UNSUPPORTED_FORMAT\x10\x00\x12\x08\n\x04JSON\x10\x01\x12\x0c\n\x08PROTOBUF\x10\x02*d\n\x0eHDFSDataFormat\x12\x1b\n\x17UNSUPPORTED_DATA_FORMAT\x10\x00\x12\r\n\tJSON_PAIL\x10\x02\x12\x10\n\x0cPROTOBUF_SEQ\x10\x03\x12\x14\n\x10PROTOBUF_PARQUET\x10\x04*3\n\x0b\x44\x61taSetKind\x12\x14\n\x10UNSUPPORTED_KIND\x10\x00\x12\x0e\n\nTIMESERIES\x10\x01*\x9a\x01\n\x0fMonitoringLevel\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x15\n\x11REMOVE_MONITORING\x10\x01\x12\x1a\n\x16INFORMATIVE_MONITORING\x10\x02\x12\x15\n\x11\x43ONSENSUS_IGNORED\x10\x03\x12\x30\n,CONSENSUS_IGNORED_AND_INFORMATIVE_MONITORING\x10\x04*\x8b\x01\n\nDataCenter\x12\x1a\n\x16UNSUPPORTED_DATACENTER\x10\x00\x12\x07\n\x03\x41M5\x10\x02\x12\x07\n\x03HK5\x10\x03\x12\x07\n\x03NY8\x10\x04\x12\x07\n\x03PAR\x10\x05\x12\x07\n\x03PA4\x10\x06\x12\x07\n\x03SH5\x10\x07\x12\x07\n\x03SV6\x10\x08\x12\x07\n\x03TY5\x10\t\x12\x07\n\x03VA1\x10\n\x12\x07\n\x03\x41M6\x10\x0b\x12\x07\n\x03\x44\x41\x31\x10\x0c*A\n\x0b\x45nvironment\x12\x1b\n\x17UNSUPPORTED_ENVIRONMENT\x10\x00\x12\x0b\n\x07PREPROD\x10\x01\x12\x08\n\x04PROD\x10\x02*D\n\x08Platform\x12\x18\n\x14UNSUPPORTED_PLATFORM\x10\x00\x12\x06\n\x02\x45U\x10\x02\x12\x06\n\x02US\x10\x03\x12\x06\n\x02\x41S\x10\x04\x12\x06\n\x02\x43N\x10\x05*[\n\tEventType\x12\x1a\n\x16UNSUPPORTED_EVENT_TYPE\x10\x00\x12\x10\n\x0cItemPageView\x10\x02\x12\t\n\x05Sales\x10\x03\x12\n\n\x06\x42\x61sket\x10\x04\x12\t\n\x05Other\x10\x05*%\n\x05YesNo\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02NO\x10\x01\x12\x07\n\x03YES\x10\x02:I\n\x04glup\x12\x1f.google.protobuf.MessageOptions\x18\xd0\x86\x03 \x01(\x0b\x32\x18.Criteo.Glup.GlupOptions:C\n\x18\x63ontains_nullable_fields\x12\x1f.google.protobuf.MessageOptions\x18\xd1\x86\x03 \x01(\x08:Q\n\tglupfield\x12\x1d.google.protobuf.FieldOptions\x18\xd0\x86\x03 \x01(\x0b\x32\x1d.Criteo.Glup.GlupFieldOptions:O\n\x0cjson_mapping\x12\x1d.google.protobuf.FieldOptions\x18\xd1\x86\x03 \x01(\x0b\x32\x18.Criteo.Glup.JsonMapping:E\n\x04json\x12\x1d.google.protobuf.FieldOptions\x18\xd2\x86\x03 \x01(\x0b\x32\x16.Criteo.Glup.JsonAliasB\x11\n\x0f\x63om.criteo.glupb\x06proto3')
-  ,
-  dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
-
-_PARTITIONSCHEME = _descriptor.EnumDescriptor(
-  name='PartitionScheme',
-  full_name='Criteo.Glup.PartitionScheme',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='UNSUPPORTED_PARTITION_SCHEME', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='DAILY', index=1, number=2,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='HOURLY', index=2, number=3,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='PLATFORM_HOURLY', index=3, number=4,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='EVENTTYPE_PLATFORM_HOURLY', index=4, number=5,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=6831,
-  serialized_end=6957,
-)
-_sym_db.RegisterEnumDescriptor(_PARTITIONSCHEME)
-
-PartitionScheme = enum_type_wrapper.EnumTypeWrapper(_PARTITIONSCHEME)
-_MESSAGEFORMAT = _descriptor.EnumDescriptor(
-  name='MessageFormat',
-  full_name='Criteo.Glup.MessageFormat',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='UNSUPPORTED_FORMAT', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='JSON', index=1, number=1,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='PROTOBUF', index=2, number=2,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=6959,
-  serialized_end=7022,
-)
-_sym_db.RegisterEnumDescriptor(_MESSAGEFORMAT)
-
-MessageFormat = enum_type_wrapper.EnumTypeWrapper(_MESSAGEFORMAT)
-_HDFSDATAFORMAT = _descriptor.EnumDescriptor(
-  name='HDFSDataFormat',
-  full_name='Criteo.Glup.HDFSDataFormat',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='UNSUPPORTED_DATA_FORMAT', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='JSON_PAIL', index=1, number=2,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='PROTOBUF_SEQ', index=2, number=3,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='PROTOBUF_PARQUET', index=3, number=4,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=7024,
-  serialized_end=7124,
-)
-_sym_db.RegisterEnumDescriptor(_HDFSDATAFORMAT)
-
-HDFSDataFormat = enum_type_wrapper.EnumTypeWrapper(_HDFSDATAFORMAT)
-_DATASETKIND = _descriptor.EnumDescriptor(
-  name='DataSetKind',
-  full_name='Criteo.Glup.DataSetKind',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='UNSUPPORTED_KIND', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='TIMESERIES', index=1, number=1,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=7126,
-  serialized_end=7177,
-)
-_sym_db.RegisterEnumDescriptor(_DATASETKIND)
-
-DataSetKind = enum_type_wrapper.EnumTypeWrapper(_DATASETKIND)
-_MONITORINGLEVEL = _descriptor.EnumDescriptor(
-  name='MonitoringLevel',
-  full_name='Criteo.Glup.MonitoringLevel',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='DEFAULT', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='REMOVE_MONITORING', index=1, number=1,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='INFORMATIVE_MONITORING', index=2, number=2,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='CONSENSUS_IGNORED', index=3, number=3,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='CONSENSUS_IGNORED_AND_INFORMATIVE_MONITORING', index=4, number=4,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=7180,
-  serialized_end=7334,
-)
-_sym_db.RegisterEnumDescriptor(_MONITORINGLEVEL)
-
-MonitoringLevel = enum_type_wrapper.EnumTypeWrapper(_MONITORINGLEVEL)
-_DATACENTER = _descriptor.EnumDescriptor(
-  name='DataCenter',
-  full_name='Criteo.Glup.DataCenter',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='UNSUPPORTED_DATACENTER', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='AM5', index=1, number=2,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='HK5', index=2, number=3,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='NY8', index=3, number=4,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='PAR', index=4, number=5,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='PA4', index=5, number=6,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='SH5', index=6, number=7,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='SV6', index=7, number=8,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='TY5', index=8, number=9,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='VA1', index=9, number=10,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='AM6', index=10, number=11,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='DA1', index=11, number=12,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=7337,
-  serialized_end=7476,
-)
-_sym_db.RegisterEnumDescriptor(_DATACENTER)
-
-DataCenter = enum_type_wrapper.EnumTypeWrapper(_DATACENTER)
-_ENVIRONMENT = _descriptor.EnumDescriptor(
-  name='Environment',
-  full_name='Criteo.Glup.Environment',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='UNSUPPORTED_ENVIRONMENT', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='PREPROD', index=1, number=1,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='PROD', index=2, number=2,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=7478,
-  serialized_end=7543,
-)
-_sym_db.RegisterEnumDescriptor(_ENVIRONMENT)
-
-Environment = enum_type_wrapper.EnumTypeWrapper(_ENVIRONMENT)
-_PLATFORM = _descriptor.EnumDescriptor(
-  name='Platform',
-  full_name='Criteo.Glup.Platform',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='UNSUPPORTED_PLATFORM', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='EU', index=1, number=2,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='US', index=2, number=3,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='AS', index=3, number=4,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='CN', index=4, number=5,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=7545,
-  serialized_end=7613,
-)
-_sym_db.RegisterEnumDescriptor(_PLATFORM)
-
-Platform = enum_type_wrapper.EnumTypeWrapper(_PLATFORM)
-_EVENTTYPE = _descriptor.EnumDescriptor(
-  name='EventType',
-  full_name='Criteo.Glup.EventType',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='UNSUPPORTED_EVENT_TYPE', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='ItemPageView', index=1, number=2,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='Sales', index=2, number=3,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='Basket', index=3, number=4,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='Other', index=4, number=5,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=7615,
-  serialized_end=7706,
-)
-_sym_db.RegisterEnumDescriptor(_EVENTTYPE)
-
-EventType = enum_type_wrapper.EnumTypeWrapper(_EVENTTYPE)
-_YESNO = _descriptor.EnumDescriptor(
-  name='YesNo',
-  full_name='Criteo.Glup.YesNo',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='UNKNOWN', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='NO', index=1, number=1,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='YES', index=2, number=2,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=7708,
-  serialized_end=7745,
-)
-_sym_db.RegisterEnumDescriptor(_YESNO)
-
-YesNo = enum_type_wrapper.EnumTypeWrapper(_YESNO)
-UNSUPPORTED_PARTITION_SCHEME = 0
-DAILY = 2
-HOURLY = 3
-PLATFORM_HOURLY = 4
-EVENTTYPE_PLATFORM_HOURLY = 5
-UNSUPPORTED_FORMAT = 0
-JSON = 1
-PROTOBUF = 2
-UNSUPPORTED_DATA_FORMAT = 0
-JSON_PAIL = 2
-PROTOBUF_SEQ = 3
-PROTOBUF_PARQUET = 4
-UNSUPPORTED_KIND = 0
-TIMESERIES = 1
-DEFAULT = 0
-REMOVE_MONITORING = 1
-INFORMATIVE_MONITORING = 2
-CONSENSUS_IGNORED = 3
-CONSENSUS_IGNORED_AND_INFORMATIVE_MONITORING = 4
-UNSUPPORTED_DATACENTER = 0
-AM5 = 2
-HK5 = 3
-NY8 = 4
-PAR = 5
-PA4 = 6
-SH5 = 7
-SV6 = 8
-TY5 = 9
-VA1 = 10
-AM6 = 11
-DA1 = 12
-UNSUPPORTED_ENVIRONMENT = 0
-PREPROD = 1
-PROD = 2
-UNSUPPORTED_PLATFORM = 0
-EU = 2
-US = 3
-AS = 4
-CN = 5
-UNSUPPORTED_EVENT_TYPE = 0
-ItemPageView = 2
-Sales = 3
-Basket = 4
-Other = 5
-UNKNOWN = 0
-NO = 1
-YES = 2
-
-GLUP_FIELD_NUMBER = 50000
-glup = _descriptor.FieldDescriptor(
-  name='glup', full_name='Criteo.Glup.glup', index=0,
-  number=50000, type=11, cpp_type=10, label=1,
-  has_default_value=False, default_value=None,
-  message_type=None, enum_type=None, containing_type=None,
-  is_extension=True, extension_scope=None,
-  options=None, file=DESCRIPTOR)
-CONTAINS_NULLABLE_FIELDS_FIELD_NUMBER = 50001
-contains_nullable_fields = _descriptor.FieldDescriptor(
-  name='contains_nullable_fields', full_name='Criteo.Glup.contains_nullable_fields', index=1,
-  number=50001, type=8, cpp_type=7, label=1,
-  has_default_value=False, default_value=False,
-  message_type=None, enum_type=None, containing_type=None,
-  is_extension=True, extension_scope=None,
-  options=None, file=DESCRIPTOR)
-GLUPFIELD_FIELD_NUMBER = 50000
-glupfield = _descriptor.FieldDescriptor(
-  name='glupfield', full_name='Criteo.Glup.glupfield', index=2,
-  number=50000, type=11, cpp_type=10, label=1,
-  has_default_value=False, default_value=None,
-  message_type=None, enum_type=None, containing_type=None,
-  is_extension=True, extension_scope=None,
-  options=None, file=DESCRIPTOR)
-JSON_MAPPING_FIELD_NUMBER = 50001
-json_mapping = _descriptor.FieldDescriptor(
-  name='json_mapping', full_name='Criteo.Glup.json_mapping', index=3,
-  number=50001, type=11, cpp_type=10, label=1,
-  has_default_value=False, default_value=None,
-  message_type=None, enum_type=None, containing_type=None,
-  is_extension=True, extension_scope=None,
-  options=None, file=DESCRIPTOR)
-JSON_FIELD_NUMBER = 50002
-json = _descriptor.FieldDescriptor(
-  name='json', full_name='Criteo.Glup.json', index=4,
-  number=50002, type=11, cpp_type=10, label=1,
-  has_default_value=False, default_value=None,
-  message_type=None, enum_type=None, containing_type=None,
-  is_extension=True, extension_scope=None,
-  options=None, file=DESCRIPTOR)
-
-_CONTROLMESSAGE_CLUSTER = _descriptor.EnumDescriptor(
-  name='Cluster',
-  full_name='Criteo.Glup.ControlMessage.Cluster',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='UNSUPPORTED_CLUSTER', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='LOCAL', index=1, number=2,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='CENTRAL', index=2, number=3,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='STREAM', index=3, number=4,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=6285,
-  serialized_end=6355,
-)
-_sym_db.RegisterEnumDescriptor(_CONTROLMESSAGE_CLUSTER)
-
-
-_KAFKAMESSAGEOPTIONS = _descriptor.Descriptor(
-  name='KafkaMessageOptions',
-  full_name='Criteo.Glup.KafkaMessageOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='topic', full_name='Criteo.Glup.KafkaMessageOptions.topic', index=0,
-      number=1, type=9, cpp_type=9, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=71,
-  serialized_end=107,
-)
-
-
-_DATASET = _descriptor.Descriptor(
-  name='DataSet',
-  full_name='Criteo.Glup.DataSet',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='id', full_name='Criteo.Glup.DataSet.id', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='format', full_name='Criteo.Glup.DataSet.format', index=1,
-      number=2, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='partition_scheme', full_name='Criteo.Glup.DataSet.partition_scheme', index=2,
-      number=3, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='java_class', full_name='Criteo.Glup.DataSet.java_class', index=3,
-      number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='for_tests', full_name='Criteo.Glup.DataSet.for_tests', index=4,
-      number=5, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='owner', full_name='Criteo.Glup.DataSet.owner', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='private', full_name='Criteo.Glup.DataSet.private', index=6,
-      number=7, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='kind', full_name='Criteo.Glup.DataSet.kind', index=7,
-      number=8, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='retention_days', full_name='Criteo.Glup.DataSet.retention_days', index=8,
-      number=9, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=110,
-  serialized_end=366,
-)
-
-
-_DATASETCHUNK = _descriptor.Descriptor(
-  name='DataSetChunk',
-  full_name='Criteo.Glup.DataSetChunk',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='partition', full_name='Criteo.Glup.DataSetChunk.partition', index=0,
-      number=1, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='format', full_name='Criteo.Glup.DataSetChunk.format', index=1,
-      number=2, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='datasetId', full_name='Criteo.Glup.DataSetChunk.datasetId', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=368,
-  serialized_end=488,
-)
-
-
-_DATASETFORMAT = _descriptor.Descriptor(
-  name='DataSetFormat',
-  full_name='Criteo.Glup.DataSetFormat',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='path', full_name='Criteo.Glup.DataSetFormat.path', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='file_format', full_name='Criteo.Glup.DataSetFormat.file_format', index=1,
-      number=2, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='partition_scheme', full_name='Criteo.Glup.DataSetFormat.partition_scheme', index=2,
-      number=3, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='start_partition', full_name='Criteo.Glup.DataSetFormat.start_partition', index=3,
-      number=4, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='end_partition', full_name='Criteo.Glup.DataSetFormat.end_partition', index=4,
-      number=5, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='retention_days', full_name='Criteo.Glup.DataSetFormat.retention_days', index=5,
-      number=7, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='priority', full_name='Criteo.Glup.DataSetFormat.priority', index=6,
-      number=8, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='label', full_name='Criteo.Glup.DataSetFormat.label', index=7,
-      number=9, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='monitoring_level', full_name='Criteo.Glup.DataSetFormat.monitoring_level', index=8,
-      number=10, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=491,
-  serialized_end=849,
-)
-
-
-_HDFSOPTIONS_IMPORTOPTIONS_VIEW_HIVEOPTIONS = _descriptor.Descriptor(
-  name='HiveOptions',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.View.HiveOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='partitioning', full_name='Criteo.Glup.HDFSOptions.ImportOptions.View.HiveOptions.partitioning', index=0,
-      number=3, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1292,
-  serialized_end=1357,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_VIEW = _descriptor.Descriptor(
-  name='View',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.View',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='hive', full_name='Criteo.Glup.HDFSOptions.ImportOptions.View.hive', index=0,
-      number=10, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_HDFSOPTIONS_IMPORTOPTIONS_VIEW_HIVEOPTIONS, ],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1213,
-  serialized_end=1357,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DEDUPOPTIONS = _descriptor.Descriptor(
-  name='DedupOptions',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.DedupOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='input_dataset_id', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.DedupOptions.input_dataset_id', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='input_format_label', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.DedupOptions.input_format_label', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='output_dataset_id', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.DedupOptions.output_dataset_id', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='output_format_label', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.DedupOptions.output_format_label', index=3,
-      number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='use_hippo_cuttle_job', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.DedupOptions.use_hippo_cuttle_job', index=4,
-      number=5, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=2331,
-  serialized_end=2485,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KAFKA2HDFSOPTIONS = _descriptor.Descriptor(
-  name='Kafka2HdfsOptions',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.Kafka2HdfsOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='topic', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.Kafka2HdfsOptions.topic', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='deduplicate', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.Kafka2HdfsOptions.deduplicate', index=1,
-      number=3, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='output_dataset_id', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.Kafka2HdfsOptions.output_dataset_id', index=2,
-      number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='output_format_label', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.Kafka2HdfsOptions.output_format_label', index=3,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=2487,
-  serialized_end=2604,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIG = _descriptor.Descriptor(
-  name='KacohaConfig',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfig',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='partitions_per_task', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfig.partitions_per_task', index=0,
-      number=1, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='poll_buffer_size_bytes', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfig.poll_buffer_size_bytes', index=1,
-      number=2, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=2606,
-  serialized_end=2681,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIGPERDC = _descriptor.Descriptor(
-  name='KacohaConfigPerDc',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfigPerDc',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='dc', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfigPerDc.dc', index=0,
-      number=1, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='config', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfigPerDc.config', index=1,
-      number=2, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=2684,
-  serialized_end=2819,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHAOPTIONS = _descriptor.Descriptor(
-  name='KaCoHaOptions',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.KaCoHaOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='topic', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.KaCoHaOptions.topic', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='output_dataset_id', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.KaCoHaOptions.output_dataset_id', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='deduplicate', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.KaCoHaOptions.deduplicate', index=2,
-      number=3, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='config', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.KaCoHaOptions.config', index=3,
-      number=4, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='output_format_label', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.KaCoHaOptions.output_format_label', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='config_per_dc', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.KaCoHaOptions.config_per_dc', index=5,
-      number=6, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=2822,
-  serialized_end=3099,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DATALOADEROPTIONS = _descriptor.Descriptor(
-  name='DataloaderOptions',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.DataloaderOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='platform', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.DataloaderOptions.platform', index=0,
-      number=1, type=14, cpp_type=8, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=3101,
-  serialized_end=3161,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SYNCOPTIONS = _descriptor.Descriptor(
-  name='SyncOptions',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SyncOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='from', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SyncOptions.from', index=0,
-      number=1, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='source_namespace', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SyncOptions.source_namespace', index=1,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='platforms', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SyncOptions.platforms', index=2,
-      number=6, type=14, cpp_type=8, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='is_backfilling', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SyncOptions.is_backfilling', index=3,
-      number=8, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='to_label', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SyncOptions.to_label', index=4,
-      number=9, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='to_dataset_id', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SyncOptions.to_dataset_id', index=5,
-      number=10, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='with_backfilling', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SyncOptions.with_backfilling', index=6,
-      number=11, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='is_scheduled_on_source', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SyncOptions.is_scheduled_on_source', index=7,
-      number=12, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=3164,
-  serialized_end=3405,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_BACKUPOPTIONS = _descriptor.Descriptor(
-  name='BackupOptions',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.BackupOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='from', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.BackupOptions.from', index=0,
-      number=1, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='source_namespace', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.BackupOptions.source_namespace', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='platforms', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.BackupOptions.platforms', index=2,
-      number=3, type=14, cpp_type=8, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=3407,
-  serialized_end=3527,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_TRANSCODINGOPTIONS = _descriptor.Descriptor(
-  name='TranscodingOptions',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.TranscodingOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='input_dataset_id', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.TranscodingOptions.input_dataset_id', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='output_dataset_id', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.TranscodingOptions.output_dataset_id', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='input_format', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.TranscodingOptions.input_format', index=2,
-      number=3, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='output_format', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.TranscodingOptions.output_format', index=3,
-      number=4, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='input_dataset_label', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.TranscodingOptions.input_dataset_label', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='output_dataset_label', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.TranscodingOptions.output_dataset_label', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='is_by_platform', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.TranscodingOptions.is_by_platform', index=6,
-      number=7, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=3530,
-  serialized_end=3789,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SAMPLEROPTIONS = _descriptor.Descriptor(
-  name='SamplerOptions',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SamplerOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='input_dataset_id', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SamplerOptions.input_dataset_id', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='input_format_label', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SamplerOptions.input_format_label', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='output_dataset_id', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SamplerOptions.output_dataset_id', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='output_format_label', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SamplerOptions.output_format_label', index=3,
-      number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='sampling_rate', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.SamplerOptions.sampling_rate', index=4,
-      number=5, type=2, cpp_type=6, label=1,
-      has_default_value=False, default_value=float(0),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=3792,
-  serialized_end=3941,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_COMPARATOROPTIONS = _descriptor.Descriptor(
-  name='ComparatorOptions',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.ComparatorOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='left_dataset_id', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.ComparatorOptions.left_dataset_id', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='left_format_label', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.ComparatorOptions.left_format_label', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='right_dataset_id', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.ComparatorOptions.right_dataset_id', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='right_format_label', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.ComparatorOptions.right_format_label', index=3,
-      number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='hostname', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.ComparatorOptions.hostname', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='ignored_fields', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.ComparatorOptions.ignored_fields', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=3944,
-  serialized_end=4111,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_EXTERNALOPTIONS = _descriptor.Descriptor(
-  name='ExternalOptions',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.ExternalOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=4113,
-  serialized_end=4130,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR = _descriptor.Descriptor(
-  name='Generator',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='dataloader', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.dataloader', index=0,
-      number=1, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='kafka2hdfs', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.kafka2hdfs', index=1,
-      number=2, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='sync', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.sync', index=2,
-      number=3, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='external', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.external', index=3,
-      number=4, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='backup', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.backup', index=4,
-      number=5, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='transcoding', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.transcoding', index=5,
-      number=6, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='kacoha', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.kacoha', index=6,
-      number=7, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='deduplicate', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.deduplicate', index=7,
-      number=8, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='sampler', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.sampler', index=8,
-      number=9, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='comparator', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.comparator', index=9,
-      number=10, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='to', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.to', index=10,
-      number=250, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='namespace', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.namespace', index=11,
-      number=251, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='start_date', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.start_date', index=12,
-      number=253, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='stop_date', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.stop_date', index=13,
-      number=254, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='ignore_cn', full_name='Criteo.Glup.HDFSOptions.ImportOptions.Generator.ignore_cn', index=14,
-      number=255, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DEDUPOPTIONS, _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KAFKA2HDFSOPTIONS, _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIG, _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIGPERDC, _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHAOPTIONS, _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DATALOADEROPTIONS, _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SYNCOPTIONS, _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_BACKUPOPTIONS, _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_TRANSCODINGOPTIONS, _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SAMPLEROPTIONS, _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_COMPARATOROPTIONS, _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_EXTERNALOPTIONS, ],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1360,
-  serialized_end=4130,
-)
-
-_HDFSOPTIONS_IMPORTOPTIONS = _descriptor.Descriptor(
-  name='ImportOptions',
-  full_name='Criteo.Glup.HDFSOptions.ImportOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='owner', full_name='Criteo.Glup.HDFSOptions.ImportOptions.owner', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='name', full_name='Criteo.Glup.HDFSOptions.ImportOptions.name', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='partitioning', full_name='Criteo.Glup.HDFSOptions.ImportOptions.partitioning', index=2,
-      number=4, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='format', full_name='Criteo.Glup.HDFSOptions.ImportOptions.format', index=3,
-      number=5, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='private', full_name='Criteo.Glup.HDFSOptions.ImportOptions.private', index=4,
-      number=6, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='generator', full_name='Criteo.Glup.HDFSOptions.ImportOptions.generator', index=5,
-      number=11, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='view', full_name='Criteo.Glup.HDFSOptions.ImportOptions.view', index=6,
-      number=12, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_HDFSOPTIONS_IMPORTOPTIONS_VIEW, _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR, ],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=924,
-  serialized_end=4130,
-)
-
-_HDFSOPTIONS = _descriptor.Descriptor(
-  name='HDFSOptions',
-  full_name='Criteo.Glup.HDFSOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='import', full_name='Criteo.Glup.HDFSOptions.import', index=0,
-      number=3, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_HDFSOPTIONS_IMPORTOPTIONS, ],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=852,
-  serialized_end=4130,
-)
-
-
-_PRODUCERTRANSPORTOPTIONS = _descriptor.Descriptor(
-  name='ProducerTransportOptions',
-  full_name='Criteo.Glup.ProducerTransportOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='syslog', full_name='Criteo.Glup.ProducerTransportOptions.syslog', index=0,
-      number=1, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='kafka', full_name='Criteo.Glup.ProducerTransportOptions.kafka', index=1,
-      number=2, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=4132,
-  serialized_end=4189,
-)
-
-
-_PROPERTYOPTIONS = _descriptor.Descriptor(
-  name='PropertyOptions',
-  full_name='Criteo.Glup.PropertyOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='valuable', full_name='Criteo.Glup.PropertyOptions.valuable', index=0,
-      number=1, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='high_volume', full_name='Criteo.Glup.PropertyOptions.high_volume', index=1,
-      number=2, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=4191,
-  serialized_end=4247,
-)
-
-
-_GLUPOPTIONS = _descriptor.Descriptor(
-  name='GlupOptions',
-  full_name='Criteo.Glup.GlupOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='kafka', full_name='Criteo.Glup.GlupOptions.kafka', index=0,
-      number=1, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='hdfs', full_name='Criteo.Glup.GlupOptions.hdfs', index=1,
-      number=2, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='sampling_pct', full_name='Criteo.Glup.GlupOptions.sampling_pct', index=2,
-      number=3, type=13, cpp_type=3, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='preprod_sampling_pct', full_name='Criteo.Glup.GlupOptions.preprod_sampling_pct', index=3,
-      number=4, type=13, cpp_type=3, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='dataset', full_name='Criteo.Glup.GlupOptions.dataset', index=4,
-      number=5, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='message_sampling_pct', full_name='Criteo.Glup.GlupOptions.message_sampling_pct', index=5,
-      number=6, type=13, cpp_type=3, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='producers', full_name='Criteo.Glup.GlupOptions.producers', index=6,
-      number=7, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='properties', full_name='Criteo.Glup.GlupOptions.properties', index=7,
-      number=8, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=4250,
-  serialized_end=4581,
-)
-
-
-_GLUPFIELDOPTIONS = _descriptor.Descriptor(
-  name='GlupFieldOptions',
-  full_name='Criteo.Glup.GlupFieldOptions',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='sampled', full_name='Criteo.Glup.GlupFieldOptions.sampled', index=0,
-      number=1, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='sampling_key', full_name='Criteo.Glup.GlupFieldOptions.sampling_key', index=1,
-      number=2, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='disabled_platform', full_name='Criteo.Glup.GlupFieldOptions.disabled_platform', index=2,
-      number=3, type=14, cpp_type=8, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='should_clean_pii', full_name='Criteo.Glup.GlupFieldOptions.should_clean_pii', index=3,
-      number=4, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='pending_deletion', full_name='Criteo.Glup.GlupFieldOptions.pending_deletion', index=4,
-      number=5, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='added_at', full_name='Criteo.Glup.GlupFieldOptions.added_at', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=4584,
-  serialized_end=4761,
-)
-
-
-_JSONMAPPING = _descriptor.Descriptor(
-  name='JsonMapping',
-  full_name='Criteo.Glup.JsonMapping',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='name', full_name='Criteo.Glup.JsonMapping.name', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='skip', full_name='Criteo.Glup.JsonMapping.skip', index=1,
-      number=2, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=4763,
-  serialized_end=4804,
-)
-
-
-_JSONALIAS = _descriptor.Descriptor(
-  name='JsonAlias',
-  full_name='Criteo.Glup.JsonAlias',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='name', full_name='Criteo.Glup.JsonAlias.name', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='use_enum_field_id', full_name='Criteo.Glup.JsonAlias.use_enum_field_id', index=1,
-      number=3, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=4806,
-  serialized_end=4858,
-)
-
-
-_BASEGLUPMESSAGE_SETFIELDSENTRY = _descriptor.Descriptor(
-  name='SetFieldsEntry',
-  full_name='Criteo.Glup.BaseGlupMessage.SetFieldsEntry',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='key', full_name='Criteo.Glup.BaseGlupMessage.SetFieldsEntry.key', index=0,
-      number=1, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='value', full_name='Criteo.Glup.BaseGlupMessage.SetFieldsEntry.value', index=1,
-      number=2, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=5116,
-  serialized_end=5164,
-)
-
-_BASEGLUPMESSAGE = _descriptor.Descriptor(
-  name='BaseGlupMessage',
-  full_name='Criteo.Glup.BaseGlupMessage',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='glup_origin', full_name='Criteo.Glup.BaseGlupMessage.glup_origin', index=0,
-      number=1, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='partition', full_name='Criteo.Glup.BaseGlupMessage.partition', index=1,
-      number=2, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='set_fields', full_name='Criteo.Glup.BaseGlupMessage.set_fields', index=2,
-      number=50010, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='control_message', full_name='Criteo.Glup.BaseGlupMessage.control_message', index=3,
-      number=2097151, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222\265\030\014\n\n__metadata')), file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_BASEGLUPMESSAGE_SETFIELDSENTRY, ],
-  enum_types=[
-  ],
-  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\210\265\030\001')),
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=4861,
-  serialized_end=5170,
-)
-
-
-_FORWARDEDWATERMARKMESSAGE = _descriptor.Descriptor(
-  name='ForwardedWatermarkMessage',
-  full_name='Criteo.Glup.ForwardedWatermarkMessage',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='original_kafka_offset', full_name='Criteo.Glup.ForwardedWatermarkMessage.original_kafka_offset', index=0,
-      number=5, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='timestamp', full_name='Criteo.Glup.ForwardedWatermarkMessage.timestamp', index=1,
-      number=6, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='consolidation_enabled', full_name='Criteo.Glup.ForwardedWatermarkMessage.consolidation_enabled', index=2,
-      number=7, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='dataset_id', full_name='Criteo.Glup.ForwardedWatermarkMessage.dataset_id', index=3,
-      number=10, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='dataset_format_label', full_name='Criteo.Glup.ForwardedWatermarkMessage.dataset_format_label', index=4,
-      number=11, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='control_message', full_name='Criteo.Glup.ForwardedWatermarkMessage.control_message', index=5,
-      number=2097151, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222\265\030\014\n\n__metadata')), file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=5173,
-  serialized_end=5415,
-)
-
-
-_LOCATION = _descriptor.Descriptor(
-  name='Location',
-  full_name='Criteo.Glup.Location',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='env', full_name='Criteo.Glup.Location.env', index=0,
-      number=1, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='dc', full_name='Criteo.Glup.Location.dc', index=1,
-      number=2, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='label', full_name='Criteo.Glup.Location.label', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='dataset_id', full_name='Criteo.Glup.Location.dataset_id', index=3,
-      number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=5417,
-  serialized_end=5538,
-)
-
-
-_ORIGIN = _descriptor.Descriptor(
-  name='Origin',
-  full_name='Criteo.Glup.Origin',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='datacenter', full_name='Criteo.Glup.Origin.datacenter', index=0,
-      number=1, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='ip4', full_name='Criteo.Glup.Origin.ip4', index=1,
-      number=2, type=7, cpp_type=3, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\212\265\030\t\n\007host_ip')), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='hostname', full_name='Criteo.Glup.Origin.hostname', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='container_task', full_name='Criteo.Glup.Origin.container_task', index=3,
-      number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\212\265\030\002\020\001')), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='container_app', full_name='Criteo.Glup.Origin.container_app', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\212\265\030\002\020\001')), file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=5541,
-  serialized_end=5703,
-)
-
-
-_CONTROLMESSAGE_WATERMARKORIGIN = _descriptor.Descriptor(
-  name='WatermarkOrigin',
-  full_name='Criteo.Glup.ControlMessage.WatermarkOrigin',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='kafka_topic', full_name='Criteo.Glup.ControlMessage.WatermarkOrigin.kafka_topic', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='datacenter', full_name='Criteo.Glup.ControlMessage.WatermarkOrigin.datacenter', index=1,
-      number=2, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='cluster', full_name='Criteo.Glup.ControlMessage.WatermarkOrigin.cluster', index=2,
-      number=3, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=5783,
-  serialized_end=5920,
-)
-
-_CONTROLMESSAGE_WATERMARK_SETFIELDSENTRY = _descriptor.Descriptor(
-  name='SetFieldsEntry',
-  full_name='Criteo.Glup.ControlMessage.Watermark.SetFieldsEntry',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='key', full_name='Criteo.Glup.ControlMessage.Watermark.SetFieldsEntry.key', index=0,
-      number=1, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='value', full_name='Criteo.Glup.ControlMessage.Watermark.SetFieldsEntry.value', index=1,
-      number=2, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=5116,
-  serialized_end=5164,
-)
-
-_CONTROLMESSAGE_WATERMARK = _descriptor.Descriptor(
-  name='Watermark',
-  full_name='Criteo.Glup.ControlMessage.Watermark',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='type', full_name='Criteo.Glup.ControlMessage.Watermark.type', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='hostname', full_name='Criteo.Glup.ControlMessage.Watermark.hostname', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='kafka_topic', full_name='Criteo.Glup.ControlMessage.Watermark.kafka_topic', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='partition', full_name='Criteo.Glup.ControlMessage.Watermark.partition', index=3,
-      number=4, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='partition_count', full_name='Criteo.Glup.ControlMessage.Watermark.partition_count', index=4,
-      number=5, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='process_uuid', full_name='Criteo.Glup.ControlMessage.Watermark.process_uuid', index=5,
-      number=6, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b(""),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='region', full_name='Criteo.Glup.ControlMessage.Watermark.region', index=6,
-      number=7, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='timestamp_seconds', full_name='Criteo.Glup.ControlMessage.Watermark.timestamp_seconds', index=7,
-      number=8, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222\265\030\013\n\ttimestamp')), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='cluster', full_name='Criteo.Glup.ControlMessage.Watermark.cluster', index=8,
-      number=9, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='environment', full_name='Criteo.Glup.ControlMessage.Watermark.environment', index=9,
-      number=10, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='set_fields', full_name='Criteo.Glup.ControlMessage.Watermark.set_fields', index=10,
-      number=50010, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_CONTROLMESSAGE_WATERMARK_SETFIELDSENTRY, ],
-  enum_types=[
-  ],
-  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\210\265\030\001')),
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=5923,
-  serialized_end=6283,
-)
-
-_CONTROLMESSAGE = _descriptor.Descriptor(
-  name='ControlMessage',
-  full_name='Criteo.Glup.ControlMessage',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='watermark', full_name='Criteo.Glup.ControlMessage.watermark', index=0,
-      number=1, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_CONTROLMESSAGE_WATERMARKORIGIN, _CONTROLMESSAGE_WATERMARK, ],
-  enum_types=[
-    _CONTROLMESSAGE_CLUSTER,
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=5706,
-  serialized_end=6355,
-)
-
-
-_PARTITION = _descriptor.Descriptor(
-  name='Partition',
-  full_name='Criteo.Glup.Partition',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='timestamp_seconds', full_name='Criteo.Glup.Partition.timestamp_seconds', index=0,
-      number=1, type=4, cpp_type=4, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\212\265\030\013\n\ttimestamp')), file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='host_platform', full_name='Criteo.Glup.Partition.host_platform', index=1,
-      number=2, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='event_type', full_name='Criteo.Glup.Partition.event_type', index=2,
-      number=3, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\212\265\030\002\020\001')), file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=6358,
-  serialized_end=6511,
-)
-
-
-_HDFSPARTITION = _descriptor.Descriptor(
-  name='HDFSPartition',
-  full_name='Criteo.Glup.HDFSPartition',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='timestamp_seconds', full_name='Criteo.Glup.HDFSPartition.timestamp_seconds', index=0,
-      number=1, type=4, cpp_type=4, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='host_platform', full_name='Criteo.Glup.HDFSPartition.host_platform', index=1,
-      number=2, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='event_type', full_name='Criteo.Glup.HDFSPartition.event_type', index=2,
-      number=3, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='depth', full_name='Criteo.Glup.HDFSPartition.depth', index=3,
-      number=4, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=6514,
-  serialized_end=6661,
-)
-
-
-_HASH128_SETFIELDSENTRY = _descriptor.Descriptor(
-  name='SetFieldsEntry',
-  full_name='Criteo.Glup.Hash128.SetFieldsEntry',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='key', full_name='Criteo.Glup.Hash128.SetFieldsEntry.key', index=0,
-      number=1, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='value', full_name='Criteo.Glup.Hash128.SetFieldsEntry.value', index=1,
-      number=2, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=5116,
-  serialized_end=5164,
-)
-
-_HASH128 = _descriptor.Descriptor(
-  name='Hash128',
-  full_name='Criteo.Glup.Hash128',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='most_sig_bits', full_name='Criteo.Glup.Hash128.most_sig_bits', index=0,
-      number=1, type=6, cpp_type=4, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='least_sig_bits', full_name='Criteo.Glup.Hash128.least_sig_bits', index=1,
-      number=2, type=6, cpp_type=4, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='set_fields', full_name='Criteo.Glup.Hash128.set_fields', index=2,
-      number=50010, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_HASH128_SETFIELDSENTRY, ],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=6664,
-  serialized_end=6829,
-)
-
-_DATASET.fields_by_name['format'].message_type = _DATASETFORMAT
-_DATASET.fields_by_name['partition_scheme'].enum_type = _PARTITIONSCHEME
-_DATASET.fields_by_name['kind'].enum_type = _DATASETKIND
-_DATASETCHUNK.fields_by_name['partition'].message_type = _PARTITION
-_DATASETCHUNK.fields_by_name['format'].message_type = _DATASETFORMAT
-_DATASETFORMAT.fields_by_name['file_format'].enum_type = _HDFSDATAFORMAT
-_DATASETFORMAT.fields_by_name['partition_scheme'].enum_type = _PARTITIONSCHEME
-_DATASETFORMAT.fields_by_name['start_partition'].message_type = _HDFSPARTITION
-_DATASETFORMAT.fields_by_name['end_partition'].message_type = _HDFSPARTITION
-_DATASETFORMAT.fields_by_name['monitoring_level'].enum_type = _MONITORINGLEVEL
-_HDFSOPTIONS_IMPORTOPTIONS_VIEW_HIVEOPTIONS.fields_by_name['partitioning'].enum_type = _PARTITIONSCHEME
-_HDFSOPTIONS_IMPORTOPTIONS_VIEW_HIVEOPTIONS.containing_type = _HDFSOPTIONS_IMPORTOPTIONS_VIEW
-_HDFSOPTIONS_IMPORTOPTIONS_VIEW.fields_by_name['hive'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_VIEW_HIVEOPTIONS
-_HDFSOPTIONS_IMPORTOPTIONS_VIEW.containing_type = _HDFSOPTIONS_IMPORTOPTIONS
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DEDUPOPTIONS.containing_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KAFKA2HDFSOPTIONS.containing_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIG.containing_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIGPERDC.fields_by_name['dc'].enum_type = _DATACENTER
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIGPERDC.fields_by_name['config'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIG
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIGPERDC.containing_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHAOPTIONS.fields_by_name['config'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIG
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHAOPTIONS.fields_by_name['config_per_dc'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIGPERDC
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHAOPTIONS.containing_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DATALOADEROPTIONS.fields_by_name['platform'].enum_type = _PLATFORM
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DATALOADEROPTIONS.containing_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SYNCOPTIONS.fields_by_name['from'].message_type = _LOCATION
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SYNCOPTIONS.fields_by_name['platforms'].enum_type = _PLATFORM
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SYNCOPTIONS.containing_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_BACKUPOPTIONS.fields_by_name['from'].message_type = _LOCATION
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_BACKUPOPTIONS.fields_by_name['platforms'].enum_type = _PLATFORM
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_BACKUPOPTIONS.containing_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_TRANSCODINGOPTIONS.fields_by_name['input_format'].enum_type = _HDFSDATAFORMAT
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_TRANSCODINGOPTIONS.fields_by_name['output_format'].enum_type = _HDFSDATAFORMAT
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_TRANSCODINGOPTIONS.containing_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SAMPLEROPTIONS.containing_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_COMPARATOROPTIONS.containing_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_EXTERNALOPTIONS.containing_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR.fields_by_name['dataloader'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DATALOADEROPTIONS
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR.fields_by_name['kafka2hdfs'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KAFKA2HDFSOPTIONS
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR.fields_by_name['sync'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SYNCOPTIONS
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR.fields_by_name['external'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_EXTERNALOPTIONS
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR.fields_by_name['backup'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_BACKUPOPTIONS
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR.fields_by_name['transcoding'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_TRANSCODINGOPTIONS
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR.fields_by_name['kacoha'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHAOPTIONS
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR.fields_by_name['deduplicate'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DEDUPOPTIONS
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR.fields_by_name['sampler'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SAMPLEROPTIONS
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR.fields_by_name['comparator'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_COMPARATOROPTIONS
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR.fields_by_name['to'].message_type = _LOCATION
-_HDFSOPTIONS_IMPORTOPTIONS_GENERATOR.containing_type = _HDFSOPTIONS_IMPORTOPTIONS
-_HDFSOPTIONS_IMPORTOPTIONS.fields_by_name['partitioning'].enum_type = _PARTITIONSCHEME
-_HDFSOPTIONS_IMPORTOPTIONS.fields_by_name['format'].enum_type = _HDFSDATAFORMAT
-_HDFSOPTIONS_IMPORTOPTIONS.fields_by_name['generator'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR
-_HDFSOPTIONS_IMPORTOPTIONS.fields_by_name['view'].message_type = _HDFSOPTIONS_IMPORTOPTIONS_VIEW
-_HDFSOPTIONS_IMPORTOPTIONS.containing_type = _HDFSOPTIONS
-_HDFSOPTIONS.fields_by_name['import'].message_type = _HDFSOPTIONS_IMPORTOPTIONS
-_GLUPOPTIONS.fields_by_name['kafka'].message_type = _KAFKAMESSAGEOPTIONS
-_GLUPOPTIONS.fields_by_name['hdfs'].message_type = _HDFSOPTIONS
-_GLUPOPTIONS.fields_by_name['dataset'].message_type = _DATASET
-_GLUPOPTIONS.fields_by_name['producers'].message_type = _PRODUCERTRANSPORTOPTIONS
-_GLUPOPTIONS.fields_by_name['properties'].message_type = _PROPERTYOPTIONS
-_GLUPFIELDOPTIONS.fields_by_name['disabled_platform'].enum_type = _PLATFORM
-_BASEGLUPMESSAGE_SETFIELDSENTRY.containing_type = _BASEGLUPMESSAGE
-_BASEGLUPMESSAGE.fields_by_name['glup_origin'].message_type = _ORIGIN
-_BASEGLUPMESSAGE.fields_by_name['partition'].message_type = _PARTITION
-_BASEGLUPMESSAGE.fields_by_name['set_fields'].message_type = _BASEGLUPMESSAGE_SETFIELDSENTRY
-_BASEGLUPMESSAGE.fields_by_name['control_message'].message_type = _CONTROLMESSAGE_WATERMARK
-_FORWARDEDWATERMARKMESSAGE.fields_by_name['control_message'].message_type = _CONTROLMESSAGE_WATERMARK
-_LOCATION.fields_by_name['env'].enum_type = _ENVIRONMENT
-_LOCATION.fields_by_name['dc'].enum_type = _DATACENTER
-_ORIGIN.fields_by_name['datacenter'].enum_type = _DATACENTER
-_CONTROLMESSAGE_WATERMARKORIGIN.fields_by_name['datacenter'].enum_type = _DATACENTER
-_CONTROLMESSAGE_WATERMARKORIGIN.fields_by_name['cluster'].enum_type = _CONTROLMESSAGE_CLUSTER
-_CONTROLMESSAGE_WATERMARKORIGIN.containing_type = _CONTROLMESSAGE
-_CONTROLMESSAGE_WATERMARK_SETFIELDSENTRY.containing_type = _CONTROLMESSAGE_WATERMARK
-_CONTROLMESSAGE_WATERMARK.fields_by_name['set_fields'].message_type = _CONTROLMESSAGE_WATERMARK_SETFIELDSENTRY
-_CONTROLMESSAGE_WATERMARK.containing_type = _CONTROLMESSAGE
-_CONTROLMESSAGE.fields_by_name['watermark'].message_type = _CONTROLMESSAGE_WATERMARK
-_CONTROLMESSAGE_CLUSTER.containing_type = _CONTROLMESSAGE
-_PARTITION.fields_by_name['host_platform'].enum_type = _PLATFORM
-_PARTITION.fields_by_name['event_type'].enum_type = _EVENTTYPE
-_HDFSPARTITION.fields_by_name['host_platform'].enum_type = _PLATFORM
-_HDFSPARTITION.fields_by_name['event_type'].enum_type = _EVENTTYPE
-_HASH128_SETFIELDSENTRY.containing_type = _HASH128
-_HASH128.fields_by_name['set_fields'].message_type = _HASH128_SETFIELDSENTRY
-DESCRIPTOR.message_types_by_name['KafkaMessageOptions'] = _KAFKAMESSAGEOPTIONS
-DESCRIPTOR.message_types_by_name['DataSet'] = _DATASET
-DESCRIPTOR.message_types_by_name['DataSetChunk'] = _DATASETCHUNK
-DESCRIPTOR.message_types_by_name['DataSetFormat'] = _DATASETFORMAT
-DESCRIPTOR.message_types_by_name['HDFSOptions'] = _HDFSOPTIONS
-DESCRIPTOR.message_types_by_name['ProducerTransportOptions'] = _PRODUCERTRANSPORTOPTIONS
-DESCRIPTOR.message_types_by_name['PropertyOptions'] = _PROPERTYOPTIONS
-DESCRIPTOR.message_types_by_name['GlupOptions'] = _GLUPOPTIONS
-DESCRIPTOR.message_types_by_name['GlupFieldOptions'] = _GLUPFIELDOPTIONS
-DESCRIPTOR.message_types_by_name['JsonMapping'] = _JSONMAPPING
-DESCRIPTOR.message_types_by_name['JsonAlias'] = _JSONALIAS
-DESCRIPTOR.message_types_by_name['BaseGlupMessage'] = _BASEGLUPMESSAGE
-DESCRIPTOR.message_types_by_name['ForwardedWatermarkMessage'] = _FORWARDEDWATERMARKMESSAGE
-DESCRIPTOR.message_types_by_name['Location'] = _LOCATION
-DESCRIPTOR.message_types_by_name['Origin'] = _ORIGIN
-DESCRIPTOR.message_types_by_name['ControlMessage'] = _CONTROLMESSAGE
-DESCRIPTOR.message_types_by_name['Partition'] = _PARTITION
-DESCRIPTOR.message_types_by_name['HDFSPartition'] = _HDFSPARTITION
-DESCRIPTOR.message_types_by_name['Hash128'] = _HASH128
-DESCRIPTOR.enum_types_by_name['PartitionScheme'] = _PARTITIONSCHEME
-DESCRIPTOR.enum_types_by_name['MessageFormat'] = _MESSAGEFORMAT
-DESCRIPTOR.enum_types_by_name['HDFSDataFormat'] = _HDFSDATAFORMAT
-DESCRIPTOR.enum_types_by_name['DataSetKind'] = _DATASETKIND
-DESCRIPTOR.enum_types_by_name['MonitoringLevel'] = _MONITORINGLEVEL
-DESCRIPTOR.enum_types_by_name['DataCenter'] = _DATACENTER
-DESCRIPTOR.enum_types_by_name['Environment'] = _ENVIRONMENT
-DESCRIPTOR.enum_types_by_name['Platform'] = _PLATFORM
-DESCRIPTOR.enum_types_by_name['EventType'] = _EVENTTYPE
-DESCRIPTOR.enum_types_by_name['YesNo'] = _YESNO
-DESCRIPTOR.extensions_by_name['glup'] = glup
-DESCRIPTOR.extensions_by_name['contains_nullable_fields'] = contains_nullable_fields
-DESCRIPTOR.extensions_by_name['glupfield'] = glupfield
-DESCRIPTOR.extensions_by_name['json_mapping'] = json_mapping
-DESCRIPTOR.extensions_by_name['json'] = json
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-KafkaMessageOptions = _reflection.GeneratedProtocolMessageType('KafkaMessageOptions', (_message.Message,), dict(
-  DESCRIPTOR = _KAFKAMESSAGEOPTIONS,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.KafkaMessageOptions)
-  ))
-_sym_db.RegisterMessage(KafkaMessageOptions)
-
-DataSet = _reflection.GeneratedProtocolMessageType('DataSet', (_message.Message,), dict(
-  DESCRIPTOR = _DATASET,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.DataSet)
-  ))
-_sym_db.RegisterMessage(DataSet)
-
-DataSetChunk = _reflection.GeneratedProtocolMessageType('DataSetChunk', (_message.Message,), dict(
-  DESCRIPTOR = _DATASETCHUNK,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.DataSetChunk)
-  ))
-_sym_db.RegisterMessage(DataSetChunk)
-
-DataSetFormat = _reflection.GeneratedProtocolMessageType('DataSetFormat', (_message.Message,), dict(
-  DESCRIPTOR = _DATASETFORMAT,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.DataSetFormat)
-  ))
-_sym_db.RegisterMessage(DataSetFormat)
-
-HDFSOptions = _reflection.GeneratedProtocolMessageType('HDFSOptions', (_message.Message,), dict(
-
-  ImportOptions = _reflection.GeneratedProtocolMessageType('ImportOptions', (_message.Message,), dict(
-
-    View = _reflection.GeneratedProtocolMessageType('View', (_message.Message,), dict(
-
-      HiveOptions = _reflection.GeneratedProtocolMessageType('HiveOptions', (_message.Message,), dict(
-        DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_VIEW_HIVEOPTIONS,
-        __module__ = 'metadata_proto_pb2'
-        # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.View.HiveOptions)
-        ))
-      ,
-      DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_VIEW,
-      __module__ = 'metadata_proto_pb2'
-      # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.View)
-      ))
-    ,
-
-    Generator = _reflection.GeneratedProtocolMessageType('Generator', (_message.Message,), dict(
-
-      DedupOptions = _reflection.GeneratedProtocolMessageType('DedupOptions', (_message.Message,), dict(
-        DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DEDUPOPTIONS,
-        __module__ = 'metadata_proto_pb2'
-        # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.Generator.DedupOptions)
-        ))
-      ,
-
-      Kafka2HdfsOptions = _reflection.GeneratedProtocolMessageType('Kafka2HdfsOptions', (_message.Message,), dict(
-        DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KAFKA2HDFSOPTIONS,
-        __module__ = 'metadata_proto_pb2'
-        # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.Generator.Kafka2HdfsOptions)
-        ))
-      ,
-
-      KacohaConfig = _reflection.GeneratedProtocolMessageType('KacohaConfig', (_message.Message,), dict(
-        DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIG,
-        __module__ = 'metadata_proto_pb2'
-        # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfig)
-        ))
-      ,
-
-      KacohaConfigPerDc = _reflection.GeneratedProtocolMessageType('KacohaConfigPerDc', (_message.Message,), dict(
-        DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHACONFIGPERDC,
-        __module__ = 'metadata_proto_pb2'
-        # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.Generator.KacohaConfigPerDc)
-        ))
-      ,
-
-      KaCoHaOptions = _reflection.GeneratedProtocolMessageType('KaCoHaOptions', (_message.Message,), dict(
-        DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_KACOHAOPTIONS,
-        __module__ = 'metadata_proto_pb2'
-        # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.Generator.KaCoHaOptions)
-        ))
-      ,
-
-      DataloaderOptions = _reflection.GeneratedProtocolMessageType('DataloaderOptions', (_message.Message,), dict(
-        DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_DATALOADEROPTIONS,
-        __module__ = 'metadata_proto_pb2'
-        # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.Generator.DataloaderOptions)
-        ))
-      ,
-
-      SyncOptions = _reflection.GeneratedProtocolMessageType('SyncOptions', (_message.Message,), dict(
-        DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SYNCOPTIONS,
-        __module__ = 'metadata_proto_pb2'
-        # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.Generator.SyncOptions)
-        ))
-      ,
-
-      BackupOptions = _reflection.GeneratedProtocolMessageType('BackupOptions', (_message.Message,), dict(
-        DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_BACKUPOPTIONS,
-        __module__ = 'metadata_proto_pb2'
-        # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.Generator.BackupOptions)
-        ))
-      ,
-
-      TranscodingOptions = _reflection.GeneratedProtocolMessageType('TranscodingOptions', (_message.Message,), dict(
-        DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_TRANSCODINGOPTIONS,
-        __module__ = 'metadata_proto_pb2'
-        # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.Generator.TranscodingOptions)
-        ))
-      ,
-
-      SamplerOptions = _reflection.GeneratedProtocolMessageType('SamplerOptions', (_message.Message,), dict(
-        DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_SAMPLEROPTIONS,
-        __module__ = 'metadata_proto_pb2'
-        # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.Generator.SamplerOptions)
-        ))
-      ,
-
-      ComparatorOptions = _reflection.GeneratedProtocolMessageType('ComparatorOptions', (_message.Message,), dict(
-        DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_COMPARATOROPTIONS,
-        __module__ = 'metadata_proto_pb2'
-        # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.Generator.ComparatorOptions)
-        ))
-      ,
-
-      ExternalOptions = _reflection.GeneratedProtocolMessageType('ExternalOptions', (_message.Message,), dict(
-        DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR_EXTERNALOPTIONS,
-        __module__ = 'metadata_proto_pb2'
-        # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.Generator.ExternalOptions)
-        ))
-      ,
-      DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS_GENERATOR,
-      __module__ = 'metadata_proto_pb2'
-      # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions.Generator)
-      ))
-    ,
-    DESCRIPTOR = _HDFSOPTIONS_IMPORTOPTIONS,
-    __module__ = 'metadata_proto_pb2'
-    # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions.ImportOptions)
-    ))
-  ,
-  DESCRIPTOR = _HDFSOPTIONS,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSOptions)
-  ))
-_sym_db.RegisterMessage(HDFSOptions)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.View)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.View.HiveOptions)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.Generator)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.Generator.DedupOptions)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.Generator.Kafka2HdfsOptions)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.Generator.KacohaConfig)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.Generator.KacohaConfigPerDc)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.Generator.KaCoHaOptions)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.Generator.DataloaderOptions)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.Generator.SyncOptions)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.Generator.BackupOptions)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.Generator.TranscodingOptions)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.Generator.SamplerOptions)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.Generator.ComparatorOptions)
-_sym_db.RegisterMessage(HDFSOptions.ImportOptions.Generator.ExternalOptions)
-
-ProducerTransportOptions = _reflection.GeneratedProtocolMessageType('ProducerTransportOptions', (_message.Message,), dict(
-  DESCRIPTOR = _PRODUCERTRANSPORTOPTIONS,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.ProducerTransportOptions)
-  ))
-_sym_db.RegisterMessage(ProducerTransportOptions)
-
-PropertyOptions = _reflection.GeneratedProtocolMessageType('PropertyOptions', (_message.Message,), dict(
-  DESCRIPTOR = _PROPERTYOPTIONS,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.PropertyOptions)
-  ))
-_sym_db.RegisterMessage(PropertyOptions)
-
-GlupOptions = _reflection.GeneratedProtocolMessageType('GlupOptions', (_message.Message,), dict(
-  DESCRIPTOR = _GLUPOPTIONS,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.GlupOptions)
-  ))
-_sym_db.RegisterMessage(GlupOptions)
-
-GlupFieldOptions = _reflection.GeneratedProtocolMessageType('GlupFieldOptions', (_message.Message,), dict(
-  DESCRIPTOR = _GLUPFIELDOPTIONS,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.GlupFieldOptions)
-  ))
-_sym_db.RegisterMessage(GlupFieldOptions)
-
-JsonMapping = _reflection.GeneratedProtocolMessageType('JsonMapping', (_message.Message,), dict(
-  DESCRIPTOR = _JSONMAPPING,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.JsonMapping)
-  ))
-_sym_db.RegisterMessage(JsonMapping)
-
-JsonAlias = _reflection.GeneratedProtocolMessageType('JsonAlias', (_message.Message,), dict(
-  DESCRIPTOR = _JSONALIAS,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.JsonAlias)
-  ))
-_sym_db.RegisterMessage(JsonAlias)
-
-BaseGlupMessage = _reflection.GeneratedProtocolMessageType('BaseGlupMessage', (_message.Message,), dict(
-
-  SetFieldsEntry = _reflection.GeneratedProtocolMessageType('SetFieldsEntry', (_message.Message,), dict(
-    DESCRIPTOR = _BASEGLUPMESSAGE_SETFIELDSENTRY,
-    __module__ = 'metadata_proto_pb2'
-    # @@protoc_insertion_point(class_scope:Criteo.Glup.BaseGlupMessage.SetFieldsEntry)
-    ))
-  ,
-  DESCRIPTOR = _BASEGLUPMESSAGE,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.BaseGlupMessage)
-  ))
-_sym_db.RegisterMessage(BaseGlupMessage)
-_sym_db.RegisterMessage(BaseGlupMessage.SetFieldsEntry)
-
-ForwardedWatermarkMessage = _reflection.GeneratedProtocolMessageType('ForwardedWatermarkMessage', (_message.Message,), dict(
-  DESCRIPTOR = _FORWARDEDWATERMARKMESSAGE,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.ForwardedWatermarkMessage)
-  ))
-_sym_db.RegisterMessage(ForwardedWatermarkMessage)
-
-Location = _reflection.GeneratedProtocolMessageType('Location', (_message.Message,), dict(
-  DESCRIPTOR = _LOCATION,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.Location)
-  ))
-_sym_db.RegisterMessage(Location)
-
-Origin = _reflection.GeneratedProtocolMessageType('Origin', (_message.Message,), dict(
-  DESCRIPTOR = _ORIGIN,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.Origin)
-  ))
-_sym_db.RegisterMessage(Origin)
-
-ControlMessage = _reflection.GeneratedProtocolMessageType('ControlMessage', (_message.Message,), dict(
-
-  WatermarkOrigin = _reflection.GeneratedProtocolMessageType('WatermarkOrigin', (_message.Message,), dict(
-    DESCRIPTOR = _CONTROLMESSAGE_WATERMARKORIGIN,
-    __module__ = 'metadata_proto_pb2'
-    # @@protoc_insertion_point(class_scope:Criteo.Glup.ControlMessage.WatermarkOrigin)
-    ))
-  ,
-
-  Watermark = _reflection.GeneratedProtocolMessageType('Watermark', (_message.Message,), dict(
-
-    SetFieldsEntry = _reflection.GeneratedProtocolMessageType('SetFieldsEntry', (_message.Message,), dict(
-      DESCRIPTOR = _CONTROLMESSAGE_WATERMARK_SETFIELDSENTRY,
-      __module__ = 'metadata_proto_pb2'
-      # @@protoc_insertion_point(class_scope:Criteo.Glup.ControlMessage.Watermark.SetFieldsEntry)
-      ))
-    ,
-    DESCRIPTOR = _CONTROLMESSAGE_WATERMARK,
-    __module__ = 'metadata_proto_pb2'
-    # @@protoc_insertion_point(class_scope:Criteo.Glup.ControlMessage.Watermark)
-    ))
-  ,
-  DESCRIPTOR = _CONTROLMESSAGE,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.ControlMessage)
-  ))
-_sym_db.RegisterMessage(ControlMessage)
-_sym_db.RegisterMessage(ControlMessage.WatermarkOrigin)
-_sym_db.RegisterMessage(ControlMessage.Watermark)
-_sym_db.RegisterMessage(ControlMessage.Watermark.SetFieldsEntry)
-
-Partition = _reflection.GeneratedProtocolMessageType('Partition', (_message.Message,), dict(
-  DESCRIPTOR = _PARTITION,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.Partition)
-  ))
-_sym_db.RegisterMessage(Partition)
-
-HDFSPartition = _reflection.GeneratedProtocolMessageType('HDFSPartition', (_message.Message,), dict(
-  DESCRIPTOR = _HDFSPARTITION,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.HDFSPartition)
-  ))
-_sym_db.RegisterMessage(HDFSPartition)
-
-Hash128 = _reflection.GeneratedProtocolMessageType('Hash128', (_message.Message,), dict(
-
-  SetFieldsEntry = _reflection.GeneratedProtocolMessageType('SetFieldsEntry', (_message.Message,), dict(
-    DESCRIPTOR = _HASH128_SETFIELDSENTRY,
-    __module__ = 'metadata_proto_pb2'
-    # @@protoc_insertion_point(class_scope:Criteo.Glup.Hash128.SetFieldsEntry)
-    ))
-  ,
-  DESCRIPTOR = _HASH128,
-  __module__ = 'metadata_proto_pb2'
-  # @@protoc_insertion_point(class_scope:Criteo.Glup.Hash128)
-  ))
-_sym_db.RegisterMessage(Hash128)
-_sym_db.RegisterMessage(Hash128.SetFieldsEntry)
-
-glup.message_type = _GLUPOPTIONS
-google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(glup)
-google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(contains_nullable_fields)
-glupfield.message_type = _GLUPFIELDOPTIONS
-google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(glupfield)
-json_mapping.message_type = _JSONMAPPING
-google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(json_mapping)
-json.message_type = _JSONALIAS
-google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(json)
-
-DESCRIPTOR.has_options = True
-DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\017com.criteo.glup'))
-_BASEGLUPMESSAGE_SETFIELDSENTRY.has_options = True
-_BASEGLUPMESSAGE_SETFIELDSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
-_BASEGLUPMESSAGE.fields_by_name['control_message'].has_options = True
-_BASEGLUPMESSAGE.fields_by_name['control_message']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222\265\030\014\n\n__metadata'))
-_BASEGLUPMESSAGE.has_options = True
-_BASEGLUPMESSAGE._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\210\265\030\001'))
-_FORWARDEDWATERMARKMESSAGE.fields_by_name['control_message'].has_options = True
-_FORWARDEDWATERMARKMESSAGE.fields_by_name['control_message']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222\265\030\014\n\n__metadata'))
-_ORIGIN.fields_by_name['ip4'].has_options = True
-_ORIGIN.fields_by_name['ip4']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\212\265\030\t\n\007host_ip'))
-_ORIGIN.fields_by_name['container_task'].has_options = True
-_ORIGIN.fields_by_name['container_task']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\212\265\030\002\020\001'))
-_ORIGIN.fields_by_name['container_app'].has_options = True
-_ORIGIN.fields_by_name['container_app']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\212\265\030\002\020\001'))
-_CONTROLMESSAGE_WATERMARK_SETFIELDSENTRY.has_options = True
-_CONTROLMESSAGE_WATERMARK_SETFIELDSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
-_CONTROLMESSAGE_WATERMARK.fields_by_name['timestamp_seconds'].has_options = True
-_CONTROLMESSAGE_WATERMARK.fields_by_name['timestamp_seconds']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\222\265\030\013\n\ttimestamp'))
-_CONTROLMESSAGE_WATERMARK.has_options = True
-_CONTROLMESSAGE_WATERMARK._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('\210\265\030\001'))
-_PARTITION.fields_by_name['timestamp_seconds'].has_options = True
-_PARTITION.fields_by_name['timestamp_seconds']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\212\265\030\013\n\ttimestamp'))
-_PARTITION.fields_by_name['event_type'].has_options = True
-_PARTITION.fields_by_name['event_type']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\212\265\030\002\020\001'))
-_HASH128_SETFIELDSENTRY.has_options = True
-_HASH128_SETFIELDSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
-# @@protoc_insertion_point(module_scope)
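
The block removed above is the protoc-generated descriptor bootstrap for the Criteo Glup test schema: explicit _descriptor.Descriptor(...) construction, the fields_by_name/enum_type cross-linking, class creation through _reflection.GeneratedProtocolMessageType, and registration of the custom glup/glupfield/json options. All of that machinery ultimately yields ordinary protobuf message classes. A minimal sketch of what that looks like from the caller's side, assuming the generated module (named 'metadata_proto_pb2' per its __module__ above) is importable; the field values are illustrative only:

    import metadata_proto_pb2 as pb

    # Build a watermark using field names taken from the Watermark descriptor above.
    wm = pb.ControlMessage.Watermark(
        kafka_topic="glup.control",
        partition=0,
        partition_count=16,
        timestamp_seconds=1700000000,
    )

    data = wm.SerializeToString()            # wire-format bytes
    decoded = pb.ControlMessage.Watermark()
    decoded.ParseFromString(data)
    assert decoded.kafka_topic == "glup.control"
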
diff --git a/tests/integration/schema_registry/test_api_client.py b/tests/integration/schema_registry/test_api_client.py
index a60f7c2..6b75e3b 100644
--- a/tests/integration/schema_registry/test_api_client.py
+++ b/tests/integration/schema_registry/test_api_client.py
@@ -21,6 +21,22 @@ import pytest
 
 from confluent_kafka.schema_registry import Schema
 from confluent_kafka.schema_registry.error import SchemaRegistryError
+from tests.integration.conftest import kafka_cluster_fixture
+
+
+@pytest.fixture(scope="module")
+def kafka_cluster_cp_7_0_1():
+    """
+    Returns a Trivup cluster with CP version 7.0.1.
+    SR version 7.0.1 is the last returning 500 instead of 422
+    for the invalid schema passed to test_api_get_register_schema_invalid
+    """
+    for fixture in kafka_cluster_fixture(
+        brokers_env="BROKERS_7_0_1",
+        sr_url_env="SR_URL_7_0_1",
+        trivup_cluster_conf={'cp_version': '7.0.1'}
+    ):
+        yield fixture
 
 
 def _subject_name(prefix):
@@ -49,6 +65,28 @@ def test_api_register_schema(kafka_cluster, load_file):
     assert schema.schema_str, registered_schema.schema.schema_str
 
 
+def test_api_register_normalized_schema(kafka_cluster, load_file):
+    """
+    Registers a schema, verifies the registration
+
+    Args:
+        kafka_cluster (KafkaClusterFixture): Kafka Cluster fixture
+        load_file (callable(str)): Schema fixture constructor
+
+    """
+    sr = kafka_cluster.schema_registry()
+    avsc = 'basic_schema.avsc'
+    subject = _subject_name(avsc)
+    schema = Schema(load_file(avsc), schema_type='AVRO')
+
+    schema_id = sr.register_schema(subject, schema, True)
+    registered_schema = sr.lookup_schema(subject, schema, True)
+
+    assert registered_schema.schema_id == schema_id
+    assert registered_schema.subject == subject
+    assert schema.schema_str, registered_schema.schema.schema_str
+
+
 def test_api_register_schema_incompatible(kafka_cluster, load_file):
     """
     Attempts to register an incompatible Schema verifies the error.
@@ -157,15 +195,28 @@ def test_api_get_registration_subject_not_found(kafka_cluster, load_file):
     assert e.value.error_code == 40401
 
 
-def test_api_get_register_schema_invalid(kafka_cluster, load_file):
+@pytest.mark.parametrize("kafka_cluster_name, http_status_code, error_code", [
+    ["kafka_cluster_cp_7_0_1", 500, 500],
+    ["kafka_cluster", 422, 42201],
+])
+def test_api_get_register_schema_invalid(
+        kafka_cluster_name,
+        http_status_code,
+        error_code,
+        load_file,
+        request):
     """
     Attempts to obtain registration information with an invalid schema
+    with different CP versions.
 
     Args:
-        kafka_cluster (KafkaClusterFixture): Kafka Cluster fixture
+        kafka_cluster_name (str): name of the Kafka Cluster fixture to use
+        http_status_code (int): HTTP status return code expected in this version
+        error_code (int): error code expected in this version
         load_file (callable(str)): Schema fixture constructor
-
+        request (FixtureRequest): PyTest object giving access to the test context
     """
+    kafka_cluster = request.getfixturevalue(kafka_cluster_name)
     sr = kafka_cluster.schema_registry()
     subject = _subject_name("registration_invalid_schema")
     schema = Schema(load_file('basic_schema.avsc'), schema_type='AVRO')
@@ -176,9 +227,9 @@ def test_api_get_register_schema_invalid(kafka_cluster, load_file):
 
     with pytest.raises(SchemaRegistryError, match="Invalid schema") as e:
         sr.lookup_schema(subject, schema2)
-    # Not as documented but the caused by is correct.
-    assert e.value.http_status_code == 500
-    assert e.value.error_code == 500
+
+    assert e.value.http_status_code == http_status_code
+    assert e.value.error_code == error_code
 
 
 def test_api_get_subjects(kafka_cluster, load_file):
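
Two things stand out in the test_api_client.py changes above: test_api_register_normalized_schema passes a third positional argument (True) to register_schema()/lookup_schema() to request schema normalization, and test_api_get_register_schema_invalid now resolves its cluster fixture lazily by name through request.getfixturevalue(), so only the CP version needed by a given parameter set is spun up. A minimal sketch of the normalized registration call outside the fixture machinery; the registry URL, subject name and schema file are placeholders, and the call shape simply mirrors the test:

    from confluent_kafka.schema_registry import SchemaRegistryClient, Schema

    sr = SchemaRegistryClient({'url': 'http://localhost:8081'})

    with open('basic_schema.avsc') as f:
        schema = Schema(f.read(), schema_type='AVRO')

    subject = 'example-value'
    schema_id = sr.register_schema(subject, schema, True)   # True = normalize before registering
    registered = sr.lookup_schema(subject, schema, True)    # normalize on lookup as well

    assert registered.schema_id == schema_id
    assert registered.subject == subject
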
diff --git a/tests/integration/schema_registry/test_avro_serializers.py b/tests/integration/schema_registry/test_avro_serializers.py
index 42a9167..882af40 100644
--- a/tests/integration/schema_registry/test_avro_serializers.py
+++ b/tests/integration/schema_registry/test_avro_serializers.py
@@ -15,7 +15,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
 import pytest
 
 from confluent_kafka import TopicPartition
@@ -23,6 +22,7 @@ from confluent_kafka.serialization import (MessageField,
                                            SerializationContext)
 from confluent_kafka.schema_registry.avro import (AvroSerializer,
                                                   AvroDeserializer)
+from confluent_kafka.schema_registry import Schema, SchemaReference
 
 
 class User(object):
@@ -51,6 +51,145 @@ class User(object):
             self.favorite_color == other.favorite_color])
 
 
+class AwardProperties(object):
+    schema_str = """
+        {
+            "namespace": "confluent.io.examples.serialization.avro",
+            "name": "AwardProperties",
+            "type": "record",
+            "fields": [
+                {"name": "year", "type": "int"},
+                {"name": "points", "type": "int"}
+            ]
+        }
+    """
+
+    def __init__(self, points, year):
+        self.points = points
+        self.year = year
+
+    def __eq__(self, other):
+        return all([
+            self.points == other.points,
+            self.year == other.year
+        ])
+
+
+class Award(object):
+    schema_str = """
+        {
+            "namespace": "confluent.io.examples.serialization.avro",
+            "name": "Award",
+            "type": "record",
+            "fields": [
+                {"name": "name", "type": "string"},
+                {"name": "properties", "type": "AwardProperties"}
+            ]
+        }
+    """
+
+    def __init__(self, name, properties):
+        self.name = name
+        self.properties = properties
+
+    def __eq__(self, other):
+        return all([
+            self.name == other.name,
+            self.properties == other.properties
+        ])
+
+
+class AwardedUser(object):
+    schema_str = """
+        {
+            "namespace": "confluent.io.examples.serialization.avro",
+            "name": "AwardedUser",
+            "type": "record",
+            "fields": [
+                {"name": "award", "type": "Award"},
+                {"name": "user", "type": "User"}
+            ]
+        }
+    """
+
+    def __init__(self, award, user):
+        self.award = award
+        self.user = user
+
+    def __eq__(self, other):
+        return all([
+            self.award == other.award,
+            self.user == other.user
+        ])
+
+
+def _register_avro_schemas_and_build_awarded_user_schema(kafka_cluster):
+    sr = kafka_cluster.schema_registry()
+
+    user = User('Bowie', 47, 'purple')
+    award_properties = AwardProperties(10, 2023)
+    award = Award("Best In Show", award_properties)
+    awarded_user = AwardedUser(award, user)
+
+    user_schema_ref = SchemaReference("confluent.io.examples.serialization.avro.User", "user", 1)
+    award_properties_schema_ref = SchemaReference("confluent.io.examples.serialization.avro.AwardProperties",
+                                                  "award_properties", 1)
+    award_schema_ref = SchemaReference("confluent.io.examples.serialization.avro.Award", "award", 1)
+
+    sr.register_schema("user", Schema(User.schema_str, 'AVRO'))
+    sr.register_schema("award_properties", Schema(AwardProperties.schema_str, 'AVRO'))
+    sr.register_schema("award", Schema(Award.schema_str, 'AVRO', [award_properties_schema_ref]))
+
+    references = [user_schema_ref, award_schema_ref]
+    schema = Schema(AwardedUser.schema_str, 'AVRO', references)
+    return awarded_user, schema
+
+
+def _references_test_common(kafka_cluster, awarded_user, serializer_schema, deserializer_schema):
+    """
+    Common (both reader and writer) avro schema reference test.
+    Args:
+        kafka_cluster (KafkaClusterFixture): cluster fixture
+    """
+    topic = kafka_cluster.create_topic("reference-avro")
+    sr = kafka_cluster.schema_registry()
+
+    value_serializer = AvroSerializer(sr, serializer_schema,
+                                      lambda user, ctx:
+                                      dict(award=dict(name=user.award.name,
+                                                      properties=dict(year=user.award.properties.year,
+                                                                      points=user.award.properties.points)),
+                                           user=dict(name=user.user.name,
+                                                     favorite_number=user.user.favorite_number,
+                                                     favorite_color=user.user.favorite_color)))
+
+    value_deserializer = \
+        AvroDeserializer(sr, deserializer_schema,
+                         lambda user, ctx:
+                         AwardedUser(award=Award(name=user.get('award').get('name'),
+                                                 properties=AwardProperties(
+                                                     year=user.get('award').get('properties').get(
+                                                         'year'),
+                                                     points=user.get('award').get('properties').get(
+                                                         'points'))),
+                                     user=User(name=user.get('user').get('name'),
+                                               favorite_number=user.get('user').get('favorite_number'),
+                                               favorite_color=user.get('user').get('favorite_color'))))
+
+    producer = kafka_cluster.producer(value_serializer=value_serializer)
+
+    producer.produce(topic, value=awarded_user, partition=0)
+    producer.flush()
+
+    consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
+    consumer.assign([TopicPartition(topic, 0)])
+
+    msg = consumer.poll()
+    awarded_user2 = msg.value()
+
+    assert awarded_user2 == awarded_user
+
+
 @pytest.mark.parametrize("avsc, data, record_type",
                          [('basic_schema.avsc', {'name': 'abc'}, "record"),
                           ('primitive_string.avsc', u'Jämtland', "string"),
@@ -124,7 +263,7 @@ def test_delivery_report_serialization(kafka_cluster, load_file, avsc, data, rec
 
     def assert_cb(err, msg):
         actual = value_deserializer(msg.value(),
-                                    SerializationContext(topic, MessageField.VALUE))
+                                    SerializationContext(topic, MessageField.VALUE, msg.headers()))
 
         if record_type == "record":
             assert [v == actual[k] for k, v in data.items()]
@@ -185,3 +324,25 @@ def test_avro_record_serialization_custom(kafka_cluster):
     user2 = msg.value()
 
     assert user2 == user
+
+
+def test_avro_reference(kafka_cluster):
+    """
+    Tests Avro schema reference with both serializer and deserializer schemas provided.
+    Args:
+        kafka_cluster (KafkaClusterFixture): cluster fixture
+    """
+    awarded_user, schema = _register_avro_schemas_and_build_awarded_user_schema(kafka_cluster)
+
+    _references_test_common(kafka_cluster, awarded_user, schema, schema)
+
+
+def test_avro_reference_deserializer_none(kafka_cluster):
+    """
+    Tests Avro schema reference with serializer schema provided and deserializer schema set to None.
+    Args:
+        kafka_cluster (KafkaClusterFixture): cluster fixture
+    """
+    awarded_user, schema = _register_avro_schemas_and_build_awarded_user_schema(kafka_cluster)
+
+    _references_test_common(kafka_cluster, awarded_user, schema, None)
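
The two tests above exercise Avro schema references end to end through the cluster fixtures. As a rough sketch of the same wiring done by hand, the snippet below registers the referenced schemas and builds the referencing Schema itself; the registry URL, the fully-qualified reference names and the schema strings (user_str, award_properties_str, award_str, awarded_user_str) are placeholders, not values taken from the test data.

    from confluent_kafka.schema_registry import SchemaRegistryClient, Schema, SchemaReference
    from confluent_kafka.schema_registry.avro import AvroSerializer, AvroDeserializer
    from confluent_kafka.serialization import SerializationContext, MessageField

    sr = SchemaRegistryClient({'url': 'http://localhost:8081'})

    # Register the referenced schemas first; SchemaReference(name, subject, version)
    # then points the outer schema at them. The record names below are illustrative
    # and must match the names used inside the .avsc strings.
    sr.register_schema('user', Schema(user_str, 'AVRO'))
    sr.register_schema('award_properties', Schema(award_properties_str, 'AVRO'))
    sr.register_schema('award', Schema(award_str, 'AVRO',
                       [SchemaReference('example.AwardProperties', 'award_properties', 1)]))

    awarded_user_schema = Schema(awarded_user_str, 'AVRO',
                                 [SchemaReference('example.User', 'user', 1),
                                  SchemaReference('example.Award', 'award', 1)])

    serializer = AvroSerializer(sr, awarded_user_schema)
    deserializer = AvroDeserializer(sr, awarded_user_schema)   # or None to reuse the writer schema

    ctx = SerializationContext('reference-avro', MessageField.VALUE)
    awarded_user = {'award': {'name': 'Best Player', 'properties': {'year': 2023, 'points': 10}},
                    'user': {'name': 'Bowie', 'favorite_number': 47, 'favorite_color': 'red'}}
    assert deserializer(serializer(awarded_user, ctx), ctx) == awarded_user
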
diff --git a/tests/integration/schema_registry/test_json_serializers.py b/tests/integration/schema_registry/test_json_serializers.py
index f28bd7a..3a60598 100644
--- a/tests/integration/schema_registry/test_json_serializers.py
+++ b/tests/integration/schema_registry/test_json_serializers.py
@@ -19,6 +19,7 @@ import pytest
 from confluent_kafka import TopicPartition
 
 from confluent_kafka.error import ConsumeError, ValueSerializationError
+from confluent_kafka.schema_registry import SchemaReference, Schema
 from confluent_kafka.schema_registry.json_schema import (JSONSerializer,
                                                          JSONDeserializer)
 
@@ -32,6 +33,64 @@ class _TestProduct(object):
         self.dimensions = dimensions
         self.location = location
 
+    def __eq__(self, other):
+        return all([
+            self.product_id == other.product_id,
+            self.name == other.name,
+            self.price == other.price,
+            self.tags == other.tags,
+            self.dimensions == other.dimensions,
+            self.location == other.location
+        ])
+
+
+class _TestCustomer(object):
+    def __init__(self, name, id):
+        self.name = name
+        self.id = id
+
+    def __eq__(self, other):
+        return all([
+            self.name == other.name,
+            self.id == other.id
+        ])
+
+
+class _TestOrderDetails(object):
+    def __init__(self, id, customer):
+        self.id = id
+        self.customer = customer
+
+    def __eq__(self, other):
+        return all([
+            self.id == other.id,
+            self.customer == other.customer
+        ])
+
+
+class _TestOrder(object):
+    def __init__(self, order_details, product):
+        self.order_details = order_details
+        self.product = product
+
+    def __eq__(self, other):
+        return all([
+            self.order_details == other.order_details,
+            self.product == other.product
+        ])
+
+
+class _TestReferencedProduct(object):
+    def __init__(self, name, product):
+        self.name = name
+        self.product = product
+
+    def __eq__(self, other):
+        return all([
+            self.name == other.name,
+            self.product == other.product
+        ])
+
 
 def _testProduct_to_dict(product_obj, ctx):
     """
@@ -55,6 +114,60 @@ def _testProduct_to_dict(product_obj, ctx):
             "warehouseLocation": product_obj.location}
 
 
+def _testCustomer_to_dict(customer_obj, ctx):
+    """
+    Returns testCustomer instance in dict format.
+
+    Args:
+        customer_obj (_TestCustomer): testCustomer instance.
+
+        ctx (SerializationContext): Metadata pertaining to the serialization
+                operation.
+
+    Returns:
+        dict: customer_obj as a dictionary.
+
+    """
+    return {"name": customer_obj.name,
+            "id": customer_obj.id}
+
+
+def _testOrderDetails_to_dict(orderdetails_obj, ctx):
+    """
+    Returns testOrderDetails instance in dict format.
+
+    Args:
+        orderdetails_obj (_TestOrderDetails): testOrderDetails instance.
+
+        ctx (SerializationContext): Metadata pertaining to the serialization
+                operation.
+
+    Returns:
+        dict: orderdetails_obj as a dictionary.
+
+    """
+    return {"id": orderdetails_obj.id,
+            "customer": _testCustomer_to_dict(orderdetails_obj.customer, ctx)}
+
+
+def _testOrder_to_dict(order_obj, ctx):
+    """
+    Returns testOrder instance in dict format.
+
+    Args:
+        order_obj (_TestOrder): testOrder instance.
+
+        ctx (SerializationContext): Metadata pertaining to the serialization
+                operation.
+
+    Returns:
+        dict: order_obj as a dictionary.
+
+    """
+    return {"order_details": _testOrderDetails_to_dict(order_obj.order_details, ctx),
+            "product": _testProduct_to_dict(order_obj.product, ctx)}
+
+
 def _testProduct_from_dict(product_dict, ctx):
     """
     Returns testProduct instance from its dict format.
@@ -77,6 +190,60 @@ def _testProduct_from_dict(product_dict, ctx):
                         product_dict['warehouseLocation'])
 
 
+def _testCustomer_from_dict(customer_dict, ctx):
+    """
+    Returns testCustomer instance from its dict format.
+
+    Args:
+        customer_dict (dict): testCustomer in dict format.
+
+        ctx (SerializationContext): Metadata pertaining to the serialization
+                operation.
+
+    Returns:
+        _TestCustomer: customer_obj instance.
+
+    """
+    return _TestCustomer(customer_dict['name'],
+                         customer_dict['id'])
+
+
+def _testOrderDetails_from_dict(orderdetails_dict, ctx):
+    """
+    Returns testOrderDetails instance from its dict format.
+
+    Args:
+        orderdetails_dict (dict): testOrderDetails in dict format.
+
+        ctx (SerializationContext): Metadata pertaining to the serialization
+                operation.
+
+    Returns:
+        _TestOrderDetails: orderdetails_obj instance.
+
+    """
+    return _TestOrderDetails(orderdetails_dict['id'],
+                             _testCustomer_from_dict(orderdetails_dict['customer'], ctx))
+
+
+def _testOrder_from_dict(order_dict, ctx):
+    """
+    Returns testOrder instance from its dict format.
+
+    Args:
+        order_dict (dict): testOrder in dict format.
+
+        ctx (SerializationContext): Metadata pertaining to the serialization
+                operation.
+
+    Returns:
+        _TestOrder: order_obj instance.
+
+    """
+    return _TestOrder(_testOrderDetails_from_dict(order_dict['order_details'], ctx),
+                      _testProduct_from_dict(order_dict['product'], ctx))
+
+
 def test_json_record_serialization(kafka_cluster, load_file):
     """
     Tests basic JsonSerializer and JsonDeserializer basic functionality.
@@ -253,3 +420,89 @@ def test_json_record_deserialization_mismatch(kafka_cluster, load_file):
             ConsumeError,
             match="'productId' is a required property"):
         consumer.poll()
+
+
+def _register_referenced_schemas(sr, load_file):
+    sr.register_schema("product", Schema(load_file("product.json"), 'JSON'))
+    sr.register_schema("customer", Schema(load_file("customer.json"), 'JSON'))
+    sr.register_schema("order_details", Schema(load_file("order_details.json"), 'JSON', [
+        SchemaReference("http://example.com/customer.schema.json", "customer", 1)]))
+
+    order_schema = Schema(load_file("order.json"), 'JSON',
+                          [SchemaReference("http://example.com/order_details.schema.json", "order_details", 1),
+                           SchemaReference("http://example.com/product.schema.json", "product", 1)])
+    return order_schema
+
+
+def test_json_reference(kafka_cluster, load_file):
+    topic = kafka_cluster.create_topic("serialization-json")
+    sr = kafka_cluster.schema_registry()
+
+    product = {"productId": 1,
+               "productName": "An ice sculpture",
+               "price": 12.50,
+               "tags": ["cold", "ice"],
+               "dimensions": {
+                   "length": 7.0,
+                   "width": 12.0,
+                   "height": 9.5
+               },
+               "warehouseLocation": {
+                   "latitude": -78.75,
+                   "longitude": 20.4
+               }}
+    customer = {"name": "John Doe", "id": 1}
+    order_details = {"id": 1, "customer": customer}
+    order = {"order_details": order_details, "product": product}
+
+    schema = _register_referenced_schemas(sr, load_file)
+
+    value_serializer = JSONSerializer(schema, sr)
+    value_deserializer = JSONDeserializer(schema, schema_registry_client=sr)
+
+    producer = kafka_cluster.producer(value_serializer=value_serializer)
+    producer.produce(topic, value=order, partition=0)
+    producer.flush()
+
+    consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
+    consumer.assign([TopicPartition(topic, 0)])
+
+    msg = consumer.poll()
+    actual = msg.value()
+
+    assert all([actual[k] == v for k, v in order.items()])
+
+
+def test_json_reference_custom(kafka_cluster, load_file):
+    topic = kafka_cluster.create_topic("serialization-json")
+    sr = kafka_cluster.schema_registry()
+
+    product = _TestProduct(product_id=1,
+                           name="The ice sculpture",
+                           price=12.50,
+                           tags=["cold", "ice"],
+                           dimensions={"length": 7.0,
+                                       "width": 12.0,
+                                       "height": 9.5},
+                           location={"latitude": -78.75,
+                                     "longitude": 20.4})
+    customer = _TestCustomer(name="John Doe", id=1)
+    order_details = _TestOrderDetails(id=1, customer=customer)
+    order = _TestOrder(order_details=order_details, product=product)
+
+    schema = _register_referenced_schemas(sr, load_file)
+
+    value_serializer = JSONSerializer(schema, sr, to_dict=_testOrder_to_dict)
+    value_deserializer = JSONDeserializer(schema, schema_registry_client=sr, from_dict=_testOrder_from_dict)
+
+    producer = kafka_cluster.producer(value_serializer=value_serializer)
+    producer.produce(topic, value=order, partition=0)
+    producer.flush()
+
+    consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
+    consumer.assign([TopicPartition(topic, 0)])
+
+    msg = consumer.poll()
+    actual = msg.value()
+
+    assert actual == order
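
For orientation outside the test fixtures, a minimal sketch of the same JSON Schema reference setup follows. The registry URL is a placeholder, and product_json, customer_json, order_details_json and order_json stand for JSON Schema documents whose $ref URIs match the SchemaReference names below.

    from confluent_kafka.schema_registry import SchemaRegistryClient, Schema, SchemaReference
    from confluent_kafka.schema_registry.json_schema import JSONSerializer, JSONDeserializer
    from confluent_kafka.serialization import SerializationContext, MessageField

    sr = SchemaRegistryClient({'url': 'http://localhost:8081'})

    # Referenced schemas live under their own subjects; the order schema then
    # points at them via SchemaReference(name, subject, version).
    sr.register_schema('product', Schema(product_json, 'JSON'))
    sr.register_schema('customer', Schema(customer_json, 'JSON'))
    sr.register_schema('order_details', Schema(order_details_json, 'JSON',
                       [SchemaReference('http://example.com/customer.schema.json', 'customer', 1)]))

    order_schema = Schema(order_json, 'JSON',
                          [SchemaReference('http://example.com/order_details.schema.json', 'order_details', 1),
                           SchemaReference('http://example.com/product.schema.json', 'product', 1)])

    serializer = JSONSerializer(order_schema, sr)
    deserializer = JSONDeserializer(order_schema, schema_registry_client=sr)

    ctx = SerializationContext('serialization-json', MessageField.VALUE)
    order = {'order_details': {'id': 1, 'customer': {'name': 'John Doe', 'id': 1}},
             'product': {'productId': 1, 'productName': 'An ice sculpture', 'price': 12.50,
                         'tags': ['cold', 'ice'],
                         'dimensions': {'length': 7.0, 'width': 12.0, 'height': 9.5},
                         'warehouseLocation': {'latitude': -78.75, 'longitude': 20.4}}}
    payload = serializer(order, ctx)
    assert deserializer(payload, ctx) == order
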
diff --git a/tests/integration/schema_registry/test_proto_serializers.py b/tests/integration/schema_registry/test_proto_serializers.py
index 811dbd6..621beac 100644
--- a/tests/integration/schema_registry/test_proto_serializers.py
+++ b/tests/integration/schema_registry/test_proto_serializers.py
@@ -19,11 +19,10 @@ import pytest
 from confluent_kafka import TopicPartition, KafkaException, KafkaError
 from confluent_kafka.error import ConsumeError
 from confluent_kafka.schema_registry.protobuf import ProtobufSerializer, ProtobufDeserializer
-from .gen import metadata_proto_pb2
-from ..schema_registry.gen import NestedTestProto_pb2, TestProto_pb2, \
+from .data.proto import metadata_proto_pb2, NestedTestProto_pb2, TestProto_pb2, \
     PublicTestProto_pb2
-from tests.integration.schema_registry.gen.DependencyTestProto_pb2 import DependencyMessage
-from tests.integration.schema_registry.gen.exampleProtoCriteo_pb2 import ClickCas
+from tests.integration.schema_registry.data.proto.DependencyTestProto_pb2 import DependencyMessage
+from tests.integration.schema_registry.data.proto.exampleProtoCriteo_pb2 import ClickCas
 
 
 @pytest.mark.parametrize("pb2, data", [
@@ -39,7 +38,7 @@ from tests.integration.schema_registry.gen.exampleProtoCriteo_pb2 import ClickCa
                                        'test_float': 12.0}),
     (NestedTestProto_pb2.NestedMessage, {'user_id':
      NestedTestProto_pb2.UserId(
-            kafka_user_id='oneof_str'),
+         kafka_user_id='oneof_str'),
         'is_active': True,
         'experiments_active': ['x', 'y', '1'],
         'status': NestedTestProto_pb2.INACTIVE,
@@ -56,8 +55,8 @@ def test_protobuf_message_serialization(kafka_cluster, pb2, data):
     topic = kafka_cluster.create_topic("serialization-proto")
     sr = kafka_cluster.schema_registry()
 
-    value_serializer = ProtobufSerializer(pb2, sr)
-    value_deserializer = ProtobufDeserializer(pb2)
+    value_serializer = ProtobufSerializer(pb2, sr, {'use.deprecated.format': False})
+    value_deserializer = ProtobufDeserializer(pb2, {'use.deprecated.format': False})
 
     producer = kafka_cluster.producer(value_serializer=value_serializer)
     consumer = kafka_cluster.consumer(value_deserializer=value_deserializer)
@@ -87,7 +86,7 @@ def test_protobuf_reference_registration(kafka_cluster, pb2, expected_refs):
     """
     sr = kafka_cluster.schema_registry()
     topic = kafka_cluster.create_topic("serialization-proto-refs")
-    serializer = ProtobufSerializer(pb2, sr)
+    serializer = ProtobufSerializer(pb2, sr, {'use.deprecated.format': False})
     producer = kafka_cluster.producer(key_serializer=serializer)
 
     producer.produce(topic, key=pb2(), partition=0)
@@ -108,14 +107,14 @@ def test_protobuf_serializer_type_mismatch(kafka_cluster):
 
     sr = kafka_cluster.schema_registry()
     topic = kafka_cluster.create_topic("serialization-proto-refs")
-    serializer = ProtobufSerializer(pb2_1, sr)
+    serializer = ProtobufSerializer(pb2_1, sr, {'use.deprecated.format': False})
 
     producer = kafka_cluster.producer(key_serializer=serializer)
 
     with pytest.raises(KafkaException,
                        match=r"message must be of type <class"
-                             r" 'TestProto_pb2.TestMessage'\> not \<class"
-                             r" 'NestedTestProto_pb2.NestedMessage'\>"):
+                             r" 'tests.integration.schema_registry.data.proto.TestProto_pb2.TestMessage'\> not \<class"
+                             r" 'tests.integration.schema_registry.data.proto.NestedTestProto_pb2.NestedMessage'\>"):
         producer.produce(topic, key=pb2_2())
 
 
@@ -129,8 +128,8 @@ def test_protobuf_deserializer_type_mismatch(kafka_cluster):
 
     sr = kafka_cluster.schema_registry()
     topic = kafka_cluster.create_topic("serialization-proto-refs")
-    serializer = ProtobufSerializer(pb2_1, sr)
-    deserializer = ProtobufDeserializer(pb2_2)
+    serializer = ProtobufSerializer(pb2_1, sr, {'use.deprecated.format': False})
+    deserializer = ProtobufDeserializer(pb2_2, {'use.deprecated.format': False})
 
     producer = kafka_cluster.producer(key_serializer=serializer)
     consumer = kafka_cluster.consumer(key_deserializer=deserializer)
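
The protobuf tests above now pass use.deprecated.format explicitly to both serializer and deserializer. A minimal sketch of the same construction, assuming a generated _pb2 module and a placeholder registry URL:

    from confluent_kafka.schema_registry import SchemaRegistryClient
    from confluent_kafka.schema_registry.protobuf import ProtobufSerializer, ProtobufDeserializer
    from confluent_kafka.serialization import SerializationContext, MessageField
    from tests.integration.schema_registry.data.proto import TestProto_pb2

    sr = SchemaRegistryClient({'url': 'http://localhost:8081'})

    # The tests set this explicitly rather than relying on a default; False selects
    # the current (non-deprecated) message-index framing.
    serializer = ProtobufSerializer(TestProto_pb2.TestMessage, sr, {'use.deprecated.format': False})
    deserializer = ProtobufDeserializer(TestProto_pb2.TestMessage, {'use.deprecated.format': False})

    ctx = SerializationContext('serialization-proto', MessageField.VALUE)
    msg = TestProto_pb2.TestMessage(test_float=12.0)
    decoded = deserializer(serializer(msg, ctx), ctx)
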
diff --git a/tests/integration/testconf.json b/tests/integration/testconf.json
index 1bc02bc..15b9ca3 100644
--- a/tests/integration/testconf.json
+++ b/tests/integration/testconf.json
@@ -3,15 +3,22 @@
     "bootstrap.servers": "$MY_BOOTSTRAP_SERVER_ENV",
     "schema.registry.url": "$MY_SCHEMA_REGISTRY_URL_ENV",
     "avro-https": {
-            "schema.registry.url": "$MY_SCHEMA_REGISTRY_SSL_URL_ENV",
-            "schema.registry.ssl.ca.location": "$MY_SCHEMA_REGISTRY_SSL_CA_LOCATION_ENV",
-            "schema.registry.ssl.certificate.location": "$MY_SCHEMA_REGISTRY_SSL_CERTIFICATE_LOCATION_ENV",
-            "schema.registry.ssl.key.location": "$MY_SCHEMA_REGISTRY_SSL_KEY_LOCATION_ENV"
+        "schema.registry.url": "$MY_SCHEMA_REGISTRY_SSL_URL_ENV",
+        "schema.registry.ssl.ca.location": "$MY_SCHEMA_REGISTRY_SSL_CA_LOCATION_ENV",
+        "schema.registry.ssl.certificate.location": "$MY_SCHEMA_REGISTRY_SSL_CERTIFICATE_LOCATION_ENV",
+        "schema.registry.ssl.key.location": "$MY_SCHEMA_REGISTRY_SSL_KEY_LOCATION_ENV"
     },
     "avro-basic-auth": {
         "schema.registry.url": "http://localhost:8083",
         "schema.registry.basic.auth.user.info": "ckp_tester:test_secret",
         "sasl.username": "ckp_tester",
         "sasl.password": "test_secret"
+    },
+    "avro-https-key-with-password": {
+        "schema.registry.url": "$MY_SCHEMA_REGISTRY_SSL_URL_ENV",
+        "schema.registry.ssl.ca.location": "$MY_SCHEMA_REGISTRY_SSL_CA_LOCATION_ENV",
+        "schema.registry.ssl.certificate.location": "$MY_SCHEMA_REGISTRY_SSL_CERTIFICATE_LOCATION_ENV",
+        "schema.registry.ssl.key.location": "$MY_SCHEMA_REGISTRY_SSL_KEY_WITH_PASSWORD_LOCATION_ENV",
+        "schema.registry.ssl.key.password": "$MY_SCHEMA_REGISTRY_SSL_KEY_PASSWORD"
     }
 }
diff --git a/tests/requirements.txt b/tests/requirements.txt
index a55e300..120daf4 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -5,6 +5,6 @@ pytest-timeout
 requests-mock
 trivup>=0.8.3
 fastavro
-avro
+avro>=1.11.1,<2
 jsonschema
 protobuf
diff --git a/tests/run.sh b/tests/run.sh
index 80ab167..35a9ca3 100755
--- a/tests/run.sh
+++ b/tests/run.sh
@@ -11,7 +11,7 @@ cleanup() {
 
 trap cleanup 0 2 3 6 15
 
-source ${DOCKER_BIN}/../.env
+source ${DOCKER_BIN}/../.env.sh
 
 if [[ ${1:-} == "help" ]]; then
     python ${TEST_SOURCE}/integration/integration_test.py --help
diff --git a/tests/schema_registry/conftest.py b/tests/schema_registry/conftest.py
index b6f5eae..047dca0 100644
--- a/tests/schema_registry/conftest.py
+++ b/tests/schema_registry/conftest.py
@@ -297,12 +297,13 @@ class MockSchemaRegistryClient(SchemaRegistryClient):
         path_match = re.match(self.subject_versions, request.path)
         subject = path_match.group(1)
         version = path_match.group(2)
+        version_num = -1 if version == 'latest' else int(version)
 
-        if int(version) == 404:
+        if version_num == 404:
             context.status_code = 404
             return {'error_code': 40402,
                     'message': "Version not found"}
-        if int(version) == 422:
+        if version_num == 422:
             context.status_code = 422
             return {'error_code': 42202,
                     'message': "Invalid version"}
@@ -313,7 +314,7 @@ class MockSchemaRegistryClient(SchemaRegistryClient):
         context.status_code = 200
         return {'subject': subject,
                 'id': self.SCHEMA_ID,
-                'version': int(version),
+                'version': version_num,
                 'schema': self._load_avsc(self.SCHEMA)}
 
     def delete_subject_version_callback(self, request, context):
@@ -322,13 +323,14 @@ class MockSchemaRegistryClient(SchemaRegistryClient):
         path_match = re.match(self.subject_versions, request.path)
         subject = path_match.group(1)
         version = path_match.group(2)
+        version_num = -1 if version == 'latest' else int(version)
 
-        if int(version) == 404:
+        if version_num == 404:
             context.status_code = 404
             return {"error_code": 40402,
                     "message": "Version not found"}
 
-        if int(version) == 422:
+        if version_num == 422:
             context.status_code = 422
             return {"error_code": 42202,
                     "message": "Invalid version"}
@@ -339,7 +341,7 @@ class MockSchemaRegistryClient(SchemaRegistryClient):
                     "message": "Subject not found"}
 
         context.status_code = 200
-        return int(version)
+        return version_num
 
     def post_subject_version_callback(self, request, context):
         self.counter['POST'][request.path] += 1
diff --git a/tests/schema_registry/test_avro_serializer.py b/tests/schema_registry/test_avro_serializer.py
index 080ae48..7dca001 100644
--- a/tests/schema_registry/test_avro_serializer.py
+++ b/tests/schema_registry/test_avro_serializer.py
@@ -20,7 +20,7 @@ import pytest
 from confluent_kafka.schema_registry import (record_subject_name_strategy,
                                              SchemaRegistryClient,
                                              topic_record_subject_name_strategy)
-from confluent_kafka.schema_registry.avro import AvroSerializer
+from confluent_kafka.schema_registry.avro import AvroSerializer, AvroDeserializer
 from confluent_kafka.serialization import (MessageField,
                                            SerializationContext)
 
@@ -34,7 +34,7 @@ def test_avro_serializer_config_auto_register_schemas():
     """
     conf = {'url': TEST_URL}
     test_client = SchemaRegistryClient(conf)
-    test_serializer = AvroSerializer(test_client, 'string',
+    test_serializer = AvroSerializer(test_client, '"string"',
                                      conf={'auto.register.schemas': False})
     assert not test_serializer._auto_register
 
@@ -60,7 +60,7 @@ def test_avro_serializer_config_auto_register_schemas_false(mock_schema_registry
     topic = "test-auto-register"
     subject = topic + '-key'
 
-    test_serializer = AvroSerializer(test_client, 'string',
+    test_serializer = AvroSerializer(test_client, '"string"',
                                      conf={'auto.register.schemas': False})
 
     test_serializer("test",
@@ -74,6 +74,29 @@ def test_avro_serializer_config_auto_register_schemas_false(mock_schema_registry
     assert test_client.counter['POST'].get('/subjects/{}'.format(subject)) == 1
 
 
+def test_avro_serializer_config_use_latest_version(mock_schema_registry):
+    """
+    Ensures use.latest.version=True fetches the latest registered schema version
+    instead of registering a new one
+    """
+    conf = {'url': TEST_URL}
+    test_client = mock_schema_registry(conf)
+    topic = "test-use-latest-version"
+    subject = topic + '-key'
+
+    test_serializer = AvroSerializer(test_client, '"string"',
+                                     conf={'auto.register.schemas': False, 'use.latest.version': True})
+
+    test_serializer("test",
+                    SerializationContext("test-use-latest-version",
+                                         MessageField.KEY))
+
+    register_count = test_client.counter['POST'].get('/subjects/{}/versions'
+                                                     .format(subject), 0)
+    assert register_count == 0
+    # Ensure latest was requested
+    assert test_client.counter['GET'].get('/subjects/{}/versions/latest'.format(subject)) == 1
+
+
 def test_avro_serializer_config_subject_name_strategy():
     """
     Ensures subject.name.strategy is applied
@@ -81,9 +104,9 @@ def test_avro_serializer_config_subject_name_strategy():
 
     conf = {'url': TEST_URL}
     test_client = SchemaRegistryClient(conf)
-    test_serializer = AvroSerializer(test_client, 'int',
+    test_serializer = AvroSerializer(test_client, '"int"',
                                      conf={'subject.name.strategy':
-                                           record_subject_name_strategy})
+                                               record_subject_name_strategy})
 
     assert test_serializer._subject_name_func is record_subject_name_strategy
 
@@ -96,7 +119,7 @@ def test_avro_serializer_config_subject_name_strategy_invalid():
     conf = {'url': TEST_URL}
     test_client = SchemaRegistryClient(conf)
     with pytest.raises(ValueError, match="must be callable"):
-        AvroSerializer(test_client, 'int',
+        AvroSerializer(test_client, '"int"',
                        conf={'subject.name.strategy': dict()})
 
 
@@ -109,11 +132,13 @@ def test_avro_serializer_record_subject_name_strategy(load_avsc):
     test_serializer = AvroSerializer(test_client,
                                      load_avsc('basic_schema.avsc'),
                                      conf={'subject.name.strategy':
-                                           record_subject_name_strategy})
+                                               record_subject_name_strategy})
 
-    ctx = SerializationContext('test_subj', MessageField.VALUE)
+    ctx = SerializationContext('test_subj', MessageField.VALUE, [])
     assert test_serializer._subject_name_func(ctx,
                                               test_serializer._schema_name) == 'python.test.basic'
+    assert ctx is not None
+    assert not ctx.headers
 
 
 def test_avro_serializer_record_subject_name_strategy_primitive(load_avsc):
@@ -123,13 +148,14 @@ def test_avro_serializer_record_subject_name_strategy_primitive(load_avsc):
     """
     conf = {'url': TEST_URL}
     test_client = SchemaRegistryClient(conf)
-    test_serializer = AvroSerializer(test_client, 'int',
+    test_serializer = AvroSerializer(test_client, '"int"',
                                      conf={'subject.name.strategy':
-                                           record_subject_name_strategy})
+                                               record_subject_name_strategy})
 
-    ctx = SerializationContext('test_subj', MessageField.VALUE)
+    ctx = SerializationContext('test_subj', MessageField.VALUE, [('header1', 'header value 1'), ])
     assert test_serializer._subject_name_func(ctx,
                                               test_serializer._schema_name) == 'int'
+    assert ('header1', 'header value 1') in ctx.headers
 
 
 def test_avro_serializer_topic_record_subject_name_strategy(load_avsc):
@@ -141,7 +167,7 @@ def test_avro_serializer_topic_record_subject_name_strategy(load_avsc):
     test_serializer = AvroSerializer(test_client,
                                      load_avsc('basic_schema.avsc'),
                                      conf={'subject.name.strategy':
-                                           topic_record_subject_name_strategy})
+                                               topic_record_subject_name_strategy})
 
     ctx = SerializationContext('test_subj', MessageField.VALUE)
     assert test_serializer._subject_name_func(
@@ -155,13 +181,15 @@ def test_avro_serializer_topic_record_subject_name_strategy_primitive(load_avsc)
     """
     conf = {'url': TEST_URL}
     test_client = SchemaRegistryClient(conf)
-    test_serializer = AvroSerializer(test_client, 'int',
+    test_serializer = AvroSerializer(test_client, '"int"',
                                      conf={'subject.name.strategy':
-                                           topic_record_subject_name_strategy})
+                                               topic_record_subject_name_strategy})
 
     ctx = SerializationContext('test_subj', MessageField.VALUE)
     assert test_serializer._subject_name_func(
         ctx, test_serializer._schema_name) == 'test_subj-int'
+    assert ctx is not None
+    assert ctx.headers is None
 
 
 def test_avro_serializer_subject_name_strategy_default(load_avsc):
@@ -176,3 +204,40 @@ def test_avro_serializer_subject_name_strategy_default(load_avsc):
     ctx = SerializationContext('test_subj', MessageField.VALUE)
     assert test_serializer._subject_name_func(
         ctx, test_serializer._schema_name) == 'test_subj-value'
+
+
+def test_avro_serializer_schema_loads_union(load_avsc):
+    """
+    Ensures union types are correctly parsed
+    """
+    conf = {'url': TEST_URL}
+    test_client = SchemaRegistryClient(conf)
+    test_serializer = AvroSerializer(test_client,
+                                     load_avsc('union_schema.avsc'))
+
+    assert test_serializer._schema_name is None
+
+    schema = test_serializer._parsed_schema
+    assert isinstance(schema, list)
+    assert schema[0]["name"] == "RecordOne"
+    assert schema[1]["name"] == "RecordTwo"
+
+
+def test_avro_serializer_invalid_schema_type():
+    """
+    Ensures invalid schema types are rejected
+    """
+    conf = {'url': TEST_URL}
+    test_client = SchemaRegistryClient(conf)
+    with pytest.raises(TypeError, match="You must pass either schema string or schema object"):
+        AvroSerializer(test_client, 1)
+
+
+def test_avro_deserializer_invalid_schema_type():
+    """
+    Ensures invalid schema types are rejected
+    """
+    conf = {'url': TEST_URL}
+    test_client = SchemaRegistryClient(conf)
+    with pytest.raises(TypeError, match="You must pass either schema string or schema object"):
+        AvroDeserializer(test_client, 1)
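
test_avro_serializer_config_use_latest_version above asserts that enabling use.latest.version suppresses registration and issues a GET of .../versions/latest instead. A minimal sketch of that configuration against a real registry, assuming the target subject already has a registered version; the URL and topic name are placeholders.

    from confluent_kafka.schema_registry import SchemaRegistryClient
    from confluent_kafka.schema_registry.avro import AvroSerializer
    from confluent_kafka.serialization import SerializationContext, MessageField

    sr = SchemaRegistryClient({'url': 'http://localhost:8081'})

    serializer = AvroSerializer(sr, '"string"',
                                conf={'auto.register.schemas': False,
                                      'use.latest.version': True})

    # With the default subject name strategy this resolves the subject 'orders-value'
    # and requests /subjects/orders-value/versions/latest rather than POSTing a new
    # version, which is what the mock-registry counters above assert.
    payload = serializer('test', SerializationContext('orders', MessageField.VALUE))
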
diff --git a/tests/schema_registry/test_json.py b/tests/schema_registry/test_json.py
new file mode 100644
index 0000000..68ee65b
--- /dev/null
+++ b/tests/schema_registry/test_json.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2023 Confluent Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import pytest
+
+from confluent_kafka.schema_registry import SchemaReference, Schema
+from confluent_kafka.schema_registry.json_schema import JSONDeserializer, JSONSerializer
+
+
+def test_json_deserializer_referenced_schema_no_schema_registry_client(load_avsc):
+    """
+    Ensures that the deserializer raises a ValueError if a referenced schema is provided but no schema registry
+    client is provided.
+    """
+    schema = Schema(load_avsc("order_details.json"), 'JSON',
+                    [SchemaReference("http://example.com/customer.schema.json", "customer", 1)])
+    with pytest.raises(
+            ValueError,
+            match="""schema_registry_client must be provided if "schema_str" is a Schema instance with references"""):
+        deserializer = JSONDeserializer(schema, schema_registry_client=None)
+
+
+def test_json_deserializer_invalid_schema_type():
+    """
+    Ensures that the deserializer raises a TypeError if an invalid schema type is provided.
+    """
+    with pytest.raises(TypeError, match="You must pass either str or Schema"):
+        deserializer = JSONDeserializer(1)
+
+
+def test_json_serializer_invalid_schema_type():
+    """
+    Ensures that the serializer raises a TypeError if an invalid schema type is provided.
+    """
+    with pytest.raises(TypeError, match="You must pass either str or Schema"):
+        deserializer = JSONSerializer(1, schema_registry_client=None)
diff --git a/tests/schema_registry/test_proto.py b/tests/schema_registry/test_proto.py
index a455f42..c47692b 100644
--- a/tests/schema_registry/test_proto.py
+++ b/tests/schema_registry/test_proto.py
@@ -22,9 +22,9 @@ import pytest
 
 from confluent_kafka.schema_registry.protobuf import (ProtobufSerializer,
                                                       ProtobufDeserializer,
-                                                      _create_msg_index)
-from tests.integration.schema_registry.gen import (DependencyTestProto_pb2,
-                                                   metadata_proto_pb2)
+                                                      _create_index_array)
+from tests.integration.schema_registry.data.proto import (DependencyTestProto_pb2,
+                                                          metadata_proto_pb2)
 
 
 @pytest.mark.parametrize("pb2, coordinates", [
@@ -34,13 +34,9 @@ from tests.integration.schema_registry.gen import (DependencyTestProto_pb2,
      [4, 0, 1, 2])  # [HdfsOptions, ImportOptions, Generator, KacohaConfig ]
 ])
 def test_create_index(pb2, coordinates):
-    msg_idx = _create_msg_index(pb2.DESCRIPTOR)
+    msg_idx = _create_index_array(pb2.DESCRIPTOR)
 
-    if coordinates == [0]:
-        assert msg_idx == coordinates
-    else:
-        assert msg_idx[0] == len(coordinates)
-        assert msg_idx[1:] == coordinates
+    assert msg_idx == coordinates
 
 
 @pytest.mark.parametrize("pb2", [
@@ -48,35 +44,42 @@ def test_create_index(pb2, coordinates):
     metadata_proto_pb2.ControlMessage.Watermark,
     metadata_proto_pb2.HDFSOptions.ImportOptions.Generator.KacohaConfig
 ])
-def test_index_serialization(pb2):
-    msg_idx = _create_msg_index(pb2.DESCRIPTOR)
+@pytest.mark.parametrize("zigzag", [True, False])
+def test_index_serialization(pb2, zigzag):
+    msg_idx = _create_index_array(pb2.DESCRIPTOR)
     buf = BytesIO()
-    ProtobufSerializer._encode_uvarints(buf, msg_idx)
+    ProtobufSerializer._encode_varints(buf, msg_idx, zigzag=zigzag)
     buf.flush()
 
     # reset buffer cursor
     buf.seek(0)
-    decoded_msg_idx = ProtobufDeserializer._decode_index(buf)
+    decoded_msg_idx = ProtobufDeserializer._read_index_array(buf, zigzag=zigzag)
     buf.close()
 
     assert decoded_msg_idx == msg_idx
 
 
-@pytest.mark.parametrize("msg_idx, expected_hex", [
-    ([1, 0], b'00'),   # b2a_hex always returns hex pairs
-    ([1, 1], b'01'),
-    ([1, 127], b'7f'),
-    ([1, 128], b'8001'),
-    ([1, 9223372036854775807], b'ffffffffffffffff7f')
+@pytest.mark.parametrize("msg_idx, zigzag, expected_hex", [
+    # b2a_hex returns hex pairs
+    ([0], True, b'00'),   # special case [0]
+    ([0], False, b'00'),  # special case [0]
+    ([1], True, b'0202'),
+    ([1], False, b'0101'),
+    ([127, 8, 9], True, b'06fe011012'),
+    ([127, 8, 9], False, b'037f0809'),
+    ([128], True, b'028002'),
+    ([128], False, b'018001'),
+    ([9223372036854775807], True, b'02feffffffffffffffff01'),
+    ([9223372036854775807], False, b'01ffffffffffffffff7f')
 ])
-def test_index_encoder(msg_idx, expected_hex):
+def test_index_encoder(msg_idx, zigzag, expected_hex):
     buf = BytesIO()
-    ProtobufSerializer._encode_uvarints(buf, msg_idx)
+    ProtobufSerializer._encode_varints(buf, msg_idx, zigzag=zigzag)
     buf.flush()
-    # ignore array length prefix
-    buf.seek(1)
+    buf.seek(0)
     assert binascii.b2a_hex(buf.read()) == expected_hex
 
     # reset reader and test decoder
     buf.seek(0)
-    assert msg_idx == ProtobufDeserializer._decode_index(buf)
+    decoded_msg_idx = ProtobufDeserializer._read_index_array(buf, zigzag=zigzag)
+    assert decoded_msg_idx == msg_idx
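
The test_index_encoder cases above pin down the exact wire bytes of the message-index array in both zigzag and plain varint form. The standalone sketch below reproduces those bytes with illustrative helpers (encode_varint, encode_index); it is not the library's internal _encode_varints implementation, just the same length-prefixed encoding spelled out.

    import binascii


    def encode_varint(value, zigzag):
        """Base-128 varint; zigzag folds the value first (a doubling for the
        non-negative message indexes used here)."""
        if zigzag:
            value = (value << 1) ^ (value >> 63)
        out = bytearray()
        while True:
            byte = value & 0x7F
            value >>= 7
            if value:
                out.append(byte | 0x80)  # continuation bit: more bytes follow
            else:
                out.append(byte)
                return bytes(out)


    def encode_index(indexes, zigzag):
        """Length-prefixed index array; [0] gets the single-byte short form."""
        if indexes == [0]:
            return b'\x00'
        buf = encode_varint(len(indexes), zigzag)
        for idx in indexes:
            buf += encode_varint(idx, zigzag)
        return buf


    # Matches the expected_hex values parametrized in test_index_encoder above.
    assert binascii.b2a_hex(encode_index([0], zigzag=True)) == b'00'
    assert binascii.b2a_hex(encode_index([127, 8, 9], zigzag=True)) == b'06fe011012'
    assert binascii.b2a_hex(encode_index([127, 8, 9], zigzag=False)) == b'037f0809'
    assert binascii.b2a_hex(encode_index([9223372036854775807], zigzag=False)) == b'01ffffffffffffffff7f'
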
diff --git a/tests/soak/README.md b/tests/soak/README.md
index 9eae249..a26a2fc 100644
--- a/tests/soak/README.md
+++ b/tests/soak/README.md
@@ -10,4 +10,22 @@ DataDog reporting supported by setting datadog.api_key a and datadog.app_key
 in the soak client configuration file.
 
 
-Use ubuntu-bootstrap.sh in this directory set up the environment (e.g., on ec2).
+There are some convenience scripts to get you started.
+
+On the host (e.g., an EC2 instance) where you aim to run the soak test, do:
+
+$ git clone https://github.com/confluentinc/librdkafka
+$ git clone https://github.com/confluentinc/confluent-kafka-python
+
+# Build librdkafka and python
+$ ~/confluent-kafka-python/tests/soak/build.sh <librdkafka-version> <cfl-python-version>
+
+# Set up config:
+$ cp ~/confluent-kafka-python/tests/soak/ccloud.config.example ~/confluent-kafka-python/ccloud.config
+
+# Start a screen session
+$ screen bash
+
+# Within the screen session, run the soak client
+(screen)$ ~/run.sh
+(screen)$ Ctrl-A d  # to detach
diff --git a/tests/soak/build.sh b/tests/soak/build.sh
new file mode 100755
index 0000000..795dcfb
--- /dev/null
+++ b/tests/soak/build.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+
+librdkafka_version=$1
+cflpy_version=$2
+
+if [[ -z $cflpy_version ]]; then
+    echo "Usage: $0 <librdkafka_version|tag|branch> <cfl-kafka-python-version|tag|branch>"
+    exit 1
+fi
+
+set -eu
+
+
+
+echo "Building and installing librdkafka $librdkafka_version"
+pushd librdkafka
+sudo make uninstall
+git fetch --tags
+git checkout $librdkafka_version
+./configure --reconfigure
+make clean
+make -j
+sudo make install
+popd
+
+
+echo "Building confluent-kafka-python $cflpy_version"
+set +u
+source venv/bin/activate
+set -u
+pushd confluent-kafka-python
+git fetch --tags
+git checkout $cflpy_version
+python3 setup.py clean -a
+python3 setup.py build
+python3 -m pip install .
+popd
+
+echo ""
+echo "=============================================================================="
+(cd / ; python3 -c 'import confluent_kafka as c; print("python", c.version(), "librdkafka", c.libversion())')
+
diff --git a/tests/soak/ccloud.config.example b/tests/soak/ccloud.config.example
new file mode 100644
index 0000000..328642a
--- /dev/null
+++ b/tests/soak/ccloud.config.example
@@ -0,0 +1,14 @@
+bootstrap.servers=<add your bootstraps here>
+sasl.mechanisms=PLAIN
+security.protocol=SASL_SSL
+sasl.username=<your ccloud access key>
+sasl.password=<your ccloud secret>
+enable.idempotence=true
+debug=eos,generic,broker,security,consumer
+linger.ms=2
+compression.type=lz4
+# DataDog options/config
+datadog.api_key=<datadog api key>
+datadog.app_key=<datadog app key>
+
+
diff --git a/tests/soak/run.sh b/tests/soak/run.sh
new file mode 100755
index 0000000..bac5fa8
--- /dev/null
+++ b/tests/soak/run.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+
+set -e
+source venv/bin/activate
+
+librdkafka_version=$(python3 -c 'from confluent_kafka import libversion; print(libversion()[0])')
+
+if [[ -z $librdkafka_version ]]; then
+    echo "No librdkafka version found.."
+    exit 1
+fi
+
+if [[ -z $STY ]]; then
+    echo "This script should be run from inside a screen session"
+    exit 1
+fi
+
+set -u
+topic="pysoak-$librdkafka_version"
+logfile="${topic}.log.bz2"
+
+echo "Starting soak client using topic $topic with logs written to $logfile"
+set +x
+time confluent-kafka-python/tests/soak/soakclient.py -t $topic -r 80 -f  confluent-kafka-python/ccloud.config 2>&1 \
+    | tee /dev/stderr | bzip2 > $logfile
+ret=$?
+echo "Python client exited with status $ret"
+exit $ret
+
+
diff --git a/tests/soak/soakclient.py b/tests/soak/soakclient.py
index 8fdfedf..e7e914c 100755
--- a/tests/soak/soakclient.py
+++ b/tests/soak/soakclient.py
@@ -45,6 +45,7 @@ import datadog
 
 class SoakRecord (object):
     """ A private record type, with JSON serializer and deserializer """
+
     def __init__(self, msgid, name=None):
         self.msgid = msgid
         if name is None:
@@ -222,7 +223,7 @@ class SoakClient (object):
 
             try:
                 # Deserialize message
-                record = SoakRecord.deserialize(msg.value()) # noqa unused variable
+                record = SoakRecord.deserialize(msg.value())  # noqa unused variable
             except ValueError as ex:
                 self.logger.info("consumer: Failed to deserialize message in "
                                  "{} [{}] at offset {} (headers {}): {}".format(
diff --git a/tests/system/Jenkinsfile b/tests/system/Jenkinsfile
index 2af4fd7..ba290c4 100644
--- a/tests/system/Jenkinsfile
+++ b/tests/system/Jenkinsfile
@@ -1,6 +1,6 @@
 def config = jobConfig {
     cron = '@midnight'
-    nodeLabel = 'docker-oraclejdk8'
+    nodeLabel = 'docker-debian-10-system-test-jdk8'
     realJobPrefixes = ['system-test-python-client']
     owner = 'client'
     slackChannel = 'clients-eng'
@@ -8,16 +8,20 @@ def config = jobConfig {
 }
 
 def job = {
+    def mavenSettingsFile = "${env.WORKSPACE_TMP}/maven-global-settings.xml"
+
     configureGitSSH("github/confluent_jenkins", "private_key")
     withVaultEnv([["artifactory/tools_jenkins", "user", "TOOLS_ARTIFACTORY_USER"],
         ["artifactory/tools_jenkins", "password", "TOOLS_ARTIFACTORY_PASSWORD"],
         ["sonatype/confluent", "user", "SONATYPE_OSSRH_USER"],
         ["sonatype/confluent", "password", "SONATYPE_OSSRH_PASSWORD"]]) {
-        withVaultFile([["maven/jenkins_maven_global_settings", "settings_xml",
-            "/home/jenkins/.m2/settings.xml", "MAVEN_GLOBAL_SETTINGS"],
-            ["muckrake/2017-06-01", "pem", "muckrake-2017-06-01.pem", "MUCKRAKE_PEM"]]) {
-            stage("Run tests") {
-                sh 'tests/system/run-tests.sh'
+        withVaultFile([["muckrake/2017-06-01", "pem", "muckrake-2017-06-01.pem", "MUCKRAKE_PEM"]]) {
+            withMavenSettings("maven/jenkins_maven_global_settings", "settings", "MAVEN_GLOBAL_SETTINGS", mavenSettingsFile) {
+                withMaven(globalMavenSettingsFilePath: mavenSettingsFile) {
+                    stage("Run tests") {
+                        sh 'tests/system/run-tests.sh'
+                    }
+                }
             }
         }
     }
diff --git a/tests/test_Admin.py b/tests/test_Admin.py
index c4a80b0..7b56653 100644
--- a/tests/test_Admin.py
+++ b/tests/test_Admin.py
@@ -1,21 +1,75 @@
 #!/usr/bin/env python
 import pytest
 
-from confluent_kafka.admin import AdminClient, NewTopic, NewPartitions, ConfigResource
-from confluent_kafka import KafkaException, KafkaError, libversion
-import confluent_kafka
+from confluent_kafka.admin import AdminClient, NewTopic, NewPartitions, \
+    ConfigResource, AclBinding, AclBindingFilter, ResourceType, ResourcePatternType, \
+    AclOperation, AclPermissionType
+from confluent_kafka import KafkaException, KafkaError, libversion, \
+    TopicPartition, ConsumerGroupTopicPartitions, ConsumerGroupState
 import concurrent.futures
 
 
 def test_types():
-    ConfigResource(confluent_kafka.admin.RESOURCE_BROKER, "2")
+    ConfigResource(ResourceType.BROKER, "2")
     ConfigResource("broker", "2")
-    ConfigResource(confluent_kafka.admin.RESOURCE_GROUP, "mygroup")
-    ConfigResource(confluent_kafka.admin.RESOURCE_TOPIC, "")
+    ConfigResource(ResourceType.GROUP, "mygroup")
+    ConfigResource(ResourceType.TOPIC, "")
     with pytest.raises(ValueError):
         ConfigResource("doesnt exist", "hi")
     with pytest.raises(ValueError):
-        ConfigResource(confluent_kafka.admin.RESOURCE_TOPIC, None)
+        ConfigResource(ResourceType.TOPIC, None)
+
+
+def test_acl_binding_type():
+    attrs = [ResourceType.TOPIC, "topic", ResourcePatternType.LITERAL,
+             "User:u1", "*", AclOperation.WRITE, AclPermissionType.ALLOW]
+
+    attrs_nullable_acl_binding_filter = [1, 3, 4]
+
+    # first verify that construction succeeds with all attributes set
+    AclBinding(*attrs)
+    for i, _ in enumerate(attrs):
+
+        # no attribute is nullable
+        attrs_copy = list(attrs)
+        attrs_copy[i] = None
+        with pytest.raises(ValueError):
+            AclBinding(*attrs_copy)
+
+        # string attributes of AclBindingFilter are nullable
+        if i in attrs_nullable_acl_binding_filter:
+            AclBindingFilter(*attrs_copy)
+        else:
+            with pytest.raises(ValueError):
+                AclBindingFilter(*attrs_copy)
+
+    for (attr_num, attr_value) in [
+        (0, ResourceType.ANY),
+        (2, ResourcePatternType.ANY),
+        (2, ResourcePatternType.MATCH),
+        (5, AclOperation.ANY),
+        (6, AclPermissionType.ANY),
+    ]:
+        attrs_copy = list(attrs)
+        attrs_copy[attr_num] = attr_value
+        # forbidden enums in AclBinding
+        with pytest.raises(ValueError):
+            AclBinding(*attrs_copy)
+
+        # AclBindingFilter can hold all the enum values
+        AclBindingFilter(*attrs_copy)
+
+    # UNKNOWN values are not forbidden, for received values
+    for (attr_num, attr_value) in [
+        (0, ResourceType.UNKNOWN),
+        (2, ResourcePatternType.UNKNOWN),
+        (2, ResourcePatternType.UNKNOWN),
+        (5, AclOperation.UNKNOWN),
+        (6, AclPermissionType.UNKNOWN),
+    ]:
+        attrs_copy = list(attrs)
+        attrs_copy[attr_num] = attr_value
+        AclBinding(*attrs_copy)
 
 
 @pytest.mark.skipif(libversion()[1] < 0x000b0500,
@@ -68,6 +122,11 @@ def test_create_topics_api():
     with pytest.raises(Exception):
         a.create_topics([None, NewTopic("mytopic", 1, 2)])
 
+    try:
+        a.create_topics([NewTopic("mytopic")])
+    except Exception as err:
+        assert False, f"When none of the partitions, \
+            replication and assignment is present, the request should not fail, but it does with error {err}"
     fs = a.create_topics([NewTopic("mytopic", 3, 2)])
     with pytest.raises(KafkaException):
         for f in concurrent.futures.as_completed(iter(fs.values())):
@@ -206,7 +265,7 @@ def test_describe_configs_api():
         is no broker configured. """
 
     a = AdminClient({"socket.timeout.ms": 10})
-    fs = a.describe_configs([ConfigResource(confluent_kafka.admin.RESOURCE_BROKER, "3")])
+    fs = a.describe_configs([ConfigResource(ResourceType.BROKER, "3")])
     # ignore the result
 
     with pytest.raises(Exception):
@@ -219,10 +278,10 @@ def test_describe_configs_api():
         a.describe_configs([])
 
     with pytest.raises(ValueError):
-        a.describe_configs([None, ConfigResource(confluent_kafka.admin.RESOURCE_TOPIC, "mytopic")])
+        a.describe_configs([None, ConfigResource(ResourceType.TOPIC, "mytopic")])
 
-    fs = a.describe_configs([ConfigResource(confluent_kafka.admin.RESOURCE_TOPIC, "mytopic"),
-                             ConfigResource(confluent_kafka.admin.RESOURCE_GROUP, "mygroup")],
+    fs = a.describe_configs([ConfigResource(ResourceType.TOPIC, "mytopic"),
+                             ConfigResource(ResourceType.GROUP, "mygroup")],
                             request_timeout=0.123)
     with pytest.raises(KafkaException):
         for f in concurrent.futures.as_completed(iter(fs.values())):
@@ -236,7 +295,7 @@ def test_alter_configs_api():
         is no broker configured. """
 
     a = AdminClient({"socket.timeout.ms": 10})
-    fs = a.alter_configs([ConfigResource(confluent_kafka.admin.RESOURCE_BROKER, "3",
+    fs = a.alter_configs([ConfigResource(ResourceType.BROKER, "3",
                                          set_config={"some": "config"})])
     # ignore the result
 
@@ -252,10 +311,389 @@ def test_alter_configs_api():
     fs = a.alter_configs([ConfigResource("topic", "mytopic",
                                          set_config={"set": "this",
                                                      "and": "this"}),
-                          ConfigResource(confluent_kafka.admin.RESOURCE_GROUP,
+                          ConfigResource(ResourceType.GROUP,
                                          "mygroup")],
                          request_timeout=0.123)
 
     with pytest.raises(KafkaException):
         for f in concurrent.futures.as_completed(iter(fs.values())):
             f.result(timeout=1)
+
+
+def test_create_acls_api():
+    """ create_acls() tests, these wont really do anything since there is no
+        broker configured. """
+
+    a = AdminClient({"socket.timeout.ms": 10})
+
+    acl_binding1 = AclBinding(ResourceType.TOPIC, "topic1", ResourcePatternType.LITERAL,
+                              "User:u1", "*", AclOperation.WRITE, AclPermissionType.ALLOW)
+    acl_binding2 = AclBinding(ResourceType.TOPIC, "topic2", ResourcePatternType.LITERAL,
+                              "User:u2", "*", AclOperation.READ, AclPermissionType.DENY)
+
+    f = a.create_acls([acl_binding1],
+                      request_timeout=10.0)
+    # ignore the result
+
+    with pytest.raises(TypeError):
+        a.create_acls(None)
+
+    with pytest.raises(ValueError):
+        a.create_acls("topic")
+
+    with pytest.raises(ValueError):
+        a.create_acls([])
+
+    with pytest.raises(ValueError):
+        a.create_acls(["topic"])
+
+    with pytest.raises(ValueError):
+        a.create_acls([None, "topic"])
+
+    with pytest.raises(ValueError):
+        a.create_acls([None, acl_binding1])
+
+    with pytest.raises(ValueError):
+        a.create_acls([acl_binding1, acl_binding1])
+
+    fs = a.create_acls([acl_binding1, acl_binding2])
+    with pytest.raises(KafkaException):
+        for f in fs.values():
+            f.result(timeout=1)
+
+    fs = a.create_acls([acl_binding1, acl_binding2],
+                       request_timeout=0.5)
+    for f in concurrent.futures.as_completed(iter(fs.values())):
+        e = f.exception(timeout=1)
+        assert isinstance(e, KafkaException)
+        assert e.args[0].code() == KafkaError._TIMED_OUT
+
+    with pytest.raises(ValueError):
+        a.create_acls([acl_binding1],
+                      request_timeout=-5)
+
+    with pytest.raises(TypeError):
+        a.create_acls([acl_binding1],
+                      unknown_operation="it is")
+
+
+def test_delete_acls_api():
+    """ delete_acls() tests, these wont really do anything since there is no
+        broker configured. """
+
+    a = AdminClient({"socket.timeout.ms": 10})
+
+    acl_binding_filter1 = AclBindingFilter(ResourceType.ANY, None, ResourcePatternType.ANY,
+                                           None, None, AclOperation.ANY, AclPermissionType.ANY)
+    acl_binding_filter2 = AclBindingFilter(ResourceType.ANY, "topic2", ResourcePatternType.MATCH,
+                                           None, "*", AclOperation.WRITE, AclPermissionType.ALLOW)
+
+    fs = a.delete_acls([acl_binding_filter1])
+    # ignore the result
+
+    with pytest.raises(TypeError):
+        a.delete_acls(None)
+
+    with pytest.raises(ValueError):
+        a.delete_acls([])
+
+    with pytest.raises(ValueError):
+        a.delete_acls([None, acl_binding_filter1])
+
+    with pytest.raises(ValueError):
+        a.delete_acls([acl_binding_filter1, acl_binding_filter1])
+
+    fs = a.delete_acls([acl_binding_filter1, acl_binding_filter2])
+    with pytest.raises(KafkaException):
+        for f in concurrent.futures.as_completed(iter(fs.values())):
+            f.result(timeout=1)
+
+    fs = a.delete_acls([acl_binding_filter1, acl_binding_filter2],
+                       request_timeout=0.5)
+    for f in concurrent.futures.as_completed(iter(fs.values())):
+        e = f.exception(timeout=1)
+        assert isinstance(e, KafkaException)
+        assert e.args[0].code() == KafkaError._TIMED_OUT
+
+    with pytest.raises(ValueError):
+        a.create_acls([acl_binding_filter1],
+                      request_timeout=-5)
+
+    with pytest.raises(TypeError):
+        a.delete_acls([acl_binding_filter1],
+                      unknown_operation="it is")
+
+
+def test_describe_acls_api():
+    """ describe_acls() tests, these wont really do anything since there is no
+        broker configured. """
+
+    a = AdminClient({"socket.timeout.ms": 10})
+
+    acl_binding_filter1 = AclBindingFilter(ResourceType.ANY, None, ResourcePatternType.ANY,
+                                           None, None, AclOperation.ANY, AclPermissionType.ANY)
+    acl_binding1 = AclBinding(ResourceType.TOPIC, "topic1", ResourcePatternType.LITERAL,
+                              "User:u1", "*", AclOperation.WRITE, AclPermissionType.ALLOW)
+
+    a.describe_acls(acl_binding_filter1)
+    # ignore the result
+
+    with pytest.raises(TypeError):
+        a.describe_acls(None)
+
+    with pytest.raises(TypeError):
+        a.describe_acls(acl_binding1)
+
+    f = a.describe_acls(acl_binding_filter1)
+    with pytest.raises(KafkaException):
+        f.result(timeout=1)
+
+    f = a.describe_acls(acl_binding_filter1,
+                        request_timeout=0.5)
+    e = f.exception(timeout=1)
+    assert isinstance(e, KafkaException)
+    assert e.args[0].code() == KafkaError._TIMED_OUT
+
+    with pytest.raises(ValueError):
+        a.describe_acls(acl_binding_filter1,
+                        request_timeout=-5)
+
+    with pytest.raises(TypeError):
+        a.describe_acls(acl_binding_filter1,
+                        unknown_operation="it is")
+
+
+def test_list_consumer_groups_api():
+    a = AdminClient({"socket.timeout.ms": 10})
+
+    a.list_consumer_groups()
+
+    a.list_consumer_groups(states={ConsumerGroupState.EMPTY, ConsumerGroupState.STABLE})
+
+    with pytest.raises(TypeError):
+        a.list_consumer_groups(states="EMPTY")
+
+    with pytest.raises(TypeError):
+        a.list_consumer_groups(states=["EMPTY"])
+
+    with pytest.raises(TypeError):
+        a.list_consumer_groups(states=[ConsumerGroupState.EMPTY, ConsumerGroupState.STABLE])
+
+
+def test_describe_consumer_groups_api():
+    a = AdminClient({"socket.timeout.ms": 10})
+
+    group_ids = ["test-group-1", "test-group-2"]
+
+    a.describe_consumer_groups(group_ids)
+
+    with pytest.raises(TypeError):
+        a.describe_consumer_groups("test-group-1")
+
+    with pytest.raises(ValueError):
+        a.describe_consumer_groups([])
+
+
+def test_delete_consumer_groups_api():
+    a = AdminClient({"socket.timeout.ms": 10})
+
+    group_ids = ["test-group-1", "test-group-2"]
+
+    a.delete_consumer_groups(group_ids)
+
+    with pytest.raises(TypeError):
+        a.delete_consumer_groups("test-group-1")
+
+    with pytest.raises(ValueError):
+        a.delete_consumer_groups([])
+
+
+def test_list_consumer_group_offsets_api():
+
+    a = AdminClient({"socket.timeout.ms": 10})
+
+    only_group_id_request = ConsumerGroupTopicPartitions("test-group1")
+    request_with_group_and_topic_partition = ConsumerGroupTopicPartitions(
+        "test-group2", [TopicPartition("test-topic1", 1)])
+    same_name_request = ConsumerGroupTopicPartitions("test-group2", [TopicPartition("test-topic1", 3)])
+
+    a.list_consumer_group_offsets([only_group_id_request])
+
+    with pytest.raises(TypeError):
+        a.list_consumer_group_offsets(None)
+
+    with pytest.raises(TypeError):
+        a.list_consumer_group_offsets(1)
+
+    with pytest.raises(TypeError):
+        a.list_consumer_group_offsets("")
+
+    with pytest.raises(ValueError):
+        a.list_consumer_group_offsets([])
+
+    with pytest.raises(ValueError):
+        a.list_consumer_group_offsets([only_group_id_request,
+                                       request_with_group_and_topic_partition])
+
+    with pytest.raises(ValueError):
+        a.list_consumer_group_offsets([request_with_group_and_topic_partition,
+                                       same_name_request])
+
+    fs = a.list_consumer_group_offsets([only_group_id_request])
+    with pytest.raises(KafkaException):
+        for f in fs.values():
+            f.result(timeout=10)
+
+    fs = a.list_consumer_group_offsets([only_group_id_request],
+                                       request_timeout=0.5)
+    for f in concurrent.futures.as_completed(iter(fs.values())):
+        e = f.exception(timeout=1)
+        assert isinstance(e, KafkaException)
+        assert e.args[0].code() == KafkaError._TIMED_OUT
+
+    with pytest.raises(ValueError):
+        a.list_consumer_group_offsets([only_group_id_request],
+                                      request_timeout=-5)
+
+    with pytest.raises(TypeError):
+        a.list_consumer_group_offsets([ConsumerGroupTopicPartitions()])
+
+    with pytest.raises(TypeError):
+        a.list_consumer_group_offsets([ConsumerGroupTopicPartitions(1)])
+
+    with pytest.raises(TypeError):
+        a.list_consumer_group_offsets([ConsumerGroupTopicPartitions(None)])
+
+    with pytest.raises(TypeError):
+        a.list_consumer_group_offsets([ConsumerGroupTopicPartitions([])])
+
+    with pytest.raises(ValueError):
+        a.list_consumer_group_offsets([ConsumerGroupTopicPartitions("")])
+
+    with pytest.raises(TypeError):
+        a.list_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1", "test-topic")])
+
+    with pytest.raises(ValueError):
+        a.list_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1", [])])
+
+    with pytest.raises(ValueError):
+        a.list_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1", [None])])
+
+    with pytest.raises(TypeError):
+        a.list_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1", ["test"])])
+
+    with pytest.raises(TypeError):
+        a.list_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1", [TopicPartition(None)])])
+
+    with pytest.raises(ValueError):
+        a.list_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1", [TopicPartition("")])])
+
+    with pytest.raises(ValueError):
+        a.list_consumer_group_offsets([ConsumerGroupTopicPartitions(
+            "test-group1", [TopicPartition("test-topic", -1)])])
+
+    with pytest.raises(ValueError):
+        a.list_consumer_group_offsets([ConsumerGroupTopicPartitions(
+            "test-group1", [TopicPartition("test-topic", 1, 1)])])
+
+    a.list_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1")])
+    a.list_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group2", [TopicPartition("test-topic1", 1)])])
+
+
+def test_alter_consumer_group_offsets_api():
+
+    a = AdminClient({"socket.timeout.ms": 10})
+
+    request_with_group_and_topic_partition_offset1 = ConsumerGroupTopicPartitions(
+        "test-group1", [TopicPartition("test-topic1", 1, 5)])
+    same_name_request = ConsumerGroupTopicPartitions("test-group1", [TopicPartition("test-topic2", 4, 3)])
+    request_with_group_and_topic_partition_offset2 = ConsumerGroupTopicPartitions(
+        "test-group2", [TopicPartition("test-topic2", 1, 5)])
+
+    a.alter_consumer_group_offsets([request_with_group_and_topic_partition_offset1])
+
+    with pytest.raises(TypeError):
+        a.alter_consumer_group_offsets(None)
+
+    with pytest.raises(TypeError):
+        a.alter_consumer_group_offsets(1)
+
+    with pytest.raises(TypeError):
+        a.alter_consumer_group_offsets("")
+
+    with pytest.raises(ValueError):
+        a.alter_consumer_group_offsets([])
+
+    with pytest.raises(ValueError):
+        a.alter_consumer_group_offsets([request_with_group_and_topic_partition_offset1,
+                                       request_with_group_and_topic_partition_offset2])
+
+    with pytest.raises(ValueError):
+        a.alter_consumer_group_offsets([request_with_group_and_topic_partition_offset1,
+                                        same_name_request])
+
+    fs = a.alter_consumer_group_offsets([request_with_group_and_topic_partition_offset1])
+    with pytest.raises(KafkaException):
+        for f in fs.values():
+            f.result(timeout=10)
+
+    fs = a.alter_consumer_group_offsets([request_with_group_and_topic_partition_offset1],
+                                        request_timeout=0.5)
+    for f in concurrent.futures.as_completed(iter(fs.values())):
+        e = f.exception(timeout=1)
+        assert isinstance(e, KafkaException)
+        assert e.args[0].code() == KafkaError._TIMED_OUT
+
+    with pytest.raises(ValueError):
+        a.alter_consumer_group_offsets([request_with_group_and_topic_partition_offset1],
+                                       request_timeout=-5)
+
+    with pytest.raises(TypeError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions()])
+
+    with pytest.raises(TypeError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions(1)])
+
+    with pytest.raises(TypeError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions(None)])
+
+    with pytest.raises(TypeError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions([])])
+
+    with pytest.raises(ValueError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions("")])
+
+    with pytest.raises(ValueError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1")])
+
+    with pytest.raises(TypeError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1", "test-topic")])
+
+    with pytest.raises(ValueError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1", [])])
+
+    with pytest.raises(ValueError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1", [None])])
+
+    with pytest.raises(TypeError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1", ["test"])])
+
+    with pytest.raises(TypeError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1", [TopicPartition(None)])])
+
+    with pytest.raises(ValueError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1", [TopicPartition("")])])
+
+    with pytest.raises(ValueError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions("test-group1", [TopicPartition("test-topic")])])
+
+    with pytest.raises(ValueError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions(
+            "test-group1", [TopicPartition("test-topic", -1)])])
+
+    with pytest.raises(ValueError):
+        a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions(
+            "test-group1", [TopicPartition("test-topic", 1, -1001)])])
+
+    a.alter_consumer_group_offsets([ConsumerGroupTopicPartitions(
+        "test-group2", [TopicPartition("test-topic1", 1, 23)])])
diff --git a/tests/test_Consumer.py b/tests/test_Consumer.py
index e91a113..a939af1 100644
--- a/tests/test_Consumer.py
+++ b/tests/test_Consumer.py
@@ -230,7 +230,7 @@ def test_multiple_close_does_not_throw_exception():
 
 
 def test_any_method_after_close_throws_exception():
-    """ Calling any consumer method after close should thorw a RuntimeError
+    """ Calling any consumer method after close should throw a RuntimeError
     """
     c = Consumer({'group.id': 'test',
                   'enable.auto.commit': True,
diff --git a/tests/test_Producer.py b/tests/test_Producer.py
index ef82b41..4c45c56 100644
--- a/tests/test_Producer.py
+++ b/tests/test_Producer.py
@@ -206,8 +206,8 @@ def test_transaction_api():
     # Any subsequent APIs will fail since init did not succeed.
     with pytest.raises(KafkaException) as ex:
         p.begin_transaction()
-    assert ex.value.args[0].code() == KafkaError._STATE
-    assert ex.value.args[0].retriable() is False
+    assert ex.value.args[0].code() == KafkaError._CONFLICT
+    assert ex.value.args[0].retriable() is True
     assert ex.value.args[0].fatal() is False
     assert ex.value.args[0].txn_requires_abort() is False
 
@@ -218,22 +218,22 @@ def test_transaction_api():
     with pytest.raises(KafkaException) as ex:
         p.send_offsets_to_transaction([TopicPartition("topic", 0, 123)],
                                       group_metadata)
-    assert ex.value.args[0].code() == KafkaError._STATE
-    assert ex.value.args[0].retriable() is False
+    assert ex.value.args[0].code() == KafkaError._CONFLICT
+    assert ex.value.args[0].retriable() is True
     assert ex.value.args[0].fatal() is False
     assert ex.value.args[0].txn_requires_abort() is False
 
     with pytest.raises(KafkaException) as ex:
         p.commit_transaction(0.5)
-    assert ex.value.args[0].code() == KafkaError._STATE
-    assert ex.value.args[0].retriable() is False
+    assert ex.value.args[0].code() == KafkaError._CONFLICT
+    assert ex.value.args[0].retriable() is True
     assert ex.value.args[0].fatal() is False
     assert ex.value.args[0].txn_requires_abort() is False
 
     with pytest.raises(KafkaException) as ex:
         p.abort_transaction(0.5)
-    assert ex.value.args[0].code() == KafkaError._STATE
-    assert ex.value.args[0].retriable() is False
+    assert ex.value.args[0].code() == KafkaError._CONFLICT
+    assert ex.value.args[0].retriable() is True
     assert ex.value.args[0].fatal() is False
     assert ex.value.args[0].txn_requires_abort() is False
 
@@ -271,3 +271,13 @@ def test_purge():
     p.purge()
     p.flush(0.002)
     assert cb_detector["on_delivery_called"]
+
+
+def test_producer_bool_value():
+    """
+    Make sure producer has a truth-y bool value
+    See https://github.com/confluentinc/confluent-kafka-python/issues/1427
+    """
+
+    p = Producer({})
+    assert bool(p)
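
The updated assertions above show that transactional calls made before
init_transactions() has completed now fail with KafkaError._CONFLICT and are
marked retriable, instead of the previous non-retriable _STATE. A hedged
sketch (not part of the diff) of branching on those error classes; the broker
address, topic and transactional.id are placeholders.

from confluent_kafka import Producer, KafkaException

p = Producer({"transactional.id": "example-txn",
              "bootstrap.servers": "localhost:9092"})
p.init_transactions()

try:
    p.begin_transaction()
    p.produce("example-topic", b"payload")
    p.commit_transaction()
except KafkaException as e:
    err = e.args[0]
    if err.txn_requires_abort():
        p.abort_transaction()  # abortable: roll back, then retry the whole batch
    elif err.retriable():
        pass                   # transient (e.g. _CONFLICT): retry the failed call
    elif err.fatal():
        raise                  # unrecoverable: recreate the producer
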
diff --git a/tests/test_log.py b/tests/test_log.py
index 60b27e3..6cd5108 100644
--- a/tests/test_log.py
+++ b/tests/test_log.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 
+from io import StringIO
 import confluent_kafka
 import confluent_kafka.avro
 import logging
@@ -114,3 +115,46 @@ def test_logging_constructor():
             p.poll(timeout=0.5)
 
         print('%s: %s: %d log messages seen' % (how, f.name, f.cnt))
+
+
+def test_producer_logger_logging_in_given_format():
+    """Test that asserts that logging is working by matching part of the log message"""
+
+    stringBuffer = StringIO()
+    logger = logging.getLogger('Producer')
+    logger.setLevel(logging.DEBUG)
+    handler = logging.StreamHandler(stringBuffer)
+    handler.setFormatter(logging.Formatter('%(name)s Logger | %(message)s'))
+    logger.addHandler(handler)
+
+    p = confluent_kafka.Producer(
+        {"bootstrap.servers": "test", "logger": logger, "debug": "msg"})
+    val = 1
+    while val > 0:
+        val = p.flush()
+    logMessage = stringBuffer.getvalue().strip()
+    stringBuffer.close()
+    print(logMessage)
+
+    assert "Producer Logger | INIT" in logMessage
+
+
+def test_consumer_logger_logging_in_given_format():
+    """Test that asserts that logging is working by matching part of the log message"""
+
+    stringBuffer = StringIO()
+    logger = logging.getLogger('Consumer')
+    logger.setLevel(logging.DEBUG)
+    handler = logging.StreamHandler(stringBuffer)
+    handler.setFormatter(logging.Formatter('%(name)s Logger | %(message)s'))
+    logger.addHandler(handler)
+
+    c = confluent_kafka.Consumer(
+        {"bootstrap.servers": "test", "group.id": "test", "logger": logger, "debug": "msg"})
+    c.poll(0)
+
+    logMessage = stringBuffer.getvalue().strip()
+    stringBuffer.close()
+    c.close()
+
+    assert "Consumer Logger | INIT" in logMessage
diff --git a/tests/test_misc.py b/tests/test_misc.py
index cdf1147..aca7b5a 100644
--- a/tests/test_misc.py
+++ b/tests/test_misc.py
@@ -24,22 +24,18 @@ def test_version():
     assert confluent_kafka.version()[0] == confluent_kafka.__version__
 
 
-# global variable for error_cb call back function
-seen_error_cb = False
-
-
 def test_error_cb():
     """ Tests error_cb. """
+    seen_error_cb = False
 
     def error_cb(error_msg):
-        global seen_error_cb
+        nonlocal seen_error_cb
         seen_error_cb = True
         acceptable_error_codes = (confluent_kafka.KafkaError._TRANSPORT, confluent_kafka.KafkaError._ALL_BROKERS_DOWN)
         assert error_msg.code() in acceptable_error_codes
 
     conf = {'bootstrap.servers': 'localhost:65531',  # Purposely cause connection refused error
             'group.id': 'test',
-            'socket.timeout.ms': '100',
             'session.timeout.ms': 1000,  # Avoid close() blocking too long
             'error_cb': error_cb
             }
@@ -47,26 +43,22 @@ def test_error_cb():
     kc = confluent_kafka.Consumer(**conf)
     kc.subscribe(["test"])
     while not seen_error_cb:
-        kc.poll(timeout=1)
+        kc.poll(timeout=0.1)
 
     kc.close()
 
 
-# global variable for stats_cb call back function
-seen_stats_cb = False
-
-
 def test_stats_cb():
     """ Tests stats_cb. """
+    seen_stats_cb = False
 
     def stats_cb(stats_json_str):
-        global seen_stats_cb
+        nonlocal seen_stats_cb
         seen_stats_cb = True
         stats_json = json.loads(stats_json_str)
         assert len(stats_json['name']) > 0
 
     conf = {'group.id': 'test',
-            'socket.timeout.ms': '100',
             'session.timeout.ms': 1000,  # Avoid close() blocking too long
             'statistics.interval.ms': 200,
             'stats_cb': stats_cb
@@ -76,22 +68,20 @@ def test_stats_cb():
 
     kc.subscribe(["test"])
     while not seen_stats_cb:
-        kc.poll(timeout=1)
+        kc.poll(timeout=0.1)
     kc.close()
 
 
-seen_stats_cb_check_no_brokers = False
-
-
 def test_conf_none():
     """ Issue #133
     Test that None can be passed for NULL by setting bootstrap.servers
     to None. If None would be converted to a string then a broker would
     show up in statistics. Verify that it doesnt. """
+    seen_stats_cb_check_no_brokers = False
 
     def stats_cb_check_no_brokers(stats_json_str):
         """ Make sure no brokers are reported in stats """
-        global seen_stats_cb_check_no_brokers
+        nonlocal seen_stats_cb_check_no_brokers
         stats = json.loads(stats_json_str)
         assert len(stats['brokers']) == 0, "expected no brokers in stats: %s" % stats_json_str
         seen_stats_cb_check_no_brokers = True
@@ -101,9 +91,8 @@ def test_conf_none():
             'stats_cb': stats_cb_check_no_brokers}
 
     p = confluent_kafka.Producer(conf)
-    p.poll(timeout=1)
+    p.poll(timeout=0.1)
 
-    global seen_stats_cb_check_no_brokers
     assert seen_stats_cb_check_no_brokers
 
 
@@ -130,15 +119,12 @@ def test_throttle_event_types():
     assert str(throttle_event) == "broker/0 throttled for 10000 ms"
 
 
-# global variable for oauth_cb call back function
-seen_oauth_cb = False
-
-
 def test_oauth_cb():
     """ Tests oauth_cb. """
+    seen_oauth_cb = False
 
     def oauth_cb(oauth_config):
-        global seen_oauth_cb
+        nonlocal seen_oauth_cb
         seen_oauth_cb = True
         assert oauth_config == 'oauth_cb'
         return 'token', time.time() + 300.0
@@ -146,7 +132,6 @@ def test_oauth_cb():
     conf = {'group.id': 'test',
             'security.protocol': 'sasl_plaintext',
             'sasl.mechanisms': 'OAUTHBEARER',
-            'socket.timeout.ms': '100',
             'session.timeout.ms': 1000,  # Avoid close() blocking too long
             'sasl.oauthbearer.config': 'oauth_cb',
             'oauth_cb': oauth_cb
@@ -155,7 +140,59 @@ def test_oauth_cb():
     kc = confluent_kafka.Consumer(**conf)
 
     while not seen_oauth_cb:
-        kc.poll(timeout=1)
+        kc.poll(timeout=0.1)
+    kc.close()
+
+
+def test_oauth_cb_principal_sasl_extensions():
+    """ Tests oauth_cb. """
+    seen_oauth_cb = False
+
+    def oauth_cb(oauth_config):
+        nonlocal seen_oauth_cb
+        seen_oauth_cb = True
+        assert oauth_config == 'oauth_cb'
+        return 'token', time.time() + 300.0, oauth_config, {"extone": "extoneval", "exttwo": "exttwoval"}
+
+    conf = {'group.id': 'test',
+            'security.protocol': 'sasl_plaintext',
+            'sasl.mechanisms': 'OAUTHBEARER',
+            'session.timeout.ms': 100,  # Avoid close() blocking too long
+            'sasl.oauthbearer.config': 'oauth_cb',
+            'oauth_cb': oauth_cb
+            }
+
+    kc = confluent_kafka.Consumer(**conf)
+
+    while not seen_oauth_cb:
+        kc.poll(timeout=0.1)
+    kc.close()
+
+
+def test_oauth_cb_failure():
+    """ Tests oauth_cb. """
+    oauth_cb_count = 0
+
+    def oauth_cb(oauth_config):
+        nonlocal oauth_cb_count
+        oauth_cb_count += 1
+        assert oauth_config == 'oauth_cb'
+        if oauth_cb_count == 2:
+            return 'token', time.time() + 100.0, oauth_config, {"extthree": "extthreeval"}
+        raise Exception
+
+    conf = {'group.id': 'test',
+            'security.protocol': 'sasl_plaintext',
+            'sasl.mechanisms': 'OAUTHBEARER',
+            'session.timeout.ms': 1000,  # Avoid close() blocking too long
+            'sasl.oauthbearer.config': 'oauth_cb',
+            'oauth_cb': oauth_cb
+            }
+
+    kc = confluent_kafka.Consumer(**conf)
+
+    while oauth_cb_count < 2:
+        kc.poll(timeout=0.1)
     kc.close()
 
 
@@ -194,11 +231,9 @@ def test_unordered_dict(init_func):
     client.poll(0)
 
 
-# global variable for on_delivery call back function
-seen_delivery_cb = False
-
-
 def test_topic_config_update():
+    seen_delivery_cb = False
+
     # *NOTE* default.topic.config has been deprecated.
     # This example remains to ensure backward-compatibility until its removal.
     confs = [{"message.timeout.ms": 600000, "default.topic.config": {"message.timeout.ms": 1000}},
@@ -207,7 +242,7 @@ def test_topic_config_update():
 
     def on_delivery(err, msg):
         # Since there is no broker, produced messages should time out.
-        global seen_delivery_cb
+        nonlocal seen_delivery_cb
         seen_delivery_cb = True
         assert err.code() == confluent_kafka.KafkaError._MSG_TIMED_OUT
 
@@ -227,3 +262,21 @@ def test_topic_config_update():
         if "CI" in os.environ:
             pytest.xfail("Timeout exceeded")
         pytest.fail("Timeout exceeded")
+
+
+def test_set_sasl_credentials_api():
+    clients = [
+        AdminClient({}),
+        confluent_kafka.Consumer({"group.id": "dummy"}),
+        confluent_kafka.Producer({})]
+
+    for c in clients:
+        c.set_sasl_credentials('username', 'password')
+
+        c.set_sasl_credentials('override', 'override')
+
+        with pytest.raises(TypeError):
+            c.set_sasl_credentials(None, 'password')
+
+        with pytest.raises(TypeError):
+            c.set_sasl_credentials('username', None)
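
The new tests above cover an oauth_cb that additionally returns a principal and
SASL extensions, and the new set_sasl_credentials() API. A minimal sketch (not
part of the diff) of both; the token, principal, extensions and credentials are
placeholders.

import time
import confluent_kafka

def oauth_cb(config_str):
    # config_str is the opaque 'sasl.oauthbearer.config' value.
    token = "example-token"             # placeholder; fetch a real token here
    expiry = time.time() + 300.0
    principal = "example-principal"
    extensions = {"tenant": "example"}  # optional SASL extensions
    return token, expiry, principal, extensions

consumer = confluent_kafka.Consumer({
    "group.id": "example",
    "security.protocol": "sasl_plaintext",
    "sasl.mechanisms": "OAUTHBEARER",
    "sasl.oauthbearer.config": "oauth_cb",
    "oauth_cb": oauth_cb,
})
consumer.poll(0.1)  # the callback is invoked from poll()

# Rotate SASL/PLAIN or SCRAM credentials at runtime on any client type.
producer = confluent_kafka.Producer({})
producer.set_sasl_credentials("username", "password")
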
diff --git a/tools/RELEASE.md b/tools/RELEASE.md
index 90511da..72bc622 100644
--- a/tools/RELEASE.md
+++ b/tools/RELEASE.md
@@ -8,10 +8,10 @@ confluent-kafka-python uses semver versioning and loosely follows
 librdkafka's version, e.g. v0.11.4 for the final release and
 v0.11.4rc3 for the 3rd v0.11.4 release candidate.
 
-With the addition of prebuilt binary wheels we make use of travis-ci.org
-to build OSX, Linux and Winodws binaries which are uploaded to Confluent's
-private S3 bucket. These artifacts are downloaded by the `tools/download-s3.py` script
-and then uploaded manually to PyPi.
+With the addition of prebuilt binary wheels we make use of Semaphore CI
+to build OSX, Linux and Windows binaries, which are uploaded to the build's
+artifact directory. These artifacts are downloaded and then uploaded manually
+to PyPI.
 
 **Note**: Python package versions use a lowercase `rcN` suffix to indicate
           release candidates while librdkafka uses `-RCN`. The Python format
@@ -116,7 +116,7 @@ tag (e.g., v0.11.4-RC5).
 
 Change to the latest librdkafka version in the following files:
 
- * `.travis.yml`
+ * `.semaphore/semaphore.yml`
  * `examples/docker/Dockerfile.alpine`
 
 Change to the latest version of the confluent-librdkafka-plugins in (this step
@@ -126,7 +126,7 @@ is usually not necessary):
 
 Commit these changes as necessary:
 
-    $ git commit -m "librdkafka version v0.11.4-RC5" .travis.yml examples/docker/Dockerfile.alpine
+    $ git commit -m "librdkafka version v0.11.4-RC5" .semaphore/semaphore.yml examples/docker/Dockerfile.alpine
     $ git commit -m "confluent-librdkafka-plugins version v0.11.0" tools/install-interceptors.sh
 
 
@@ -183,10 +183,10 @@ be removed after the build passes.
 **TEST ITERATION**:
 
     # Repeat with new tags until all build issues are solved.
-    $ git tag v0.11.4rc1-test2
+    $ git tag v0.11.4rc1-dev2
 
     # Delete any previous test tag you've created.
-    $ git tag tag -d v0.11.4rc1-test1
+    $ git tag -d v0.11.4rc1-dev1
 
 
 **CANDIDATE ITERATION**:
@@ -221,8 +221,8 @@ Remove `--dry-run` when you're happy with the results.
 
 ### 5.3. Wait for CI builds to complete
 
-Monitor travis-ci builds by looking at the *tag* build at
-[travis-ci](https://travis-ci.org/confluentinc/confluent-kafka-python)
+Monitor Semaphore CI builds by looking at the *tag* build at
+[Semaphore CI](https://confluentinc.semaphoreci.com/projects/confluent-kafka-python)
 
 CI jobs are flaky and may fail temporarily. If you see a temporary build error,
 e.g., a timeout, restart the specific job.
@@ -231,18 +231,13 @@ If there are permanent errors, fix them and then go back to 5.1. to create
 and push a new test tag. Don't forget to delete your previous test tag.
 
 
-### 5.4. Download build artifacts from S3
-
-*Note*: You will need set up your AWS credentials in `~/.aws/credentials` to
-        gain access to the S3 bucket.
+### 5.4. Download build artifacts
 
 When all CI builds are successful it is time to download the resulting
-artifacts from S3 using:
-
-    $ tools/download-s3.py v0.11.4rc1  # replace with your tagged version
-
-The artifacts will be downloaded to `dl-<tag>/`.
+artifacts from the build's Artifacts directory, found in a separate tab of the build.
 
+**Note:** The artifacts should be extracted in the folder `tools\dl-<tag>` for
+subsequent steps to work properly.
 
 ### 5.5. Verify packages
 
@@ -377,6 +372,14 @@ Write a tweet to announce the new release, something like:
 Create a PR to update the confluent-kafka-python version tag for the
 Python API docs on docs.confluent.io.
 
+    # Update the Python API docs to the latest version: includes
+      https://github.com/confluentinc/docs and
+      https://github.com/confluentinc/docs-platform.
+
+    # Update docs.confluent.io: cut the docs release branch of
+      https://github.com/confluentinc/docs-clients-confluent-kafka-python,
+      see https://confluentinc.atlassian.net/wiki/spaces/TOOLS/pages/2044330444/Create+a+new+version+of+a+documentation+repo#Create-new-release-branches.
+
 
 ### 6.3. Done!
 
diff --git a/tools/bootstrap-librdkafka.sh b/tools/bootstrap-librdkafka.sh
index bbfc6b4..a8c0e01 100755
--- a/tools/bootstrap-librdkafka.sh
+++ b/tools/bootstrap-librdkafka.sh
@@ -29,7 +29,7 @@ mkdir -p "$BUILDDIR/librdkafka"
 pushd "$BUILDDIR/librdkafka"
 
 test -f configure ||
-curl -q -L "https://github.com/edenhill/librdkafka/archive/${VERSION}.tar.gz" | \
+curl -q -L "https://github.com/confluentinc/librdkafka/archive/refs/tags/${VERSION}.tar.gz" | \
     tar -xz --strip-components=1 -f -
 
 ./configure --clean
diff --git a/tools/build-manylinux.sh b/tools/build-manylinux.sh
index 92e9dab..7df2c7d 100755
--- a/tools/build-manylinux.sh
+++ b/tools/build-manylinux.sh
@@ -33,7 +33,13 @@ if [[ ! -f /.dockerenv ]]; then
         exit 1
     fi
 
-    docker run -t -v $(pwd):/io quay.io/pypa/manylinux2010_x86_64:latest  /io/tools/build-manylinux.sh "$LIBRDKAFKA_VERSION"
+    if [[ $ARCH == arm64* ]]; then
+        docker_image=quay.io/pypa/manylinux_2_28_aarch64:latest
+    else
+        docker_image=quay.io/pypa/manylinux_2_28_x86_64:latest
+    fi
+
+    docker run -t -v $(pwd):/io $docker_image  /io/tools/build-manylinux.sh "v${LIBRDKAFKA_VERSION}"
 
     exit $?
 fi
@@ -44,14 +50,14 @@ fi
 #
 
 echo "# Installing basic system dependencies"
-yum install -y zlib-devel gcc-c++
+yum install -y zlib-devel gcc-c++ python3 curl-devel perl-IPC-Cmd perl-Pod-Html
 
 echo "# Building librdkafka ${LIBRDKAFKA_VERSION}"
 $(dirname $0)/bootstrap-librdkafka.sh --require-ssl ${LIBRDKAFKA_VERSION} /usr
 
 # Compile wheels
 echo "# Compile"
-for PYBIN in /opt/python/*/bin; do
+for PYBIN in /opt/python/cp*/bin; do
     echo "## Compiling $PYBIN"
     CFLAGS="-Werror -Wno-strict-aliasing -Wno-parentheses" \
           "${PYBIN}/pip" wheel /io/ -w unrepaired-wheelhouse/
@@ -73,13 +79,13 @@ done
 
 # Install packages and test
 echo "# Installing wheels"
-for PYBIN in /opt/python/*/bin/; do
+for PYBIN in /opt/python/cp*/bin/; do
     echo "## Installing $PYBIN"
     "${PYBIN}/pip" install confluent_kafka -f /io/wheelhouse
     "${PYBIN}/python" -c 'import confluent_kafka; print(confluent_kafka.libversion())'
+    "${PYBIN}/pip" install -r /io/tests/requirements.txt
+    "${PYBIN}/pytest" /io/tests/test_Producer.py
     echo "## Uninstalling $PYBIN"
     "${PYBIN}/pip" uninstall -y confluent_kafka
 done
 
-
-
diff --git a/tools/download-s3.py b/tools/download-s3.py
index 807d6fc..eb9e8dc 100755
--- a/tools/download-s3.py
+++ b/tools/download-s3.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 #
 #
 # Collects CI artifacts from S3 storage, downloading them
diff --git a/tools/mingw-w64/msys2-dependencies.sh b/tools/mingw-w64/msys2-dependencies.sh
new file mode 100644
index 0000000..7baedc7
--- /dev/null
+++ b/tools/mingw-w64/msys2-dependencies.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+set -e
+
+export msys2='cmd //C RefreshEnv.cmd '
+export msys2+='& set MSYS=winsymlinks:nativestrict '
+export msys2+='& C:\\msys64\\msys2_shell.cmd -defterm -no-start'
+export mingw64="$msys2 -mingw64 -full-path -here -c "\"\$@"\" --"
+export msys2+=" -msys2 -c "\"\$@"\" --"
+
+# Have to update pacman first or choco upgrade will fail due to the migration
+# to zstd instead of xz compression
+$msys2 pacman -Sy --noconfirm pacman
+
+## Install more MSYS2 packages from https://packages.msys2.org/base here
+$msys2 pacman --sync --noconfirm --needed mingw-w64-x86_64-gcc
+
+## Install unzip
+$msys2 pacman --sync --noconfirm --needed unzip
diff --git a/tools/mingw-w64/semaphore_commands.sh b/tools/mingw-w64/semaphore_commands.sh
new file mode 100644
index 0000000..1c93901
--- /dev/null
+++ b/tools/mingw-w64/semaphore_commands.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+$msys2 pacman -S python --version 3.8.0
+
+set -e
+
+export PATH="$PATH;C:\Python38;C:\Python38\Scripts"
+export MAKE=mingw32-make  # so that Autotools can find it
+
+cmd /c mklink /D C:\Python38\python3.exe C:\Python38\python.exe
+
+python -m pip install cibuildwheel==2.12.0
diff --git a/tools/mingw-w64/setup-msys2.ps1 b/tools/mingw-w64/setup-msys2.ps1
new file mode 100644
index 0000000..cf72850
--- /dev/null
+++ b/tools/mingw-w64/setup-msys2.ps1
@@ -0,0 +1,31 @@
+# Install (if necessary) and set up msys2.
+
+
+$url="https://github.com/msys2/msys2-installer/releases/download/2022-10-28/msys2-base-x86_64-20221028.sfx.exe"
+$sha256="e365b79b4b30b6f4baf34bd93f3d2a41c0a92801c7a96d79cddbfca1090a0554"
+
+
+if (!(Test-Path -Path "c:\msys64\usr\bin\bash.exe")) {
+    echo "Downloading and installing msys2 to c:\msys64"
+
+    (New-Object System.Net.WebClient).DownloadFile($url, './msys2-installer.exe')
+
+    # Verify checksum
+    (Get-FileHash -Algorithm "SHA256" .\msys2-installer.exe).hash -eq $sha256
+
+    # Install msys2
+    .\msys2-installer.exe -y -oc:\
+
+    Remove-Item msys2-installer.exe
+
+    # Set up msys2 the first time
+    echo "Setting up msys"
+    c:\msys64\usr\bin\bash -lc ' '
+
+} else {
+    echo "Using previously installed msys2"
+}
+
+# Update packages
+echo "Updating msys2 packages"
+c:\msys64\usr\bin\bash -lc "pacman --noconfirm -Syuu --overwrite '*'"
diff --git a/tools/prepare-osx.sh b/tools/prepare-osx.sh
deleted file mode 100755
index adb7c5e..0000000
--- a/tools/prepare-osx.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-#
-# This script prepares the Travis OSX env with a particular interpreter
-# https://docs.travis-ci.com/user/languages/python/
-#
-# Default OSX environment
-# https://docs.travis-ci.com/user/reference/osx/#compilers-and-build-toolchain
-#
-PY_INTERPRETER=$1
-VENV_HOME=$2
-
-set -ev
-
-export HOMEBREW_NO_AUTO_UPDATE=1
-export HOMEBREW_NO_INSTALL_CLEANUP=1
-brew upgrade libtool || brew install libtool
-
-if [[ -z ${PY_INTERPRETER} ]] || [[  -z ${VENV_HOME} ]]; then
-    echo "Usage: $0 <Python interpreter version> <destination>"
-    exit 1
-fi
-
-# Update virtualenv and install requested interpreter
-echo "# Updating basic dependencies"
-pip install -U pip
-pip install virtualenv
-pyenv install -f ${PY_INTERPRETER}
-
-# Create virtualenv
-echo "# Constructing virtualenv for interpreter ${PY_INTERPRETER}"
-virtualenv -p ~/.pyenv/versions/${PY_INTERPRETER}/bin/python ${VENV_HOME}
diff --git a/tools/smoketest.sh b/tools/smoketest.sh
index ded0671..acfb4ac 100755
--- a/tools/smoketest.sh
+++ b/tools/smoketest.sh
@@ -29,8 +29,8 @@ fi
 
 pyvers_tested=
 
-# Run tests with both python2 and python3 (whatever versions the OS provides)
-for py in 2.7 3.8 ; do
+# Run tests with python3
+for py in 3.8 ; do
     echo "$0: # Smoketest with Python$py"
 
     if ! python$py -V ; then
diff --git a/tools/source-package-verification.sh b/tools/source-package-verification.sh
new file mode 100644
index 0000000..a220a8d
--- /dev/null
+++ b/tools/source-package-verification.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+#
+# Source Package Verification
+#
+
+pip install -r docs/requirements.txt
+pip install -U protobuf
+pip install -r tests/requirements.txt
+
+lib_dir=dest/runtimes/$OS_NAME-$ARCH/native
+tools/wheels/install-librdkafka.sh "${LIBRDKAFKA_VERSION#v}" dest
+export CFLAGS="$CFLAGS -I${PWD}/dest/build/native/include"
+export LDFLAGS="$LDFLAGS -L${PWD}/${lib_dir}"
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$PWD/$lib_dir"
+export DYLD_LIBRARY_PATH="$DYLD_LIBRARY_PATH:$PWD/$lib_dir"
+
+python setup.py build && python setup.py install
+if [[ $OS_NAME == linux && $ARCH == x64 ]]; then
+    flake8 --exclude ./_venv
+    make docs
+    python -m pytest --timeout 600 --ignore=dest
+else
+    python -m pytest --timeout 600 --ignore=dest --ignore=tests/integration
+fi
diff --git a/tools/style-format.sh b/tools/style-format.sh
new file mode 100755
index 0000000..a686cc6
--- /dev/null
+++ b/tools/style-format.sh
@@ -0,0 +1,132 @@
+#!/bin/bash
+#
+# Check or apply/fix the project coding style to all files passed as arguments.
+# Uses clang-format for C and flake8 for Python.
+#
+# Requires clang-format version 10  (apt install clang-format-10).
+#
+
+
+CLANG_FORMAT=${CLANG_FORMAT:-clang-format}
+
+set -e
+
+ret=0
+
+if [[ -z $1 ]]; then
+    echo "Usage: $0 [--fix] srcfile1.c srcfile2.h srcfile3.c ..."
+    echo ""
+    exit 0
+fi
+
+if [[ $1 == "--fix" ]]; then
+    fix=1
+    shift
+else
+    fix=0
+fi
+
+clang_format_version=$(${CLANG_FORMAT} --version | sed -Ee 's/.*version ([[:digit:]]+)\.[[:digit:]]+\.[[:digit:]]+.*/\1/')
+if [[ $clang_format_version != "10" ]] ; then
+    echo "$0: clang-format version 10, '$clang_format_version' detected"
+    exit 1
+fi
+
+# Get list of files from .formatignore to ignore formatting for.
+ignore_files=( $(grep '^[^#]..' .formatignore) )
+
+function ignore {
+    local file=$1
+
+    local f
+    for f in "${ignore_files[@]}" ; do
+        [[ $file == $f ]] && return 0
+    done
+
+    return 1
+}
+
+extra_info=""
+
+for f in $*; do
+
+    if ignore $f ; then
+        echo "$f is ignored by .formatignore" 1>&2
+        continue
+    fi
+
+    lang="c"
+    if [[ $f == *.py ]]; then
+        lang="py"
+        style="pep8"
+        stylename="pep8"
+    else
+        style="file"  # Use .clang-format
+        stylename="C"
+    fi
+
+    check=0
+
+    if [[ $fix == 1 ]]; then
+        # Convert tabs to 8 spaces first.
+        if grep -ql $'\t' "$f"; then
+            sed -i -e 's/\t/        /g' "$f"
+            echo "$f: tabs converted to spaces"
+        fi
+
+        if [[ $lang == c ]]; then
+            # Run clang-format to reformat the file
+            ${CLANG_FORMAT} --style="$style" "$f" > _styletmp
+
+        else
+            # Run autopep8 to reformat the file.
+            python3 -m autopep8 -a "$f" > _styletmp
+            # autopep8 can't fix all errors, so we also perform a flake8 check.
+            check=1
+        fi
+
+        if ! cmp -s "$f" _styletmp; then
+            echo "$f: style fixed ($stylename)"
+            # Use cp to preserve target file mode/attrs.
+            cp _styletmp "$f"
+            rm _styletmp
+        fi
+    fi
+
+    if [[ $fix == 0 || $check == 1 ]]; then
+        # Check for tabs
+        if grep -q $'\t' "$f" ; then
+            echo "$f: contains tabs: convert to 8 spaces instead"
+            ret=1
+        fi
+
+        # Check style
+        if [[ $lang == c ]]; then
+            if ! ${CLANG_FORMAT} --style="$style" --Werror --dry-run "$f" ; then
+                echo "$f: had style errors ($stylename): see clang-format output above"
+                ret=1
+            fi
+        elif [[ $lang == py ]]; then
+            if ! python3 -m flake8 "$f"; then
+                echo "$f: had style errors ($stylename): see flake8 output above"
+                if [[ $fix == 1 ]]; then
+                    # autopep8 couldn't fix all errors. Let the user know.
+                    extra_info="Error: autopep8 could not fix all errors, fix the flake8 errors manually and run again."
+                fi
+                ret=1
+            fi
+        fi
+    fi
+
+done
+
+rm -f _styletmp
+
+if [[ $ret != 0 ]]; then
+    echo ""
+    echo "You can run the following command to automatically fix the style:"
+    echo "  $ make style-fix"
+    [[ -n $extra_info ]] && echo "$extra_info"
+fi
+
+exit $ret
diff --git a/tools/test-manylinux.sh b/tools/test-manylinux.sh
index 83e3af7..26ef952 100755
--- a/tools/test-manylinux.sh
+++ b/tools/test-manylinux.sh
@@ -30,19 +30,13 @@ fi
 
 echo "$0 running from $(pwd)"
 
-
-function setup_centos {
-    # CentOS container setup
-    yum install -q -y python python3 epel-release curl
-}
-
 function setup_ubuntu {
     # Ubuntu container setup
     apt-get update
-    apt-get install -y python python3 curl
+    apt-get install -y python3.8 curl
     # python3-distutils is required on Ubuntu 18.04 and later but does
     # not exist on 14.04.
-    apt-get install -y python3-distutils || true
+    apt-get install -y python3.8-distutils || true
 }
 
 
@@ -57,9 +51,7 @@ function run_single_in_docker {
     fi
 
     # Detect OS
-    if grep -qi centos /etc/system-release /etc/redhat-release 2>/dev/null ; then
-        setup_centos
-    elif grep -qiE 'ubuntu|debian' /etc/os-release 2>/dev/null ; then
+    if grep -qiE 'ubuntu|debian' /etc/os-release 2>/dev/null ; then
         setup_ubuntu
     else
         echo "WARNING: Don't know what platform I'm on: $(uname -a)"
@@ -69,7 +61,7 @@ function run_single_in_docker {
     # in a plethora of possibly outdated Python requirements that
     # might interfere with the newer packages from PyPi, such as six.
     # Instead install it directly from PyPa.
-    curl https://bootstrap.pypa.io/get-pip.py | python
+    curl https://bootstrap.pypa.io/get-pip.py | python3.8
 
     /io/tools/smoketest.sh "$wheelhouse"
 }
@@ -86,8 +78,7 @@ function run_all_with_docker {
 
     [[ ! -z $DOCKER_IMAGES ]] || \
         # LTS and stable release of popular Linux distros.
-        # We require >= Python 2.7 to be available (which rules out Centos 6.6)
-        DOCKER_IMAGES="ubuntu:14.04 ubuntu:16.04 ubuntu:18.04 ubuntu:20.04 centos:7 centos:8"
+        DOCKER_IMAGES="ubuntu:18.04 ubuntu:20.04"
 
 
     _wheels="$wheelhouse/*manylinux*.whl"
diff --git a/tools/wheels/build-wheels.bat b/tools/wheels/build-wheels.bat
index e81fb89..8c5ef3e 100644
--- a/tools/wheels/build-wheels.bat
+++ b/tools/wheels/build-wheels.bat
@@ -13,28 +13,24 @@ set WHEELHOUSE=%4
 if [%WHEELHOUSE%]==[] goto usage
 echo on
 
-set CIBW_BUILD=cp27-%BW_ARCH% cp36-%BW_ARCH% cp37-%BW_ARCH% cp38-%BW_ARCH% cp39-%BW_ARCH%
-set CIBW_BEFORE_BUILD=python -m pip install delvewheel==0.0.6
+set CIBW_BUILD=cp36-%BW_ARCH% cp37-%BW_ARCH% cp38-%BW_ARCH% cp39-%BW_ARCH% cp310-%BW_ARCH% cp311-%BW_ARCH%
+set CIBW_BEFORE_BUILD=python -m pip install delvewheel==1.1.4
 set CIBW_TEST_REQUIRES=-r tests/requirements.txt
 set CIBW_TEST_COMMAND=pytest {project}\tests\test_Producer.py
 rem set CIBW_BUILD_VERBOSITY=3
 set include=%cd%\%DEST%\build\native\include
-set lib=%cd%\%DEST%\build\native\lib\win\%ARCH%\win-%ARCH%-Release\v120
+set lib=%cd%\%DEST%\build\native\lib\win\%ARCH%\win-%ARCH%-Release\v142
 set DLL_DIR=%cd%\%DEST%\runtimes\win-%ARCH%\native
 set CIBW_REPAIR_WHEEL_COMMAND=python -m delvewheel repair --add-path %DLL_DIR% -w {dest_dir} {wheel}
 
 set PATH=%PATH%;c:\Program Files\Git\bin\
 
-python -m pip install cibuildwheel==1.11.0 || goto :error
-
-python -m cibuildwheel --output-dir %WHEELHOUSE% --platform windows || goto :error
-
-dir %WHEELHOUSE%
+python3 -m cibuildwheel --output-dir %WHEELHOUSE% --platform windows || goto :error
 
 goto :eof
 
 :usage
-@echo "Usage: %0 x86|x64 win32|win_amd64 wheelhouse-dir"
+@echo "Usage: %0 x86|x64 win32|win_amd64 librdkafka-dir wheelhouse-dir"
 exit /B 1
 
 :error
diff --git a/tools/wheels/build-wheels.sh b/tools/wheels/build-wheels.sh
index bc99fd2..162b67f 100755
--- a/tools/wheels/build-wheels.sh
+++ b/tools/wheels/build-wheels.sh
@@ -7,8 +7,8 @@
 this_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 
 
-# Skip PyPy, old Python3 versions, and x86 builds.
-export CIBW_SKIP="pp* cp35-* *i686"
+# Skip PyPy, Python2, old Python3 versions, musl, and x86 builds.
+export CIBW_SKIP="pp* cp27-* cp35-* *i686 *musllinux* $CIBW_SKIP"
 # Run a simple test suite
 export CIBW_TEST_REQUIRES="-r tests/requirements.txt"
 export CIBW_TEST_COMMAND="pytest {project}/tests/test_Producer.py"
@@ -16,6 +16,7 @@ export CIBW_TEST_COMMAND="pytest {project}/tests/test_Producer.py"
 
 librdkafka_version=$1
 wheeldir=$2
+cibuildwheel_version="2.12.0"
 
 if [[ -z $wheeldir ]]; then
     echo "Usage: $0 <librdkafka-nuget-version> <wheeldir>"
@@ -26,19 +27,20 @@ set -ex
 
 [[ -d $wheeldir ]] || mkdir -p "$wheeldir"
 
+ARCH=${ARCH:-x64}
 
 case $OSTYPE in
     linux*)
         os=linux
         # Need to set up env vars (in docker) so that setup.py
         # finds librdkafka.
-        lib_dir=dest/runtimes/linux-x64/native
+        lib_dir=dest/runtimes/linux-$ARCH/native
         export CIBW_ENVIRONMENT="CFLAGS=-I\$PWD/dest/build/native/include LDFLAGS=-L\$PWD/$lib_dir LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:\$PWD/$lib_dir"
         ;;
     darwin*)
         os=macos
         # Need to set up env vars so that setup.py finds librdkafka.
-        lib_dir=dest/runtimes/osx-x64/native
+        lib_dir=dest/runtimes/osx-$ARCH/native
         export CFLAGS="-I${PWD}/dest/build/native/include"
         export LDFLAGS="-L${PWD}/$lib_dir"
         ;;
@@ -48,18 +50,21 @@ case $OSTYPE in
         ;;
 esac
 
-
 $this_dir/install-librdkafka.sh $librdkafka_version dest
 
-install_pkgs=cibuildwheel==1.11.0
+install_pkgs=cibuildwheel==$cibuildwheel_version
 
-python3 -m pip install $install_pkgs ||
-    pip3 install $install_pkgs
+python -m pip install ${PIP_INSTALL_OPTS} $install_pkgs ||
+    pip3 install ${PIP_INSTALL_OPTS} $install_pkgs
 
 if [[ -z $TRAVIS ]]; then
     cibw_args="--platform $os"
 fi
 
+if [[ $os == "macos" ]]; then
+    python3 $this_dir/install-macos-python-required-by-cibuildwheel.py $cibuildwheel_version
+fi
+
 LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/$lib_dir python3 -m cibuildwheel --output-dir $wheeldir $cibw_args
 
 ls $wheeldir
diff --git a/tools/wheels/install-librdkafka.sh b/tools/wheels/install-librdkafka.sh
index 1b8751c..03c7335 100755
--- a/tools/wheels/install-librdkafka.sh
+++ b/tools/wheels/install-librdkafka.sh
@@ -23,22 +23,27 @@ curl -L -o lrk$VER.zip https://www.nuget.org/api/v2/package/librdkafka.redist/$V
 
 unzip lrk$VER.zip
 
+ARCH=${ARCH:-x64}
 
 if [[ $OSTYPE == linux* ]]; then
     # Linux
 
     # Copy the librdkafka build with least dependencies to librdkafka.so.1
-    cp -v runtimes/linux-x64/native/{centos6-librdkafka.so,librdkafka.so.1}
-    ldd runtimes/linux-x64/native/librdkafka.so.1
+    if [[ $ARCH == arm64* ]]; then
+        cp -v runtimes/linux-$ARCH/native/{librdkafka.so,librdkafka.so.1}
+    else
+        cp -v runtimes/linux-$ARCH/native/{centos6-librdkafka.so,librdkafka.so.1}
+    fi
+    ldd runtimes/linux-$ARCH/native/librdkafka.so.1
 
 elif [[ $OSTYPE == darwin* ]]; then
     # MacOS X
 
     # Change the library's self-referencing name from
     # /Users/travis/.....somelocation/librdkafka.1.dylib to its local path.
-    install_name_tool -id $PWD/runtimes/osx-x64/native/librdkafka.dylib runtimes/osx-x64/native/librdkafka.dylib
+    install_name_tool -id $PWD/runtimes/osx-$ARCH/native/librdkafka.dylib runtimes/osx-$ARCH/native/librdkafka.dylib
 
-    otool -L runtimes/osx-x64/native/librdkafka.dylib
+    otool -L runtimes/osx-$ARCH/native/librdkafka.dylib
 fi
 
 popd
diff --git a/tools/wheels/install-macos-python-required-by-cibuildwheel.py b/tools/wheels/install-macos-python-required-by-cibuildwheel.py
new file mode 100644
index 0000000..2391e63
--- /dev/null
+++ b/tools/wheels/install-macos-python-required-by-cibuildwheel.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+#
+#
+# Get python versions required for cibuildwheel from their config and
+# install them. This implementation is based on cibuildwheel 2.12.0
+# version. Might need tweak if something changes in cibuildwheel.
+#
+# This was added as there is a permission issue when cibuildwheel
+# tries to install these versions on its own.
+#
+
+import platform
+import sys
+import os
+import tomli
+import urllib.request
+import re
+import shutil
+
+
+cibuildwheel_version = sys.argv[1]
+config_url = f"https://raw.githubusercontent.com/pypa/cibuildwheel/v{cibuildwheel_version}/cibuildwheel/resources/build-platforms.toml"
+print(f"Config URL is '{config_url}'")
+
+response = urllib.request.urlopen(config_url).read()
+
+content = response.decode('utf-8')
+d = tomli.loads(content)
+macos_config = d['macos']['python_configurations']
+
+machine_arc = platform.machine()
+print(f"Machine Architecture is '{machine_arc}'")
+machine_arc_regex_string = f".*{machine_arc}"
+machine_arc_regex = re.compile(machine_arc_regex_string)
+
+skip_versions = os.environ['CIBW_SKIP']
+print(f"Versions to skip are '{skip_versions}'")
+skip_versions_list = skip_versions.split()
+skip_versions_regex_string = ("|".join(skip_versions_list)).replace("*", ".*")
+skip_versions_regex = re.compile(skip_versions_regex_string)
+
+py_versions_info = []
+
+for py_version_config in macos_config:
+    identifier = py_version_config['identifier']
+    if not skip_versions_regex.match(identifier) and machine_arc_regex.match(identifier):
+        pkg_url = py_version_config['url']
+        py_versions_info.append((identifier, pkg_url))
+
+tmp_download_dir = "tmp_download_dir"
+tmp_pkg_file_name = "Package.pkg"
+this_file_path = os.getcwd()
+print(f"CWD is: '{this_file_path}'")
+tmp_download_dir_full_path = os.path.join(os.getcwd(), tmp_download_dir)
+tmp_pkg_file_full_path = os.path.join(tmp_download_dir_full_path, tmp_pkg_file_name)
+if os.path.exists(tmp_download_dir_full_path):
+    shutil.rmtree(tmp_download_dir_full_path)
+os.mkdir(tmp_download_dir)
+os.chdir(tmp_download_dir)
+install_command = f"sudo installer -pkg {tmp_pkg_file_name} -target /"
+
+for py_version_info in py_versions_info:
+    identifier = py_version_info[0]
+    pkg_url = py_version_info[1]
+    print(f"Installing '{identifier}' from '{pkg_url}'")
+    os.system(f"curl {pkg_url} --output {tmp_pkg_file_name}")
+    os.system(install_command)
+    os.remove(tmp_pkg_file_full_path)
+
+os.chdir(this_file_path)
+shutil.rmtree(tmp_download_dir_full_path)
diff --git a/tools/windows-copy-librdkafka.bat b/tools/windows-copy-librdkafka.bat
index 20deb28..4335a62 100644
--- a/tools/windows-copy-librdkafka.bat
+++ b/tools/windows-copy-librdkafka.bat
@@ -32,12 +32,12 @@ if exist %pypath64% (
 
 rem Copy x86 libs and dlls
 if exist %pypath% (
-	echo A | xcopy /F dest\librdkafka.redist.%librdkafka_version%\build\native\lib\win\x86\win-x86-Release\v120\librdkafka.lib %pypath%\libs\* || exit /b 1
+	echo A | xcopy /F dest\librdkafka.redist.%librdkafka_version%\build\native\lib\win\x86\win-x86-Release\v142\librdkafka.lib %pypath%\libs\* || exit /b 1
 	echo A | xcopy /I /F /S dest\librdkafka.redist.%librdkafka_version%\runtimes\win-x86\native\* %pypath%\libs || exit /b 1
 )
 
 rem Copy x64 libs and dlls
 if exist %pypath64% (
-	echo A | xcopy /F dest\librdkafka.redist.%librdkafka_version%\build\native\lib\win\x64\win-x64-Release\v120\librdkafka.lib %pypath64%\libs\* || exit /b 1
+	echo A | xcopy /F dest\librdkafka.redist.%librdkafka_version%\build\native\lib\win\x64\win-x64-Release\v142\librdkafka.lib %pypath64%\libs\* || exit /b 1
 	echo A | xcopy /I /F /S dest\librdkafka.redist.%librdkafka_version%\runtimes\win-x64\native\* %pypath64%\libs || exit /b 1
 )
diff --git a/tools/windows-install-librdkafka.bat b/tools/windows-install-librdkafka.bat
index 7405752..8322133 100644
--- a/tools/windows-install-librdkafka.bat
+++ b/tools/windows-install-librdkafka.bat
@@ -16,4 +16,3 @@ curl -s https://raw.githubusercontent.com/chemeris/msinttypes/master/stdint.h -o
 for %%V in (27, 35, 36, 37) do (
     call tools\windows-copy-librdkafka.bat %librdkafka_version% c:\Python%%~V || exit /b 1
 )
-
diff --git a/tox.ini b/tox.ini
index 48ef7b2..e2a1c68 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = flake8,py27,py36,py38
+envlist = flake8,py37,py38,py39,py310
 
 [testenv]
 setenv =
