aioetcd3 version grpc aio

This commit is contained in:
Mickael BOURNEUF 2025-01-19 20:43:02 +01:00
commit a3e8dc2be6
45 changed files with 6254 additions and 0 deletions

8
.coveragerc Normal file
View File

@ -0,0 +1,8 @@
[run]
source = aioetcd3
omit = aioetcd3/_etcdv3/*
[report]
exclude_lines =
pragma: no cover
pass

104
.gitignore vendored Normal file
View File

@ -0,0 +1,104 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# dotenv
.env
# virtualenv
.venv
venv/
ENV/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
# idea
.idea/

29
.travis.yml Normal file
View File

@ -0,0 +1,29 @@
sudo: required
language: python
python:
- "3.6"
- "3.7"
- "3.8"
services:
- docker
install:
- pip install codecov
- pip install coverage
- pip install -r requirements.txt
- docker run -d -p 2379:2379 -p 2380:2380 --name etcd-v3.2 --volume=/tmp/etcd-data:/etcd-data gcr.io/etcd-development/etcd:v3.2 /usr/local/bin/etcd --name my-etcd-1 --data-dir /etcd-data --listen-client-urls http://0.0.0.0:2379 --advertise-client-urls http://0.0.0.0:2379 --listen-peer-urls http://0.0.0.0:2380 --initial-advertise-peer-urls http://0.0.0.0:2380 --initial-cluster my-etcd-1=http://0.0.0.0:2380 --initial-cluster-token 123456789 --initial-cluster-state new --auto-compaction-retention 1
- docker run -d -p 2378:2379 -p 2381:2380 --name etcd-v3.2-auth --volume=/tmp/etcd-data2:/etcd-data --volume=`pwd`/test/cfssl:/cfssl gcr.io/etcd-development/etcd:v3.2 /usr/local/bin/etcd --name my-etcd-1 --data-dir /etcd-data --listen-client-urls https://0.0.0.0:2379 --advertise-client-urls https://0.0.0.0:2378 --client-cert-auth --trusted-ca-file=/cfssl/ca.pem --cert-file=/cfssl/server.pem --key-file=/cfssl/server-key.pem
- docker run -d -p 2377:2379 -p 2382:2380 --name etcd-v3.2-ssl --volume=/tmp/etcd-data3:/etcd-data --volume=`pwd`/test/cfssl:/cfssl gcr.io/etcd-development/etcd:v3.2 /usr/local/bin/etcd --name my-etcd-2 --data-dir /etcd-data --listen-client-urls https://0.0.0.0:2379 --advertise-client-urls https://0.0.0.0:2377 --cert-file=/cfssl/server.pem --key-file=/cfssl/server-key.pem
script:
- pwd
- coverage run -m unittest discover -v
- coverage report
after_success:
- codecov

46
README.md Normal file
View File

@ -0,0 +1,46 @@
# aioetcd3
[![Build Status](https://travis-ci.org/gaopeiliang/aioetcd3.svg?branch=master)](https://travis-ci.org/gaopeiliang/aioetcd3)
[![Code Coverage](https://codecov.io/gh/gaopeiliang/aioetcd3/branch/master/graphs/badge.svg)](https://codecov.io/gh/gaopeiliang/aioetcd3)
## AsyncIO bindings for etcd V3
example:
```
from aioetcd3.client import client
from aioetcd3.help import range_all
from aioetcd3.kv import KV
from aioetcd3 import transaction
etcd_client = client(endpoints="127.0.0.1:2379")
await etcd_client.put('/foo', 'foo')
value, meta = await etcd_client.get('/foo')
value_list = await etcd_client.range(range_all())
await etcd_client.delete('/foo')
lease = await etcd_client.grant_lease(ttl=5)
await etcd_client.put('/foo1', 'foo', lease=lease)
is_success, response = await etcd_client.txn(compare=[
transaction.Value('/trans1') == b'trans1',
transaction.Value('/trans2') == b'trans2'
], success=[
KV.delete.txn('/trans1'),
KV.put.txn('/trans3', 'trans3', prev_kv=True)
], fail=[
KV.delete.txn('/trans1')
])
await etcd_client.user_add(username="test user", password='1234')
await etcd_client.role_add(name="test_role")
```
## Install
```
pip install aioetcd3
```

1
aioetcd3/__init__.py Normal file
View File

@ -0,0 +1 @@
from aioetcd3.help import *

View File

@ -0,0 +1,3 @@
import os
import sys

# Prepend the directory containing this file to sys.path so the in-tree
# aioetcd3 package is imported in preference to any installed copy.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))

View File

@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# NO CHECKED-IN PROTOBUF GENCODE
# source: auth.proto
# Protobuf Python Version: 5.29.0
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import runtime_version as _runtime_version
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
_runtime_version.ValidateProtobufRuntimeVersion(
_runtime_version.Domain.PUBLIC,
5,
29,
0,
'',
'auth.proto'
)
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\nauth.proto\x12\x06\x61uthpb\"5\n\x04User\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12\x10\n\x08password\x18\x02 \x01(\x0c\x12\r\n\x05roles\x18\x03 \x03(\t\"\x83\x01\n\nPermission\x12)\n\x08permType\x18\x01 \x01(\x0e\x32\x17.authpb.Permission.Type\x12\x0b\n\x03key\x18\x02 \x01(\x0c\x12\x11\n\trange_end\x18\x03 \x01(\x0c\"*\n\x04Type\x12\x08\n\x04READ\x10\x00\x12\t\n\x05WRITE\x10\x01\x12\r\n\tREADWRITE\x10\x02\"?\n\x04Role\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12)\n\rkeyPermission\x18\x02 \x03(\x0b\x32\x12.authpb.Permissionb\x06proto3')
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'auth_pb2', _globals)
if not _descriptor._USE_C_DESCRIPTORS:
DESCRIPTOR._loaded_options = None
_globals['_USER']._serialized_start=22
_globals['_USER']._serialized_end=75
_globals['_PERMISSION']._serialized_start=78
_globals['_PERMISSION']._serialized_end=209
_globals['_PERMISSION_TYPE']._serialized_start=167
_globals['_PERMISSION_TYPE']._serialized_end=209
_globals['_ROLE']._serialized_start=211
_globals['_ROLE']._serialized_end=274
# @@protoc_insertion_point(module_scope)

View File

@ -0,0 +1,24 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import warnings
GRPC_GENERATED_VERSION = '1.69.0'
GRPC_VERSION = grpc.__version__
_version_not_supported = False
try:
from grpc._utilities import first_version_is_lower
_version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
except ImportError:
_version_not_supported = True
if _version_not_supported:
raise RuntimeError(
f'The grpc package installed is at version {GRPC_VERSION},'
+ f' but the generated code in auth_pb2_grpc.py depends on'
+ f' grpcio>={GRPC_GENERATED_VERSION}.'
+ f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
)

View File

@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# NO CHECKED-IN PROTOBUF GENCODE
# source: kv.proto
# Protobuf Python Version: 5.29.0
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import runtime_version as _runtime_version
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
_runtime_version.ValidateProtobufRuntimeVersion(
_runtime_version.Domain.PUBLIC,
5,
29,
0,
'',
'kv.proto'
)
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x08kv.proto\x12\x06mvccpb\"u\n\x08KeyValue\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12\x17\n\x0f\x63reate_revision\x18\x02 \x01(\x03\x12\x14\n\x0cmod_revision\x18\x03 \x01(\x03\x12\x0f\n\x07version\x18\x04 \x01(\x03\x12\r\n\x05value\x18\x05 \x01(\x0c\x12\r\n\x05lease\x18\x06 \x01(\x03\"\x91\x01\n\x05\x45vent\x12%\n\x04type\x18\x01 \x01(\x0e\x32\x17.mvccpb.Event.EventType\x12\x1c\n\x02kv\x18\x02 \x01(\x0b\x32\x10.mvccpb.KeyValue\x12!\n\x07prev_kv\x18\x03 \x01(\x0b\x32\x10.mvccpb.KeyValue\" \n\tEventType\x12\x07\n\x03PUT\x10\x00\x12\n\n\x06\x44\x45LETE\x10\x01\x62\x06proto3')
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'kv_pb2', _globals)
if not _descriptor._USE_C_DESCRIPTORS:
DESCRIPTOR._loaded_options = None
_globals['_KEYVALUE']._serialized_start=20
_globals['_KEYVALUE']._serialized_end=137
_globals['_EVENT']._serialized_start=140
_globals['_EVENT']._serialized_end=285
_globals['_EVENT_EVENTTYPE']._serialized_start=253
_globals['_EVENT_EVENTTYPE']._serialized_end=285
# @@protoc_insertion_point(module_scope)

View File

@ -0,0 +1,24 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import warnings
GRPC_GENERATED_VERSION = '1.69.0'
GRPC_VERSION = grpc.__version__
_version_not_supported = False
try:
from grpc._utilities import first_version_is_lower
_version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
except ImportError:
_version_not_supported = True
if _version_not_supported:
raise RuntimeError(
f'The grpc package installed is at version {GRPC_VERSION},'
+ f' but the generated code in kv_pb2_grpc.py depends on'
+ f' grpcio>={GRPC_GENERATED_VERSION}.'
+ f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
)

226
aioetcd3/_etcdv3/rpc_pb2.py Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load Diff

118
aioetcd3/auth.py Normal file
View File

@ -0,0 +1,118 @@
import functools
from aioetcd3.base import StubMixin
from aioetcd3._etcdv3 import rpc_pb2 as rpc
from aioetcd3._etcdv3 import auth_pb2 as auth
from aioetcd3.utils import put_key_range
import aioetcd3._etcdv3.rpc_pb2_grpc as stub
def call_grpc(request, response_func, method, skip_auth=False):
    """Decorator factory for Auth RPCs.

    *request* builds the protobuf message from the call arguments, *method*
    selects the stub method from the instance, and *response_func* translates
    the raw reply.  ``skip_auth=True`` sends the call without the auth token
    (needed by ``authenticate`` itself to avoid recursion).
    """
    def _decorator(f):
        @functools.wraps(f)
        async def _wrapper(self, *args, **kwargs):
            reply = await self.grpc_call(
                method(self), request(*args, **kwargs), skip_auth=skip_auth)
            return response_func(reply)
        return _wrapper
    return _decorator
class Auth(StubMixin):
    """Auth API of etcd v3: enable/disable auth, manage users, roles and
    permissions.

    Every RPC body is generated by the ``call_grpc`` decorator above; the
    decorated coroutine bodies are intentionally ``pass``.
    """
    def _update_channel(self, channel):
        # Rebuild the stub whenever the underlying channel is replaced.
        super()._update_channel(channel)
        self._auth_stub = stub.AuthStub(channel)

    @call_grpc(lambda: rpc.AuthEnableRequest(), lambda r: None, lambda s: s._auth_stub.AuthEnable)
    async def auth_enable(self):
        pass

    @call_grpc(lambda: rpc.AuthDisableRequest(), lambda r: None, lambda s: s._auth_stub.AuthDisable)
    async def auth_disable(self):
        pass

    # The method should be called without password authentication to avoid the infinite recursion
    @call_grpc(lambda username, password: rpc.AuthenticateRequest(name=username, password=password),
               lambda r: r.token, lambda s: s._auth_stub.Authenticate, skip_auth=True)
    async def authenticate(self, username, password):
        pass

    @call_grpc(lambda: rpc.AuthUserListRequest(), lambda r: [u for u in r.users], lambda s: s._auth_stub.UserList)
    async def user_list(self):
        pass

    @call_grpc(lambda username: rpc.AuthUserGetRequest(name=username), lambda r: [r for r in r.roles],
               lambda s: s._auth_stub.UserGet)
    async def user_get(self, username):
        pass

    @call_grpc(lambda username, password: rpc.AuthUserAddRequest(name=username, password=password), lambda r: None,
               lambda s: s._auth_stub.UserAdd)
    async def user_add(self, username, password):
        pass

    @call_grpc(lambda username: rpc.AuthUserDeleteRequest(name=username), lambda r: None,
               lambda s: s._auth_stub.UserDelete)
    async def user_delete(self, username):
        pass

    @call_grpc(lambda username, password: rpc.AuthUserChangePasswordRequest(name=username, password=password),
               lambda r: None, lambda s: s._auth_stub.UserChangePassword)
    async def user_change_password(self, username, password):
        pass

    @call_grpc(lambda username, role: rpc.AuthUserGrantRoleRequest(user=username, role=role), lambda r: None,
               lambda s: s._auth_stub.UserGrantRole)
    async def user_grant_role(self, username, role):
        pass

    @call_grpc(lambda username, role: rpc.AuthUserRevokeRoleRequest(name=username, role=role), lambda r: None,
               lambda s: s._auth_stub.UserRevokeRole)
    async def user_revoke_role(self, username, role):
        pass

    @call_grpc(lambda: rpc.AuthRoleListRequest(), lambda r: [role for role in r.roles],
               lambda s: s._auth_stub.RoleList)
    async def role_list(self):
        pass

    @call_grpc(lambda name: rpc.AuthRoleGetRequest(role=name), lambda r: [p for p in r.perm],
               lambda s: s._auth_stub.RoleGet)
    async def role_get(self, name):
        pass

    @call_grpc(lambda name: rpc.AuthRoleAddRequest(name=name), lambda r: None, lambda s: s._auth_stub.RoleAdd)
    async def role_add(self, name):
        pass

    @call_grpc(lambda name: rpc.AuthRoleDeleteRequest(role=name), lambda r: None, lambda s: s._auth_stub.RoleDelete)
    async def role_delete(self, name):
        pass

    @staticmethod
    def role_grant_request(name, key_range, permission):
        # Validate up front so callers get a clear error before any RPC work.
        if permission not in [auth.Permission.READ, auth.Permission.WRITE, auth.Permission.READWRITE]:
            raise ValueError("permission must be read, write or readwrite")
        per = auth.Permission(permType=permission)
        put_key_range(per, key_range)
        request = rpc.AuthRoleGrantPermissionRequest(name=name, perm=per)
        return request

    # `.__func__` unwraps the staticmethod so call_grpc receives a plain callable.
    @call_grpc(role_grant_request.__func__, lambda r: None, lambda s: s._auth_stub.RoleGrantPermission)
    async def role_grant_permission(self, name, key_range, permission):
        pass

    @staticmethod
    def role_revoke_request(name, key_range):
        request = rpc.AuthRoleRevokePermissionRequest(role=name)
        put_key_range(request, key_range)
        return request

    @call_grpc(role_revoke_request.__func__, lambda r: None, lambda s: s._auth_stub.RoleRevokePermission)
    async def role_revoke_permission(self, name, key_range):
        pass

91
aioetcd3/base.py Normal file
View File

@ -0,0 +1,91 @@
import asyncio
from grpc import (
metadata_call_credentials, AuthMetadataPlugin, RpcError, StatusCode
)
from .exceptions import AuthError, STATUS_MAP
_default_timeout = object()
class _EtcdTokenCallCredentials(AuthMetadataPlugin):
    """AuthMetadataPlugin that attaches the etcd auth token to every call."""

    def __init__(self, access_token):
        self._access_token = access_token

    def __call__(self, context, callback):
        # gRPC invokes the plugin per call: report the token metadata, no error.
        callback((("token", self._access_token),), None)
class StubMixin(object):
    """Common base of every API mixin (KV, Auth, Lease, ...).

    Owns the shared channel/timeout/credential state and funnels every RPC
    through ``grpc_call``, which performs lazy password authentication and maps
    gRPC errors onto the library's exception hierarchy.
    """
    def __init__(self, channel, timeout, username=None, password=None):
        self.username = username
        self.password = password
        self.channel = channel
        # Default per-call deadline in seconds (None = no deadline).
        self.timeout = timeout
        self._auth_lock = asyncio.Lock()
        self.last_response_info = None
        # Once authenticated: (("token", <token>),) sent with every call.
        self._metadata = None
        self._call_credentials = None
        self._update_channel(channel)

    async def _authenticate(self):
        """Fetch an auth token once and cache it as call metadata/credentials."""
        async with self._auth_lock:  # Avoiding concurrent authentications for the client instance
            if self._metadata is not None:  # Avoiding double authentication
                return
            # `authenticate` is provided by the Auth mixin of the concrete client.
            token = await self.authenticate(username=self.username, password=self.password)
            self._metadata = (("token", token),)
            self._call_credentials = metadata_call_credentials(_EtcdTokenCallCredentials(token))

    def _update_channel(self, channel):
        # Subclasses extend this to rebuild their stubs on the new channel.
        self.channel = channel
        # NOTE(review): `_loop` is a private attribute of the channel object —
        # confirm the grpc.aio channel in use still exposes it.
        self._loop = channel._loop

    def _update_cluster_info(self, header):
        self.last_response_info = header

    def get_cluster_info(self):
        """Header of the most recent successful response (None before any call)."""
        return self.last_response_info

    async def _authenticate_if_needed(self, skip_auth=False):
        """Authenticate lazily, once, when username/password were configured."""
        if self.username is not None and self.password is not None and not skip_auth:
            if self._metadata is None:  # We need to call self._authenticate for the first rpc call only
                try:
                    await self._authenticate()
                except RpcError as exc:
                    # `_state` is a private grpc attribute, relied on throughout this module.
                    if exc._state.code == StatusCode.INVALID_ARGUMENT:
                        raise AuthError(exc._state.details, exc._state.debug_error_string)
                    raise exc

    async def grpc_call(self, stub_func, request, timeout=_default_timeout, skip_auth=False):
        """Send *request* through *stub_func*, mapping errors and recording the
        response header.  *timeout* defaults to the client-wide setting."""
        if timeout is _default_timeout:
            timeout = self.timeout
        # If the username and password are set, trying to call the auth.authenticate
        # method to get the auth token. If the token already received - just use it.
        await self._authenticate_if_needed(skip_auth=skip_auth)
        try:
            response = await stub_func(
                request, timeout=timeout, credentials=self._call_credentials, metadata=self._metadata
            )
        except RpcError as exc:
            # _process_rpc_error always raises, so `response` below is always bound.
            _process_rpc_error(exc)
        self._update_cluster_info(response.header)
        return response
def _process_rpc_error(exc: RpcError):
    """Translate a grpc.RpcError into this library's exception hierarchy.

    Re-raises the original exception when the status code has no dedicated
    wrapper or the error object lacks the private ``_state`` attribute.
    """
    try:
        wrapper = STATUS_MAP.get(exc._state.code)
        if wrapper is not None:
            raise wrapper(exc._state.details, exc._state.debug_error_string)
    except AttributeError:
        # Not a grpc error with the expected internals; fall through.
        pass
    raise exc

125
aioetcd3/client.py Normal file
View File

@ -0,0 +1,125 @@
import os
import logging
from grpc import aio as grpc
from aioetcd3.kv import KV
from aioetcd3.lease import Lease
from aioetcd3.auth import Auth
from aioetcd3.watch import Watch
from aioetcd3.maintenance import Maintenance
from aioetcd3.cluster import Cluster
from aioetcd3.utils import get_secure_creds
logger = logging.getLogger(__name__)
class Client(KV, Lease, Auth, Watch, Maintenance, Cluster):
    """Asynchronous etcd v3 client combining the KV, lease, auth, watch,
    maintenance and cluster APIs over a single grpc.aio channel.

    :param endpoint: "host:port" (or "ip:port,ip:port" for client-side balancing)
    :param ssl: use a TLS channel when True
    :param ca_cert: CA certificate PEM contents (ignored when *default_ca* is True)
    :param cert_key: client private key PEM contents
    :param cert_cert: client certificate PEM contents
    :param default_ca: use the system CA bundle instead of *ca_cert*
    :param grpc_options: raw channel options passed straight to gRPC
    :param timeout: default per-call deadline in seconds
    :param username: etcd user for password authentication
    :param password: etcd password for password authentication
    :param loop: kept for backward compatibility with the aiogrpc-based API;
        grpc.aio manages its own event loop, so this is ignored
    :param executor: kept for backward compatibility; ignored by grpc.aio
    """

    def __init__(self, endpoint, ssl=False,
                 ca_cert=None, cert_key=None, cert_cert=None,
                 default_ca=False, grpc_options=None, timeout=5,
                 username=None, password=None,
                 *, loop=None, executor=None):
        channel = self._create_grpc_channel(endpoint=endpoint, ssl=ssl,
                                            ca_cert=ca_cert,
                                            cert_key=cert_key, cert_cert=cert_cert,
                                            default_ca=default_ca,
                                            options=grpc_options,
                                            loop=loop,
                                            executor=executor)
        if cert_key and cert_cert and username and password:
            logger.warning("Certificate and password authentication methods are used simultaneously")
        super().__init__(channel, timeout, username=username, password=password)

    def update_server_list(self, endpoint):
        """Point the client at a new endpoint list, recreating the channel.

        NOTE(review): under grpc.aio ``close()`` returns a coroutine; calling it
        without awaiting does not actually close the old channel — confirm
        whether this method should become a coroutine.
        """
        self.close()
        channel = self._recreate_grpc_channel(endpoint)
        self._update_channel(channel)

    def _create_grpc_channel(self, endpoint, ssl=False,
                             ca_cert=None, cert_key=None, cert_cert=None, default_ca=False, options=None,
                             *, loop=None, executor=None):
        """Create an insecure or TLS grpc.aio channel and remember the
        parameters so the channel can be re-created later."""
        credentials = None
        if not ssl:
            channel = grpc.insecure_channel(endpoint, options=options)
        else:
            if default_ca:
                ca_cert = None
            else:
                if ca_cert is None:
                    logger.warning("Certificate authority is not specified. Empty CA will be used. To use system CA set"
                                   " `default_ca=True`")
                    ca_cert = ''
            # to ensure ssl connect , set grpc env
            # os.environ['GRPC_SSL_CIPHER_SUITES'] = 'ECDHE-ECDSA-AES256-GCM-SHA384'
            # Channel credentials live in the synchronous `grpc` package and are
            # shared with grpc.aio.  (Fixes a NameError: this code previously
            # referenced the removed `aiogrpc` module.)
            from grpc import ssl_channel_credentials
            credentials = ssl_channel_credentials(ca_cert, cert_key, cert_cert)
            channel = grpc.secure_channel(endpoint, credentials, options=options)
        # Save parameters for auto-recreate
        self._credentials = credentials
        self._options = options
        # Private grpc.aio channel attribute; may be absent in some versions.
        self._loop = getattr(channel, "_loop", None)
        self._executor = executor
        return channel

    def _recreate_grpc_channel(self, endpoint):
        """Build a fresh channel against *endpoint* reusing the saved
        credentials, and drop the cached auth token (it belongs to the old
        connection)."""
        self._call_credentials = None
        self._metadata = None
        if self._credentials:
            channel = grpc.secure_channel(endpoint, self._credentials, options=self._options)
        else:
            channel = grpc.insecure_channel(endpoint, options=self._options)
        return channel

    def close(self):
        """Close the underlying channel.  Under grpc.aio the returned value is
        a coroutine and should be awaited by the caller."""
        return self.channel.close()
def client(endpoint, grpc_options=None, timeout=None, username=None, password=None):
    """Create an insecure :class:`Client`.

    Use `ip:port,ip:port` as *endpoint* to use gRPC client-side load balancing.
    When *timeout* is None the Client's own default deadline is kept instead of
    disabling deadlines entirely (previously an explicit ``timeout=None`` was
    always forwarded, silently overriding the Client default of 5 seconds).
    """
    kwargs = {}
    if timeout is not None:
        kwargs['timeout'] = timeout
    return Client(endpoint, grpc_options=grpc_options, username=username, password=password, **kwargs)
def ssl_client(endpoint, ca_file=None, cert_file=None, key_file=None, default_ca=False, grpc_options=None,
               timeout=None, username=None, password=None):
    """Create a TLS :class:`Client`, reading the PEM material from files.

    As in :func:`client`, a *timeout* of None keeps the Client's own default
    deadline rather than forwarding None (which would disable deadlines).
    """
    ca, key, cert = get_secure_creds(ca_cert=ca_file, cert_cert=cert_file, cert_key=key_file)
    kwargs = {}
    if timeout is not None:
        kwargs['timeout'] = timeout
    return Client(endpoint, ssl=True, ca_cert=ca, cert_key=key, cert_cert=cert,
                  default_ca=default_ca, grpc_options=grpc_options,
                  username=username, password=password, **kwargs)
def set_grpc_cipher(enable_rsa=True, enable_ecdsa=True, ciphers=None):
    """
    Set GRPC_SSL_CIPHER_SUITES environment variable to change the SSL cipher
    used by GRPC. By default the GRPC C core only supports RSA.
    :param enable_rsa: Enable RSA cipher
    :param enable_ecdsa: Enable ECDSA cipher
    :param ciphers: Override the cipher list to a list of strings
    """
    rsa_suites = ('ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-SHA256:'
                  'ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-GCM-SHA384')
    ecdsa_suites = 'ECDHE-ECDSA-AES256-GCM-SHA384'
    if ciphers:
        value = ':'.join(ciphers)
    else:
        selected = []
        if enable_rsa:
            selected.append(rsa_suites)
        if enable_ecdsa:
            selected.append(ecdsa_suites)
        value = ':'.join(selected) if selected else None
    if value is None:
        # Nothing enabled: fall back to the gRPC default by clearing the override.
        os.environ.pop('GRPC_SSL_CIPHER_SUITES', None)
    else:
        os.environ['GRPC_SSL_CIPHER_SUITES'] = value

89
aioetcd3/cluster.py Normal file
View File

@ -0,0 +1,89 @@
import functools
import aiogrpc
import grpc
from aioetcd3._etcdv3 import rpc_pb2 as rpc
from aioetcd3.base import StubMixin
import aioetcd3._etcdv3.rpc_pb2_grpc as stub
from aioetcd3.utils import ipv4_endpoints
from aioetcd3.maintenance import Maintenance
def call_grpc(request, response_func, method):
    """Decorator factory for Cluster RPCs: build the request from the call
    arguments, send it via ``grpc_call`` and translate the reply."""
    def _decorator(f):
        @functools.wraps(f)
        async def _wrapper(self, *args, **kwargs):
            reply = await self.grpc_call(method(self), request(*args, **kwargs))
            return response_func(reply)
        return _wrapper
    return _decorator
class Cluster(StubMixin):
    """Cluster-membership API: add/remove/update/list members and health checks."""
    def _update_channel(self, channel):
        super()._update_channel(channel)
        self._cluster_stub = stub.ClusterStub(channel)

    @call_grpc(lambda peerurls: rpc.MemberAddRequest(peerURLs=peerurls),
               lambda r: r.member, lambda s: s._cluster_stub.MemberAdd)
    async def member_add(self, peerurls):
        pass

    @call_grpc(lambda mid: rpc.MemberRemoveRequest(ID=mid),
               lambda r: [m for m in r.members],
               lambda s: s._cluster_stub.MemberRemove)
    async def member_remove(self, mid):
        pass

    @call_grpc(lambda mid, urls: rpc.MemberUpdateRequest(ID=mid, peerURLs=urls),
               lambda r: [m for m in r.members],
               lambda s: s._cluster_stub.MemberUpdate)
    async def member_update(self, mid, peerurls):
        pass

    @call_grpc(lambda: rpc.MemberListRequest(), lambda r: [m for m in r.members],
               lambda s: s._cluster_stub.MemberList)
    async def member_list(self):
        pass

    async def member_healthy(self, members=None):
        """Probe each member (lists of client URLs; default: the whole cluster)
        with a short status RPC and return (healthy, unhealthy) lists."""
        if not members:
            members = await self.member_list()
            members = [m.clientURLs for m in members]
        health_members = []
        unhealth_members = []
        for m in members:
            # Strip the scheme ("http://host:port" -> "host:port") and drop empties.
            m = [u.rpartition("//")[2] for u in m]
            m = [u for u in m if u]
            if m:
                server_endpoint = ipv4_endpoints(m)
                # NOTE(review): still built on the legacy `aiogrpc` helpers while the
                # rest of this commit migrates to grpc.aio — confirm `aiogrpc`
                # remains an intended dependency here.
                if self._credentials:
                    channel = aiogrpc.secure_channel(server_endpoint, self._credentials, options=self._options,
                                                     loop=self._loop, executor=self._executor,
                                                     standalone_pool_for_streaming=True)
                else:
                    channel = aiogrpc.insecure_channel(server_endpoint, options=self._options, loop=self._loop,
                                                       executor=self._executor, standalone_pool_for_streaming=True)
                try:
                    # Dedicated short-timeout Maintenance client against this one member.
                    maintenance = Maintenance(channel=channel, timeout=2, username=self.username, password=self.password)
                    try:
                        await maintenance.status()
                    except grpc.RpcError:
                        unhealth_members.append(m)
                    else:
                        health_members.append(m)
                finally:
                    await channel.close()
            else:
                unhealth_members.append(m)
        return health_members, unhealth_members

41
aioetcd3/exceptions.py Normal file
View File

@ -0,0 +1,41 @@
# All of the custom errors are inherited from the grpc.RpcError
# for the backward compatibility
from grpc import RpcError, StatusCode
class EtcdError(RpcError):
    """Base error of the library; inherits grpc.RpcError for backward
    compatibility and carries the etcd status details."""
    code = StatusCode.UNKNOWN

    def __init__(self, details, debug_info=None):
        self.details = details
        self.debug_info = debug_info

    def __repr__(self):
        return "`{}`: reason: `{}`".format(self.code, self.details)


class AuthError(EtcdError):
    """Authentication with username/password was rejected by etcd."""
    code = StatusCode.INVALID_ARGUMENT


class Unauthenticated(EtcdError):
    """The request lacked a valid auth token."""
    code = StatusCode.UNAUTHENTICATED


class InvalidArgument(EtcdError):
    code = StatusCode.INVALID_ARGUMENT


class PermissionDenied(EtcdError):
    code = StatusCode.PERMISSION_DENIED


class FailedPrecondition(EtcdError):
    code = StatusCode.FAILED_PRECONDITION


# grpc StatusCode -> library exception; consulted by base._process_rpc_error.
STATUS_MAP = {
    StatusCode.UNAUTHENTICATED: Unauthenticated,
    StatusCode.PERMISSION_DENIED: PermissionDenied,
    StatusCode.FAILED_PRECONDITION: FailedPrecondition,
}

91
aioetcd3/help.py Normal file
View File

@ -0,0 +1,91 @@
from aioetcd3.utils import increment_last_byte, to_bytes, next_valid_key
from aioetcd3._etcdv3 import auth_pb2 as _auth
from aioetcd3._etcdv3 import rpc_pb2 as _rpc
# Sort orders accepted by KV.range()/range_keys().
SORT_ASCEND = 'ascend'
SORT_DESCEND = 'descend'
# Role permission types (authpb.Permission.Type).
PER_R = _auth.Permission.READ
PER_W = _auth.Permission.WRITE
PER_RW = _auth.Permission.READWRITE
# Alarm request actions and alarm types from the etcd rpc protobufs.
ALARM_ACTION_GET = _rpc.AlarmRequest.GET
ALARM_ACTION_ACTIVATE = _rpc.AlarmRequest.ACTIVATE
ALARM_ACTION_DEACTIVATE = _rpc.AlarmRequest.DEACTIVATE
ALARM_TYPE_NONE = _rpc.NONE
ALARM_TYPE_NOSPACE = _rpc.NOSPACE
def range_prefix(key):
    """Key range covering every key that starts with *key*.

    An empty/falsy *key* means the whole keyspace.
    """
    if key:
        start = to_bytes(key)
        return start, increment_last_byte(start)
    return range_all()
def range_prefix_excluding(prefix, with_out):
    """
    Return a list of key_range, union of which is a prefix range excluding some keys
    :param prefix: the key to generate the range prefix
    :param with_out: a list of key_range (key or (start,end) tuple)
    :return: a list of key_range, union of which is a prefix range excluding some keys
    """
    base_range = range_prefix(prefix)
    return range_excluding(base_range, with_out)
def range_excluding(range_, with_out):
    """
    Return a list of key_range, union of which is a range excluding some keys
    :param range_: the original range
    :param with_out: a list of key_range (key or (start,end) tuple)
    :return: a list of key_range, union of which is a prefix range excluding some keys
    """
    # Normalize every exclusion to a (start, end) byte tuple; a bare key becomes
    # the single-key range [key, next_valid_key(key)).
    with_out_ranges = [(to_bytes(v), next_valid_key(v)) if isinstance(v, str) or isinstance(v, bytes)
                       else (to_bytes(v[0]), to_bytes(v[1]))
                       for v in with_out]
    with_out_ranges.sort()
    range_start, range_end = range_
    range_start = to_bytes(range_start)
    range_end = to_bytes(range_end)
    re_range = []
    # Throughout this module b'\x00' as an *end* means "unbounded".
    # Sweep the sorted exclusions left to right, emitting the gaps between them.
    next_start_key = range_start
    for s, e in with_out_ranges:
        if s >= range_end != b'\x00':
            # Exclusion starts at/after a bounded range end: nothing more to cut.
            break
        start, end = next_start_key, s
        if start < end:
            re_range.append((start, end))
        if e == b'\x00':
            # Unbounded exclusion swallows the remainder of the range.
            next_start_key = None
            break
        else:
            # Exclusions may overlap, so never move the cursor backwards.
            next_start_key = max(next_start_key, e)
    if next_start_key is not None and \
            (next_start_key < range_end or
             range_end == b'\x00'):
        re_range.append((next_start_key, range_end))
    return re_range
def range_greater(key):
    """Key range covering every key strictly greater than *key*."""
    lower = next_valid_key(key)
    return lower, b'\0'
def range_greater_equal(key):
    """Key range covering every key greater than or equal to *key*."""
    unbounded = b'\0'
    return key, unbounded
def range_less(key):
    """Key range covering every key strictly less than *key*."""
    lower = b'\0'
    return lower, key
def range_less_equal(key):
    """Key range covering every key less than or equal to *key*."""
    upper = next_valid_key(key)
    return b'\0', upper
def range_all():
    """Key range spanning the entire keyspace."""
    return b'\0', b'\0'

257
aioetcd3/kv.py Normal file
View File

@ -0,0 +1,257 @@
from aioetcd3._etcdv3 import rpc_pb2 as rpc
from aioetcd3.utils import to_bytes, put_key_range
from aioetcd3.base import StubMixin, _default_timeout
from inspect import getcallargs
import functools
import aioetcd3._etcdv3.rpc_pb2_grpc as stub
class KVMetadata(object):
    """Lightweight snapshot of a KeyValue's metadata (everything but key/value)."""

    def __init__(self, keyvalue):
        self.create_revision = keyvalue.create_revision
        self.mod_revision = keyvalue.mod_revision
        self.version = keyvalue.version
        self.lease_id = keyvalue.lease
# Sentinel distinguishing "argument not supplied" from an explicit None.
_default = object()
# Public sort-order / sort-target names -> RangeRequest enum values.
_sort_order_dict = {"ascend": rpc.RangeRequest.ASCEND,
                    None: rpc.RangeRequest.NONE,
                    "descend": rpc.RangeRequest.DESCEND}
_sort_target_dict = {"key": rpc.RangeRequest.KEY,
                     None: rpc.RangeRequest.KEY,
                     'version': rpc.RangeRequest.VERSION,
                     'create': rpc.RangeRequest.CREATE,
                     'mod': rpc.RangeRequest.MOD,
                     'value': rpc.RangeRequest.VALUE}
def _get_grpc_args(func, *args, **kwargs):
    """Map the call arguments onto *func*'s parameter names, dropping the
    `self` and `timeout` parameters (handled by the RPC machinery)."""
    bound = getcallargs(func, None, *args, **kwargs)
    del bound['self']
    del bound['timeout']
    return bound
def _kv(request_builder, response_builder, method):
    """Decorator turning a stub coroutine into a KV RPC.

    Also attaches a ``.txn`` attribute to the original function so the same
    call signature can produce a (request, response-translator) pair for use
    inside a transaction instead of issuing the RPC.
    """
    def _decorator(f):
        def txn(*args, timeout=_default_timeout, **kwargs):
            # Build the protobuf request and the translator from the named args.
            call_args = _get_grpc_args(f, *args, **kwargs)
            return (request_builder(**call_args), response_builder(**call_args))
        f.txn = txn

        @functools.wraps(f)
        async def grpc_func(self, *args, timeout=_default_timeout, **kwargs):
            request, response = txn(*args, **kwargs)
            return response(await self.grpc_call(method(self), request, timeout=timeout))
        return grpc_func
    return _decorator
def _create_txn_response_builder(success, fail, **kwargs):
    """Build a txn-response translator returning (succeeded, [translated ops])."""
    def _translate(ops, responses):
        # Each op is a (request, handler) pair; pair it with its raw ResponseOp.
        return [handler(_get_op_response(raw)) for (_, handler), raw in zip(ops, responses)]

    def _response_builder(response):
        if response.succeeded:
            return True, _translate(success, response.responses)
        return False, _translate(fail, response.responses)
    return _response_builder
def _range_request(key_range, sort_order=None, sort_target='key', **kwargs):
    """Build a RangeRequest for *key_range*; extra kwargs map 1:1 to fields."""
    range_request = rpc.RangeRequest()
    put_key_range(range_request, key_range)
    # Only set fields that were explicitly provided; protobuf defaults cover the rest.
    for k, v in kwargs.items():
        if v is not None:
            setattr(range_request, k, v)
    if sort_order in _sort_order_dict:
        range_request.sort_order = _sort_order_dict[sort_order]
    else:
        raise ValueError('unknown sort order: "{}"'.format(sort_order))
    if sort_target in _sort_target_dict:
        range_request.sort_target=_sort_target_dict[sort_target]
    else:
        raise ValueError('sort_target must be one of "key", '
                         '"version", "create", "mod" or "value"')
    return range_request
def _range_response(kv_response):
    """Translate a RangeResponse into a list of (key, value, KVMetadata)."""
    return [(kv.key, kv.value, KVMetadata(kv)) for kv in kv_response.kvs]
def _static_builder(f):
    """Return a builder that ignores the call arguments and always yields *f*."""
    return lambda *args, **kwargs: f
def _partial_builder(f):
    """Return a builder that pre-binds the keyword call arguments onto *f*."""
    return lambda **kwargs: functools.partial(f, **kwargs)
def _put_request(key, value, lease=None, prev_kv=False, ignore_value=False, ignore_lease=False):
    """Build a PutRequest; *lease* may be a lease id or an object with an `.id`."""
    if lease is None:
        lease = 0
    elif hasattr(lease, 'id'):
        lease = lease.id
    put_request = rpc.PutRequest(key=to_bytes(key),
                                 prev_kv=prev_kv, ignore_value=ignore_value,
                                 ignore_lease=ignore_lease)
    # Leave value/lease unset when the caller asked to ignore them.
    if not ignore_value:
        put_request.value = to_bytes(value)
    if not ignore_lease:
        put_request.lease = lease
    return put_request
def _delete_request(key_range, prev_kv=False):
    """Build a DeleteRangeRequest covering *key_range*."""
    delete_request = rpc.DeleteRangeRequest(prev_kv=prev_kv)
    put_key_range(delete_request, key_range)
    return delete_request
def _get_response(response):
    """First kv of a range reply as (value, KVMetadata), or (None, None)."""
    if not response.kvs:
        return None, None
    first = response.kvs[0]
    return first.value, KVMetadata(first)
def _range_keys_response(response):
    """Translate a keys-only RangeResponse into a list of (key, KVMetadata)."""
    return [(kv.key, KVMetadata(kv)) for kv in response.kvs]
def _delete_response(response, prev_kv=False, **kwargs):
    """Deleted-key count, or the previous kvs when *prev_kv* was requested.

    etcd omits ``prev_kvs`` when nothing matched, so the list may be empty.
    """
    if not prev_kv:
        return response.deleted
    return [(kv.key, kv.value, KVMetadata(kv)) for kv in response.prev_kvs]
def _put_response(response, prev_kv=False, **kwargs):
    """Previous (value, KVMetadata) when requested and present, else (None, None).

    etcd leaves ``prev_kv`` unset when the key did not previously exist.
    """
    if prev_kv and response.HasField('prev_kv'):
        prev = response.prev_kv
        return prev.value, KVMetadata(prev)
    return None, None
def _create_op_request(request):
    """Wrap a concrete request message into the RequestOp oneof used by txn."""
    if isinstance(request, rpc.PutRequest):
        return rpc.RequestOp(request_put=request)
    elif isinstance(request, rpc.RangeRequest):
        return rpc.RequestOp(request_range=request)
    elif isinstance(request, rpc.DeleteRangeRequest):
        return rpc.RequestOp(request_delete_range=request)
    elif isinstance(request, rpc.TxnRequest):
        # Nested transactions are themselves valid ops.
        return rpc.RequestOp(request_txn=request)
    else:
        raise TypeError("Unsupported request OP: " + repr(request))
def _get_op_response(response):
    """Unwrap the populated oneof payload from a ResponseOp."""
    field_name = response.WhichOneof('response')
    return getattr(response, field_name)
def _compare_request(compare, success, fail):
    """Assemble a TxnRequest from compare guards and success/failure op lists.

    *success*/*fail* are lists of (request, handler) pairs; only the request
    half is sent, the handlers are applied to the response elsewhere.
    """
    return rpc.TxnRequest(
        compare=[c.build_message() for c in compare],
        success=[_create_op_request(request=r) for r, _ in success],
        failure=[_create_op_request(request=r) for r, _ in fail],
    )
class KV(StubMixin):
    """etcd v3 KV API: range / get / put / delete / txn / compact.

    The coroutine bodies below are intentionally empty: the ``_kv``
    decorator builds the request message from the declared parameters,
    performs the gRPC call on ``self._kv_stub`` and decodes the reply
    with the supplied response builder. The declarations therefore only
    fix each method's public signature.
    """
    def _update_channel(self, channel):
        # Rebuild the KV stub whenever the client (re)connects its channel.
        super()._update_channel(channel)
        self._kv_stub = stub.KVStub(channel)

    @_kv(_range_request, _static_builder(_range_response), lambda x: x._kv_stub.Range)
    async def range(self, key_range, limit=None, revision=None, timeout=_default_timeout, sort_order=None, sort_target='key',
                    serializable=None, keys_only=None, count_only=None, min_mod_revision=None, max_mod_revision=None,
                    min_create_revision=None, max_create_revision=None):
        """Fetch the key-value pairs matching *key_range* (decoded by _range_response)."""
        # implemented in decorator
        pass

    @_kv(functools.partial(_range_request, count_only=True),
         _static_builder(lambda r: r.count), lambda x: x._kv_stub.Range)
    async def count(self, key_range, revision=None, timeout=_default_timeout, min_mod_revision=None,
                    max_mod_revision=None, min_create_revision=None, max_create_revision=None):
        """Return the number of keys in *key_range* (count-only range request)."""
        pass

    @_kv(functools.partial(_range_request, keys_only=True), _static_builder(_range_keys_response),
         lambda x: x._kv_stub.Range)
    async def range_keys(self, key_range, limit=None, revison=None, sort_order=None,
                         sort_target='key', timeout=_default_timeout, serializable=None, count_only=None,
                         min_mod_revision=None, max_mod_revision=None, min_create_revision=None,
                         max_create_revision=None):
        """Return (key, metadata) pairs only, without values.

        NOTE(review): the ``revison`` parameter is misspelled; renaming it
        would break existing keyword callers, so it is flagged here instead.
        """
        pass

    @_kv(_range_request, _static_builder(_get_response), lambda x: x._kv_stub.Range)
    async def get(self, key_range, revision=None, timeout=_default_timeout, serializable=None,
                  min_mod_revision=None, max_mod_revision=None, min_create_revision=None,
                  max_create_revision=None):
        """Return (value, metadata) for the first key in *key_range*, or (None, None)."""
        pass

    @_kv(_put_request, _partial_builder(_put_response), lambda x: x._kv_stub.Put)
    async def put(self, key, value, lease=0, prev_kv=False, timeout=_default_timeout, ignore_value=False, ignore_lease=False):
        """Store *value* under *key*; returns the previous pair when prev_kv is set."""
        pass

    @_kv(_delete_request, _partial_builder(_delete_response), lambda x: x._kv_stub.DeleteRange)
    async def delete(self, key_range, timeout=_default_timeout, prev_kv=False):
        """Delete *key_range*; returns deleted count, or prior pairs when prev_kv is set."""
        pass

    @_kv(functools.partial(_delete_request, prev_kv=True),
         _partial_builder(functools.partial(_delete_response, prev_kv=True)),
         lambda x: x._kv_stub.DeleteRange)
    async def pop(self, key_range, timeout=_default_timeout):
        """Delete *key_range* and return the removed (key, value, metadata) tuples."""
        pass

    @_kv(_compare_request, _create_txn_response_builder, lambda x: x._kv_stub.Txn)
    async def txn(self, compare, success, fail=[], *, timeout=_default_timeout):
        """Run *success* ops atomically when every *compare* holds, else *fail* ops.

        NOTE(review): the mutable default ``fail=[]`` is only safe because
        the request builder never mutates it.
        """
        pass

    async def compact(self, revision, physical=False, *, timeout=_default_timeout):
        """
        Compact etcd KV storage
        :param revision: compact to specified revision
        :param physical: return until data is physically compacted
        :param timeout: maximum time to wait
        """
        await self.grpc_call(self._kv_stub.Compact,
                             rpc.CompactionRequest(revision=revision,
                                                   physical=physical),
                             timeout=timeout)

114
aioetcd3/lease.py Normal file
View File

@ -0,0 +1,114 @@
from aioetcd3._etcdv3 import rpc_pb2 as rpc
from aioetcd3.base import StubMixin
import functools
import inspect
import asyncio
import aioetcd3._etcdv3.rpc_pb2_grpc as stub
def call_grpc(request, response_func, method):
    """Decorator factory turning a stub coroutine into a real gRPC call.

    *request* builds the protobuf message from the wrapped coroutine's
    bound arguments (declared defaults applied), *method* selects the RPC
    from the client instance, and *response_func* decodes the reply.
    """
    def _f(f):
        # inspect.getcallargs is deprecated since Python 3.5;
        # Signature.bind + apply_defaults is the supported equivalent.
        # Computing the signature once here keeps it off the call path.
        signature = inspect.signature(f)

        @functools.wraps(f)
        async def call(self, *args, **kwargs):
            bound = signature.bind(self, *args, **kwargs)
            bound.apply_defaults()
            params = dict(bound.arguments)
            params.pop('self')
            r = await self.grpc_call(method(self), request(**params))
            return response_func(r, client=self)
        return call
    return _f
class RLease(object):
    """Async-context-manager wrapper around an etcd lease.

    Entering the context grants the lease and starts a background task
    that refreshes it at roughly half the TTL; exiting cancels the
    refresher and revokes the lease.
    """
    def __init__(self, ttl, id, client):
        self.ttl = ttl
        self.id = id
        self.client = client

    async def __aenter__(self):
        lease = await self.client.grant_lease(ttl=self.ttl)
        # The server may adjust the requested TTL and picks the lease id
        # when 0 was requested, so adopt both from the granted lease.
        self.ttl = lease.ttl
        self.id = lease.id
        # Refresh at half the TTL; clamp to 1 second so a 1-second lease
        # does not busy-loop (ttl // 2 would be 0 there).
        refresh_interval = max(1, self.ttl // 2)

        async def _keep_alive(interval):
            while True:
                await asyncio.sleep(interval)
                await self.refresh()

        self.refresh_task = asyncio.ensure_future(_keep_alive(refresh_interval))
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if hasattr(self, 'refresh_task'):
            self.refresh_task.cancel()
            try:
                # Await directly so cancellation completes before revoking;
                # asyncio.wait() on the task would hide any outcome.
                await self.refresh_task
            except asyncio.CancelledError:
                pass
            except Exception:
                # Keep-alive is best-effort: a failed refresh must not
                # prevent the revoke below (matches prior behaviour).
                pass
        await self.revoke()

    async def revoke(self):
        """Revoke the lease (deletes every key attached to it)."""
        return await self.client.revoke_lease(self.id)

    async def refresh(self):
        """Send one keep-alive, extending the lease by its TTL."""
        return await self.client.refresh_lease(self.id)

    async def info(self):
        """Return the server's time-to-live information for this lease."""
        return await self.client.get_lease_info(self.id)
class Lease(StubMixin):
    """etcd v3 Lease API: grant, revoke, keep-alive and introspection.

    Methods with empty bodies are implemented by the ``call_grpc``
    decorator, which builds the request from the declared parameters and
    decodes the reply with the given lambda.
    """
    def _update_channel(self, channel):
        # Rebuild the lease stub whenever the client (re)connects its channel.
        super()._update_channel(channel)
        self._lease_stub = stub.LeaseStub(channel)

    @call_grpc(lambda ttl, id: rpc.LeaseGrantRequest(TTL=ttl, ID=id),
               lambda r, client: RLease(r.TTL, r.ID, client),
               lambda s: s._lease_stub.LeaseGrant)
    async def grant_lease(self, ttl, id=0):
        """Grant a lease of *ttl* seconds; id=0 lets the server choose the id."""
        pass

    def grant_lease_scope(self, ttl, id=0):
        # Returns an un-granted RLease: the actual grant happens on
        # ``async with`` entry (see RLease.__aenter__).
        return RLease(ttl, id, self)

    @call_grpc(lambda lease: rpc.LeaseRevokeRequest(ID=get_lease_id(lease)),
               lambda r, client: None, lambda s: s._lease_stub.LeaseRevoke)
    async def revoke_lease(self, lease):
        """Revoke *lease* (an RLease or a raw lease id); returns None."""
        pass

    async def refresh_lease(self, lease):
        """Send a single keep-alive for *lease* and return the renewed RLease."""
        lease_id = get_lease_id(lease)
        lease_request = rpc.LeaseKeepAliveRequest(ID=lease_id)

        # LeaseKeepAlive is a bidirectional stream; grpc.aio expects an
        # async iterator of requests, so wrap the single message in one.
        async def generate_request(request):
            for re in [request]:
                yield re

        await self._authenticate_if_needed()
        new_lease = None
        # Drain the response stream; the last reply wins (a single request
        # normally yields a single response).
        async for r in self._lease_stub.LeaseKeepAlive(
                generate_request(lease_request),
                credentials=self._call_credentials,
                metadata=self._metadata
        ):
            self._update_cluster_info(r.header)
            new_lease = RLease(r.TTL, r.ID, self)
        return new_lease

    @call_grpc(lambda lease: rpc.LeaseTimeToLiveRequest(ID=get_lease_id(lease), keys=True),
               lambda r, client: (RLease(r.TTL, r.ID, client), [k for k in r.keys]) if r.TTL >= 0 else (None, []),
               lambda s: s._lease_stub.LeaseTimeToLive)
    async def get_lease_info(self, lease):
        """Return (RLease, attached keys), or (None, []) when the lease expired."""
        pass
def get_lease_id(lease):
    """Accept either an RLease-like object or a raw integer lease id."""
    return getattr(lease, 'id', lease)

58
aioetcd3/maintenance.py Normal file
View File

@ -0,0 +1,58 @@
import functools
from aioetcd3._etcdv3 import rpc_pb2 as rpc
from aioetcd3.base import StubMixin
import aioetcd3._etcdv3.rpc_pb2_grpc as stub
class Status(object):
    """Plain-attribute snapshot of a StatusResponse's scalar fields."""

    _FIELDS = ('version', 'dbSize', 'leader', 'raftIndex', 'raftTerm')

    def __init__(self, status):
        # Copy each scalar off the protobuf so the snapshot outlives it.
        for name in self._FIELDS:
            setattr(self, name, getattr(status, name))
def call_grpc(request, response_func, method):
    """Decorator factory: wrap a stub coroutine so it issues the gRPC call.

    Unlike the Lease variant, arguments are forwarded positionally to the
    *request* builder and *response_func* receives only the raw reply.
    """
    def decorate(f):
        @functools.wraps(f)
        async def call(self, *args, **kwargs):
            rpc_method = method(self)
            message = request(*args, **kwargs)
            reply = await self.grpc_call(rpc_method, message)
            return response_func(reply)
        return call
    return decorate
class Maintenance(StubMixin):
    """etcd v3 Maintenance API: status, alarms, snapshot, hash, defragment.

    All bodies are empty; the ``call_grpc`` decorator performs the actual
    RPC and decodes the reply with the given lambda.
    """
    def _update_channel(self, channel):
        # Rebuild the maintenance stub whenever the channel changes.
        super()._update_channel(channel)
        self._maintenance_stub = stub.MaintenanceStub(channel)

    @call_grpc(lambda: rpc.StatusRequest(), lambda r: Status(r),
               lambda s: s._maintenance_stub.Status)
    async def status(self):
        """Return a Status snapshot of the responding member."""
        pass

    @call_grpc(lambda action, type, mid: rpc.AlarmRequest(action=action, memberID=mid, alarm=type),
               lambda r: [m for m in r.alarms], lambda s: s._maintenance_stub.Alarm)
    async def alarm(self, action, type, mid=0):
        """Query/activate/deactivate alarms; mid=0 addresses all members."""
        pass

    @call_grpc(lambda: rpc.SnapshotRequest(), lambda r: (r.remaining_bytes, r.blob),
               lambda s: s._maintenance_stub.Snapshot)
    async def snapshot(self):
        """Return (remaining_bytes, blob) decoded from a snapshot response.

        NOTE(review): Snapshot is declared server-streaming in rpc.proto,
        but this decodes a single message — confirm how grpc_call handles
        streaming replies.
        """
        pass

    @call_grpc(lambda: rpc.HashRequest(), lambda r: r.hash,
               lambda s: s._maintenance_stub.Hash)
    async def hash(self):
        """Return the hash of the member's KV backend (a testing aid)."""
        pass

    @call_grpc(lambda: rpc.DefragmentRequest(), lambda r: None,
               lambda s: s._maintenance_stub.Defragment)
    async def defragment(self):
        """Defragment the member's backend database; returns None."""
        pass

19
aioetcd3/produce.sh Normal file
View File

@ -0,0 +1,19 @@
#!/bin/bash
# Regenerate the Python gRPC bindings from the vendored etcd .proto files.
# The sed passes strip gogoproto / grpc-gateway annotations that a plain
# protoc toolchain cannot resolve, and rewrite the etcd-internal import
# paths to the local flat copies of kv.proto and auth.proto.

# Drop gogoproto options and remap import paths in rpc.proto.
sed -i -e '/gogoproto/d' protos/rpc.proto
sed -i -e 's/etcd\/mvcc\/mvccpb\/kv.proto/kv.proto/g' protos/rpc.proto
sed -i -e 's/etcd\/auth\/authpb\/auth.proto/auth.proto/g' protos/rpc.proto
# Remove grpc-gateway annotations: the import line plus each 4-line
# "option (google.api.http)" block.
sed -i -e '/google\/api\/annotations.proto/d' protos/rpc.proto
sed -i -e '/option (google.api.http)/,+3d' protos/rpc.proto
sed -i -e '/gogoproto/d' protos/kv.proto
sed -i -e '/gogoproto/d' protos/auth.proto
# NOTE(review): recent grpcio-tools releases expose the compiler as
# `python3 -m grpc_tools.protoc`; `grpc.tools` is the pre-1.0 module path.
# Confirm against the pinned grpcio-tools version.
python3 -m grpc.tools.protoc -Iprotos --python_out=rpc --grpc_python_out=rpc protos/rpc.proto protos/auth.proto protos/kv.proto
# Make the generated absolute imports package-relative.
# NOTE(review): output lands in rpc/ here, but the package imports stubs
# from aioetcd3/_etcdv3/ — verify the generated files are moved/renamed.
sed -i -e 's/import auth_pb2/from aioetcd3.rpc import auth_pb2/g' rpc/rpc_pb2.py
sed -i -e 's/import kv_pb2/from aioetcd3.rpc import kv_pb2/g' rpc/rpc_pb2.py

View File

@ -0,0 +1,31 @@
syntax = "proto3";
package authpb;
// User is a single entry in the bucket authUsers
message User {
  // name identifies the user.
  bytes name = 1;
  // password is the user's credential material.
  bytes password = 2;
  // roles lists the names of the roles granted to this user.
  repeated string roles = 3;
}
// Permission is a single entity
message Permission {
  enum Type {
    READ = 0;
    WRITE = 1;
    READWRITE = 2;
  }
  // permType is the kind of access granted.
  Type permType = 1;
  // key is the start of the key range this permission covers.
  bytes key = 2;
  // range_end is the exclusive upper bound of the covered range.
  bytes range_end = 3;
}
// Role is a single entry in the bucket authRoles
message Role {
  // name identifies the role.
  bytes name = 1;
  // keyPermission lists the key-range permissions granted to this role.
  repeated Permission keyPermission = 2;
}

43
aioetcd3/protos/kv.proto Normal file
View File

@ -0,0 +1,43 @@
syntax = "proto3";
package mvccpb;
message KeyValue {
// key is the key in bytes. An empty key is not allowed.
bytes key = 1;
// create_revision is the revision of last creation on this key.
int64 create_revision = 2;
// mod_revision is the revision of last modification on this key.
int64 mod_revision = 3;
// version is the version of the key. A deletion resets
// the version to zero and any modification of the key
// increases its version.
int64 version = 4;
// value is the value held by the key, in bytes.
bytes value = 5;
// lease is the ID of the lease that attached to key.
// When the attached lease expires, the key will be deleted.
// If lease is 0, then no lease is attached to the key.
int64 lease = 6;
}
message Event {
enum EventType {
PUT = 0;
DELETE = 1;
}
// type is the kind of event. If type is a PUT, it indicates
// new data has been stored to the key. If type is a DELETE,
// it indicates the key was deleted.
EventType type = 1;
// kv holds the KeyValue for the event.
// A PUT event contains current kv pair.
// A PUT event with kv.Version=1 indicates the creation of a key.
// A DELETE/EXPIRE event contains the deleted key with
// its modification revision set to the revision of deletion.
KeyValue kv = 2;
// prev_kv holds the key-value pair before the event happens.
KeyValue prev_kv = 3;
}

881
aioetcd3/protos/rpc.proto Normal file
View File

@ -0,0 +1,881 @@
syntax = "proto3";
package etcdserverpb;
import "kv.proto";
import "auth.proto";
// for grpc-gateway
service KV {
// Range gets the keys in the range from the key-value store.
rpc Range(RangeRequest) returns (RangeResponse) {
}
// Put puts the given key into the key-value store.
// A put request increments the revision of the key-value store
// and generates one event in the event history.
rpc Put(PutRequest) returns (PutResponse) {
}
// DeleteRange deletes the given range from the key-value store.
// A delete request increments the revision of the key-value store
// and generates a delete event in the event history for every deleted key.
rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
}
// Txn processes multiple requests in a single transaction.
// A txn request increments the revision of the key-value store
// and generates events with the same revision for every completed request.
// It is not allowed to modify the same key several times within one txn.
rpc Txn(TxnRequest) returns (TxnResponse) {
}
// Compact compacts the event history in the etcd key-value store. The key-value
// store should be periodically compacted or the event history will continue to grow
// indefinitely.
rpc Compact(CompactionRequest) returns (CompactionResponse) {
}
}
service Watch {
// Watch watches for events happening or that have happened. Both input and output
// are streams; the input stream is for creating and canceling watchers and the output
// stream sends events. One watch RPC can watch on multiple key ranges, streaming events
// for several watches at once. The entire event history can be watched starting from the
// last compaction revision.
rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
}
}
service Lease {
// LeaseGrant creates a lease which expires if the server does not receive a keepAlive
// within a given time to live period. All keys attached to the lease will be expired and
// deleted if the lease expires. Each expired key generates a delete event in the event history.
rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
}
// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
}
// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
// to the server and streaming keep alive responses from the server to the client.
rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
}
// LeaseTimeToLive retrieves lease information.
rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
}
// TODO(xiangli) List all existing Leases?
}
service Cluster {
// MemberAdd adds a member into the cluster.
rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
}
// MemberRemove removes an existing member from the cluster.
rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
}
// MemberUpdate updates the member configuration.
rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
}
// MemberList lists all the members in the cluster.
rpc MemberList(MemberListRequest) returns (MemberListResponse) {
}
}
service Maintenance {
// Alarm activates, deactivates, and queries alarms regarding cluster health.
rpc Alarm(AlarmRequest) returns (AlarmResponse) {
}
// Status gets the status of the member.
rpc Status(StatusRequest) returns (StatusResponse) {
}
// Defragment defragments a member's backend database to recover storage space.
rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
}
// Hash computes the hash of the KV's backend.
// This is designed for testing; do not use this in production when there
// are ongoing transactions.
rpc Hash(HashRequest) returns (HashResponse) {
}
// HashKV computes the hash of all MVCC keys up to a given revision.
rpc HashKV(HashKVRequest) returns (HashKVResponse) {
}
// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
}
// MoveLeader requests current leader node to transfer its leadership to transferee.
rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {
}
}
service Auth {
// AuthEnable enables authentication.
rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
}
// AuthDisable disables authentication.
rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
}
// Authenticate processes an authenticate request.
rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
}
// UserAdd adds a new user.
rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
}
// UserGet gets detailed user information.
rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
}
// UserList gets a list of all users.
rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
}
// UserDelete deletes a specified user.
rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
}
// UserChangePassword changes the password of a specified user.
rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
}
// UserGrant grants a role to a specified user.
rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
}
// UserRevokeRole revokes a role of specified user.
rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
}
// RoleAdd adds a new role.
rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
}
// RoleGet gets detailed role information.
rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
}
// RoleList gets lists of all roles.
rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
}
// RoleDelete deletes a specified role.
rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
}
// RoleGrantPermission grants a permission of a specified key or range to a specified role.
rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
}
// RoleRevokePermission revokes a key or range permission of a specified role.
rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
}
}
message ResponseHeader {
// cluster_id is the ID of the cluster which sent the response.
uint64 cluster_id = 1;
// member_id is the ID of the member which sent the response.
uint64 member_id = 2;
// revision is the key-value store revision when the request was applied.
int64 revision = 3;
// raft_term is the raft term when the request was applied.
uint64 raft_term = 4;
}
message RangeRequest {
  enum SortOrder {
    NONE = 0; // default, no sorting
    ASCEND = 1; // lowest target value first
    DESCEND = 2; // highest target value first
  }
  enum SortTarget {
    KEY = 0;
    VERSION = 1;
    CREATE = 2;
    MOD = 3;
    VALUE = 4;
  }
  // key is the first key for the range. If range_end is not given, the request only looks up key.
  bytes key = 1;
  // range_end is the upper bound on the requested range [key, range_end).
  // If range_end is '\0', the range is all keys >= key.
  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
  // then the range request gets all keys prefixed with key.
  // If both key and range_end are '\0', then the range request returns all keys.
  bytes range_end = 2;
  // limit is a limit on the number of keys returned for the request. When limit is set to 0,
  // it is treated as no limit.
  int64 limit = 3;
  // revision is the point-in-time of the key-value store to use for the range.
  // If revision is less or equal to zero, the range is over the newest key-value store.
  // If the revision has been compacted, ErrCompacted is returned as a response.
  int64 revision = 4;
  // sort_order is the order for returned sorted results.
  SortOrder sort_order = 5;
  // sort_target is the key-value field to use for sorting.
  SortTarget sort_target = 6;
  // serializable sets the range request to use serializable member-local reads.
  // Range requests are linearizable by default; linearizable requests have higher
  // latency and lower throughput than serializable requests but reflect the current
  // consensus of the cluster. For better performance, in exchange for possible stale reads,
  // a serializable range request is served locally without needing to reach consensus
  // with other nodes in the cluster.
  bool serializable = 7;
  // keys_only when set returns only the keys and not the values.
  bool keys_only = 8;
  // count_only when set returns only the count of the keys in the range.
  bool count_only = 9;
  // min_mod_revision is the lower bound for returned key mod revisions; all keys with
  // lesser mod revisions will be filtered away.
  int64 min_mod_revision = 10;
  // max_mod_revision is the upper bound for returned key mod revisions; all keys with
  // greater mod revisions will be filtered away.
  int64 max_mod_revision = 11;
  // min_create_revision is the lower bound for returned key create revisions; all keys with
  // lesser create revisions will be filtered away.
  int64 min_create_revision = 12;
  // max_create_revision is the upper bound for returned key create revisions; all keys with
  // greater create revisions will be filtered away.
  int64 max_create_revision = 13;
}
message RangeResponse {
ResponseHeader header = 1;
// kvs is the list of key-value pairs matched by the range request.
// kvs is empty when count is requested.
repeated mvccpb.KeyValue kvs = 2;
// more indicates if there are more keys to return in the requested range.
bool more = 3;
// count is set to the number of keys within the range when requested.
int64 count = 4;
}
message PutRequest {
// key is the key, in bytes, to put into the key-value store.
bytes key = 1;
// value is the value, in bytes, to associate with the key in the key-value store.
bytes value = 2;
// lease is the lease ID to associate with the key in the key-value store. A lease
// value of 0 indicates no lease.
int64 lease = 3;
// If prev_kv is set, etcd gets the previous key-value pair before changing it.
// The previous key-value pair will be returned in the put response.
bool prev_kv = 4;
// If ignore_value is set, etcd updates the key using its current value.
// Returns an error if the key does not exist.
bool ignore_value = 5;
// If ignore_lease is set, etcd updates the key using its current lease.
// Returns an error if the key does not exist.
bool ignore_lease = 6;
}
message PutResponse {
ResponseHeader header = 1;
// if prev_kv is set in the request, the previous key-value pair will be returned.
mvccpb.KeyValue prev_kv = 2;
}
message DeleteRangeRequest {
// key is the first key to delete in the range.
bytes key = 1;
// range_end is the key following the last key to delete for the range [key, range_end).
// If range_end is not given, the range is defined to contain only the key argument.
// If range_end is one bit larger than the given key, then the range is all the keys
// with the prefix (the given key).
// If range_end is '\0', the range is all keys greater than or equal to the key argument.
bytes range_end = 2;
// If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
// The previous key-value pairs will be returned in the delete response.
bool prev_kv = 3;
}
message DeleteRangeResponse {
ResponseHeader header = 1;
// deleted is the number of keys deleted by the delete range request.
int64 deleted = 2;
// if prev_kv is set in the request, the previous key-value pairs will be returned.
repeated mvccpb.KeyValue prev_kvs = 3;
}
message RequestOp {
// request is a union of request types accepted by a transaction.
oneof request {
RangeRequest request_range = 1;
PutRequest request_put = 2;
DeleteRangeRequest request_delete_range = 3;
TxnRequest request_txn = 4;
}
}
message ResponseOp {
// response is a union of response types returned by a transaction.
oneof response {
RangeResponse response_range = 1;
PutResponse response_put = 2;
DeleteRangeResponse response_delete_range = 3;
TxnResponse response_txn = 4;
}
}
message Compare {
enum CompareResult {
EQUAL = 0;
GREATER = 1;
LESS = 2;
NOT_EQUAL = 3;
}
enum CompareTarget {
VERSION = 0;
CREATE = 1;
MOD = 2;
VALUE= 3;
LEASE = 4;
}
// result is logical comparison operation for this comparison.
CompareResult result = 1;
// target is the key-value field to inspect for the comparison.
CompareTarget target = 2;
// key is the subject key for the comparison operation.
bytes key = 3;
oneof target_union {
// version is the version of the given key
int64 version = 4;
// create_revision is the creation revision of the given key
int64 create_revision = 5;
// mod_revision is the last modified revision of the given key.
int64 mod_revision = 6;
// value is the value of the given key, in bytes.
bytes value = 7;
// lease is the lease id of the given key.
int64 lease = 8;
// leave room for more target_union field tags, jump to 64
}
// range_end compares the given target to all keys in the range [key, range_end).
// See RangeRequest for more details on key ranges.
bytes range_end = 64;
// TODO: fill out with most of the rest of RangeRequest fields when needed.
}
// From google paxosdb paper:
// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
// and consists of three components:
// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
// may apply to the same or different entries in the database. All tests in the guard are applied and
// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
// it executes f op (see item 3 below).
// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
// lookup operation, and applies to a single database entry. Two different operations in the list may apply
// to the same or different entries in the database. These operations are executed
// if guard evaluates to
// true.
// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
message TxnRequest {
// compare is a list of predicates representing a conjunction of terms.
// If the comparisons succeed, then the success requests will be processed in order,
// and the response will contain their respective responses in order.
// If the comparisons fail, then the failure requests will be processed in order,
// and the response will contain their respective responses in order.
repeated Compare compare = 1;
// success is a list of requests which will be applied when compare evaluates to true.
repeated RequestOp success = 2;
// failure is a list of requests which will be applied when compare evaluates to false.
repeated RequestOp failure = 3;
}
message TxnResponse {
ResponseHeader header = 1;
// succeeded is set to true if the compare evaluated to true or false otherwise.
bool succeeded = 2;
// responses is a list of responses corresponding to the results from applying
// success if succeeded is true or failure if succeeded is false.
repeated ResponseOp responses = 3;
}
// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
// with a revision less than the compaction revision will be removed.
message CompactionRequest {
// revision is the key-value store revision for the compaction operation.
int64 revision = 1;
// physical is set so the RPC will wait until the compaction is physically
// applied to the local database such that compacted entries are totally
// removed from the backend database.
bool physical = 2;
}
message CompactionResponse {
ResponseHeader header = 1;
}
message HashRequest {
}
message HashKVRequest {
// revision is the key-value store revision for the hash operation.
int64 revision = 1;
}
message HashKVResponse {
ResponseHeader header = 1;
// hash is the hash value computed from the responding member's MVCC keys up to a given revision.
uint32 hash = 2;
// compact_revision is the compacted revision of key-value store when hash begins.
int64 compact_revision = 3;
}
message HashResponse {
ResponseHeader header = 1;
// hash is the hash value computed from the responding member's KV's backend.
uint32 hash = 2;
}
message SnapshotRequest {
}
message SnapshotResponse {
// header has the current key-value store information. The first header in the snapshot
// stream indicates the point in time of the snapshot.
ResponseHeader header = 1;
// remaining_bytes is the number of blob bytes to be sent after this message
uint64 remaining_bytes = 2;
// blob contains the next chunk of the snapshot in the snapshot stream.
bytes blob = 3;
}
message WatchRequest {
// request_union is a request to either create a new watcher or cancel an existing watcher.
oneof request_union {
WatchCreateRequest create_request = 1;
WatchCancelRequest cancel_request = 2;
}
}
message WatchCreateRequest {
// key is the key to register for watching.
bytes key = 1;
// range_end is the end of the range [key, range_end) to watch. If range_end is not given,
// only the key argument is watched. If range_end is equal to '\0', all keys greater than
// or equal to the key argument are watched.
// If the range_end is one bit larger than the given key,
// then all keys with the prefix (the given key) will be watched.
bytes range_end = 2;
// start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
int64 start_revision = 3;
// progress_notify is set so that the etcd server will periodically send a WatchResponse with
// no events to the new watcher if there are no recent events. It is useful when clients
// wish to recover a disconnected watcher starting from a recent known revision.
// The etcd server may decide how often it will send notifications based on current load.
bool progress_notify = 4;
enum FilterType {
// filter out put event.
NOPUT = 0;
// filter out delete event.
NODELETE = 1;
}
// filters filter the events at server side before it sends back to the watcher.
repeated FilterType filters = 5;
// If prev_kv is set, created watcher gets the previous KV before the event happens.
// If the previous KV is already compacted, nothing will be returned.
bool prev_kv = 6;
}
message WatchCancelRequest {
// watch_id is the watcher id to cancel so that no more events are transmitted.
int64 watch_id = 1;
}
message WatchResponse {
ResponseHeader header = 1;
// watch_id is the ID of the watcher that corresponds to the response.
int64 watch_id = 2;
// created is set to true if the response is for a create watch request.
// The client should record the watch_id and expect to receive events for
// the created watcher from the same stream.
// All events sent to the created watcher will attach with the same watch_id.
bool created = 3;
// canceled is set to true if the response is for a cancel watch request.
// No further events will be sent to the canceled watcher.
bool canceled = 4;
// compact_revision is set to the minimum index if a watcher tries to watch
// at a compacted index.
//
// This happens when creating a watcher at a compacted revision or the watcher cannot
// catch up with the progress of the key-value store.
//
// The client should treat the watcher as canceled and should not try to create any
// watcher with the same start_revision again.
int64 compact_revision = 5;
// cancel_reason indicates the reason for canceling the watcher.
string cancel_reason = 6;
repeated mvccpb.Event events = 11;
}
message LeaseGrantRequest {
// TTL is the advisory time-to-live in seconds.
int64 TTL = 1;
// ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
int64 ID = 2;
}
message LeaseGrantResponse {
ResponseHeader header = 1;
// ID is the lease ID for the granted lease.
int64 ID = 2;
// TTL is the server chosen lease time-to-live in seconds.
int64 TTL = 3;
string error = 4;
}
message LeaseRevokeRequest {
// ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
int64 ID = 1;
}
message LeaseRevokeResponse {
ResponseHeader header = 1;
}
message LeaseKeepAliveRequest {
// ID is the lease ID for the lease to keep alive.
int64 ID = 1;
}
message LeaseKeepAliveResponse {
ResponseHeader header = 1;
// ID is the lease ID from the keep alive request.
int64 ID = 2;
// TTL is the new time-to-live for the lease.
int64 TTL = 3;
}
// LeaseTimeToLiveRequest queries the remaining lifetime and attached keys of a lease.
message LeaseTimeToLiveRequest {
  // ID is the lease ID for the lease.
  int64 ID = 1;
  // keys is true to query all the keys attached to this lease.
  bool keys = 2;
}

message LeaseTimeToLiveResponse {
  ResponseHeader header = 1;
  // ID is the lease ID from the keep alive request.
  int64 ID = 2;
  // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
  int64 TTL = 3;
  // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
  int64 grantedTTL = 4;
  // Keys is the list of keys attached to this lease.
  repeated bytes keys = 5;
}

// Member describes one etcd cluster member.
message Member {
  // ID is the member ID for this member.
  uint64 ID = 1;
  // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
  string name = 2;
  // peerURLs is the list of URLs the member exposes to the cluster for communication.
  repeated string peerURLs = 3;
  // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
  repeated string clientURLs = 4;
}

message MemberAddRequest {
  // peerURLs is the list of URLs the added member will use to communicate with the cluster.
  repeated string peerURLs = 1;
}

message MemberAddResponse {
  ResponseHeader header = 1;
  // member is the member information for the added member.
  Member member = 2;
  // members is a list of all members after adding the new member.
  repeated Member members = 3;
}

message MemberRemoveRequest {
  // ID is the member ID of the member to remove.
  uint64 ID = 1;
}

message MemberRemoveResponse {
  ResponseHeader header = 1;
  // members is a list of all members after removing the member.
  repeated Member members = 2;
}

message MemberUpdateRequest {
  // ID is the member ID of the member to update.
  uint64 ID = 1;
  // peerURLs is the new list of URLs the member will use to communicate with the cluster.
  repeated string peerURLs = 2;
}

message MemberUpdateResponse{
  ResponseHeader header = 1;
  // members is a list of all members after updating the member.
  repeated Member members = 2;
}

// MemberListRequest has no fields; it asks for the current member list.
message MemberListRequest {
}

message MemberListResponse {
  ResponseHeader header = 1;
  // members is a list of all members associated with the cluster.
  repeated Member members = 2;
}

// DefragmentRequest asks the receiving member to defragment its backend database.
message DefragmentRequest {
}

message DefragmentResponse {
  ResponseHeader header = 1;
}

// MoveLeaderRequest asks the current leader to transfer leadership to another member.
message MoveLeaderRequest {
  // targetID is the node ID for the new leader.
  uint64 targetID = 1;
}

message MoveLeaderResponse {
  ResponseHeader header = 1;
}
// AlarmType enumerates the kinds of cluster alarms.
enum AlarmType {
  NONE = 0; // default, used to query if any alarm is active
  NOSPACE = 1; // space quota is exhausted
}

message AlarmRequest {
  enum AlarmAction {
    GET = 0;
    ACTIVATE = 1;
    DEACTIVATE = 2;
  }
  // action is the kind of alarm request to issue. The action
  // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
  // raised alarm.
  AlarmAction action = 1;
  // memberID is the ID of the member associated with the alarm. If memberID is 0, the
  // alarm request covers all members.
  uint64 memberID = 2;
  // alarm is the type of alarm to consider for this request.
  AlarmType alarm = 3;
}

message AlarmMember {
  // memberID is the ID of the member associated with the raised alarm.
  uint64 memberID = 1;
  // alarm is the type of alarm which has been raised.
  AlarmType alarm = 2;
}

message AlarmResponse {
  ResponseHeader header = 1;
  // alarms is a list of alarms associated with the alarm request.
  repeated AlarmMember alarms = 2;
}

// StatusRequest has no fields; it asks the receiving member for its status.
message StatusRequest {
}

message StatusResponse {
  ResponseHeader header = 1;
  // version is the cluster protocol version used by the responding member.
  string version = 2;
  // dbSize is the size of the backend database, in bytes, of the responding member.
  int64 dbSize = 3;
  // leader is the member ID which the responding member believes is the current leader.
  uint64 leader = 4;
  // raftIndex is the current raft index of the responding member.
  uint64 raftIndex = 5;
  // raftTerm is the current raft term of the responding member.
  uint64 raftTerm = 6;
}

// AuthEnableRequest turns authentication on cluster-wide.
message AuthEnableRequest {
}

// AuthDisableRequest turns authentication off cluster-wide.
message AuthDisableRequest {
}

message AuthenticateRequest {
  // name is the user name to authenticate as.
  string name = 1;
  // password is the password for the named user.
  string password = 2;
}

message AuthUserAddRequest {
  // name is the name of the user to add.
  string name = 1;
  // password is the initial password for the new user.
  string password = 2;
}

message AuthUserGetRequest {
  // name is the name of the user to look up.
  string name = 1;
}

message AuthUserDeleteRequest {
  // name is the name of the user to delete.
  string name = 1;
}

message AuthUserChangePasswordRequest {
  // name is the name of the user whose password is being changed.
  string name = 1;
  // password is the new password for the user.
  string password = 2;
}

message AuthUserGrantRoleRequest {
  // user is the name of the user which should be granted a given role.
  string user = 1;
  // role is the name of the role to grant to the user.
  string role = 2;
}

message AuthUserRevokeRoleRequest {
  // name is the name of the user whose role is revoked.
  string name = 1;
  // role is the name of the role to revoke from the user.
  string role = 2;
}

message AuthRoleAddRequest {
  // name is the name of the role to add to the authentication system.
  string name = 1;
}

message AuthRoleGetRequest {
  // role is the name of the role to look up.
  string role = 1;
}

// AuthUserListRequest has no fields; it asks for all user names.
message AuthUserListRequest {
}

// AuthRoleListRequest has no fields; it asks for all role names.
message AuthRoleListRequest {
}

message AuthRoleDeleteRequest {
  // role is the name of the role to delete.
  string role = 1;
}

message AuthRoleGrantPermissionRequest {
  // name is the name of the role which will be granted the permission.
  string name = 1;
  // perm is the permission to grant to the role.
  authpb.Permission perm = 2;
}

message AuthRoleRevokePermissionRequest {
  // role is the name of the role whose permission is revoked.
  string role = 1;
  // key is the start of the key range of the permission to revoke.
  string key = 2;
  // range_end is the end of the key range of the permission to revoke.
  string range_end = 3;
}

message AuthEnableResponse {
  ResponseHeader header = 1;
}

message AuthDisableResponse {
  ResponseHeader header = 1;
}

message AuthenticateResponse {
  ResponseHeader header = 1;
  // token is an authorized token that can be used in succeeding RPCs
  string token = 2;
}

message AuthUserAddResponse {
  ResponseHeader header = 1;
}

message AuthUserGetResponse {
  ResponseHeader header = 1;
  // roles is the list of roles granted to the user.
  repeated string roles = 2;
}

message AuthUserDeleteResponse {
  ResponseHeader header = 1;
}

message AuthUserChangePasswordResponse {
  ResponseHeader header = 1;
}

message AuthUserGrantRoleResponse {
  ResponseHeader header = 1;
}

message AuthUserRevokeRoleResponse {
  ResponseHeader header = 1;
}

message AuthRoleAddResponse {
  ResponseHeader header = 1;
}

message AuthRoleGetResponse {
  ResponseHeader header = 1;
  // perm is the list of permissions granted to the role.
  repeated authpb.Permission perm = 2;
}

message AuthRoleListResponse {
  ResponseHeader header = 1;
  // roles is the list of all role names.
  repeated string roles = 2;
}

message AuthUserListResponse {
  ResponseHeader header = 1;
  // users is the list of all user names.
  repeated string users = 2;
}

message AuthRoleDeleteResponse {
  ResponseHeader header = 1;
}

message AuthRoleGrantPermissionResponse {
  ResponseHeader header = 1;
}

message AuthRoleRevokePermissionResponse {
  ResponseHeader header = 1;
}

72
aioetcd3/transaction.py Normal file
View File

@ -0,0 +1,72 @@
from aioetcd3._etcdv3 import rpc_pb2 as rpc
from aioetcd3.utils import to_bytes
class BaseCompare(object):
    """Builder for one etcd transaction compare predicate.

    Comparison operators are overloaded to RECORD the comparison instead of
    evaluating it: ``Value('k') == b'v'`` stores the operand and operator on
    the instance and returns ``self``, which is later serialized by
    ``build_message()``.  Because ``__eq__`` is repurposed, instances must not
    be used in sets/dict keys or compared for real equality.
    """

    def __init__(self, key):
        self.key = key        # key (str or bytes) the compare applies to
        self.value = None     # right-hand operand, set by an operator below
        self.op = None        # rpc.Compare result enum, set by an operator below

    # Version, Mod and Create can only be ints
    def __eq__(self, other):
        self.value = other
        self.op = rpc.Compare.EQUAL
        return self

    def __ne__(self, other):
        self.value = other
        self.op = rpc.Compare.NOT_EQUAL
        return self

    def __lt__(self, other):
        self.value = other
        self.op = rpc.Compare.LESS
        return self

    def __gt__(self, other):
        self.value = other
        self.op = rpc.Compare.GREATER
        return self

    def __repr__(self):
        return "{}: {} {} '{}'".format(self.__class__, self.key, self.op, self.value)

    def build_message(self):
        """Serialize this predicate into an rpc.Compare protobuf message.

        Raises ValueError when no comparison operator has been applied yet.
        """
        compare = rpc.Compare()
        compare.key = to_bytes(self.key)
        if self.op is None:
            # BUG FIX: the old message ('op must be one of =, < or >') omitted
            # '!=' even though __ne__ is a supported operator.
            raise ValueError('op must be one of ==, !=, < or >')
        compare.result = self.op
        self.build_compare(compare)
        return compare

    def build_compare(self, compare):
        """Subclass hook: set the compare target and target-specific field."""
        raise NotImplementedError
class Value(BaseCompare):
    """Compare predicate on the stored value of the key."""

    def build_compare(self, compare):
        # The operand may be str or bytes; the wire format wants bytes.
        compare.value = to_bytes(self.value)
        compare.target = rpc.Compare.VALUE
class Version(BaseCompare):
    """Compare predicate on the version counter of the key."""

    def build_compare(self, compare):
        compare.version = int(self.value)
        compare.target = rpc.Compare.VERSION
class Create(BaseCompare):
    """Compare predicate on the creation revision of the key."""

    def build_compare(self, compare):
        compare.create_revision = int(self.value)
        compare.target = rpc.Compare.CREATE
class Mod(BaseCompare):
    """Compare predicate on the last-modification revision of the key."""

    def build_compare(self, compare):
        compare.mod_revision = int(self.value)
        compare.target = rpc.Compare.MOD

81
aioetcd3/utils.py Normal file
View File

@ -0,0 +1,81 @@
def to_bytes(maybe_bytestring):
    """Return *maybe_bytestring* as bytes.

    Bytes input is passed through untouched; str input is UTF-8 encoded.
    Any other type raises AttributeError (no ``encode`` method).
    """
    if isinstance(maybe_bytestring, bytes):
        return maybe_bytestring
    return maybe_bytestring.encode('utf-8')
def increment_last_byte(byte_string):
    """Return the key immediately after all keys prefixed by *byte_string*.

    Walks backwards, bumps the last byte that is below 0xff and truncates the
    tail.  If every byte is 0xff (or the input is empty) there is no larger
    key, so b'\\x00' is returned — etcd's "to the end of keyspace" range_end.
    """
    if isinstance(byte_string, bytes):
        data = bytearray(byte_string)
    else:
        data = bytearray(byte_string.encode('utf-8'))
    for index in reversed(range(len(data))):
        if data[index] < 0xff:
            data[index] += 1
            return bytes(data[:index + 1])
    return b'\x00'
def next_valid_key(byte_string):
    """Return the smallest key strictly greater than *byte_string* (appends NUL)."""
    if isinstance(byte_string, bytes):
        return byte_string + b'\x00'
    return byte_string.encode('utf-8') + b'\x00'
def put_key_range(obj, key_range):
    """Fill the ``key`` (and optionally ``range_end``) fields of *obj*.

    *key_range* is either a single str/bytes key, or a (key, range_end)
    pair; both parts are converted to bytes.  Returns *obj* for chaining.
    Raises ValueError for anything that is not a key or a 2-item pair.
    """
    def _as_bytes(value):
        return value if isinstance(value, bytes) else value.encode('utf-8')

    if isinstance(key_range, (str, bytes)):
        obj.key = _as_bytes(key_range)
        return obj
    try:
        key, range_end = key_range
    except Exception:
        raise ValueError("key_range must be either a str/bytes 'key', or ('key', 'range_end') tuple")
    obj.key = _as_bytes(key)
    obj.range_end = _as_bytes(range_end)
    return obj
def ipv4_endpoints(server_list):
    """Build a grpc ``ipv4:///`` target string from *server_list*.

    Addresses without an explicit ``:port`` get etcd's default port 2379.
    """
    endpoints = []
    for server in server_list:
        if ':' in server:
            endpoints.append(server)
        else:
            endpoints.append(server + ':2379')
    return 'ipv4:///' + ','.join(endpoints)
def ipv6_endpoints(server_list):
    """Build a grpc ``ipv6:///`` target string from *server_list*.

    A bracketed address without a port (``[::1]``) and a bare address
    (``::1``) both get etcd's default port 2379; an address that already
    carries brackets and a port passes through unchanged.
    """
    formatted = []
    for address in server_list:
        bracketed = address.startswith('[')
        closed = address.endswith(']')
        if bracketed and closed:
            # '[::1]' -> '[::1]:2379'
            address = address + ':2379'
        elif not bracketed and not closed:
            # '::1' -> '[::1]:2379'
            address = '[' + address + ']:2379'
        # otherwise (e.g. '[::1]:2380') keep the address as given
        formatted.append(address)
    return 'ipv6:///' + ','.join(formatted)
def dns_endpoint(dns_name):
    """Build a grpc DNS-resolver target for *dns_name*."""
    return ''.join(('dns:///', dns_name))
def get_secure_creds(ca_cert, cert_key, cert_cert):
    """Read TLS credential files from disk.

    Each argument is a file path or None.  Returns a
    (ca_bytes, key_bytes, cert_bytes) tuple with None for unset paths.
    """
    def _read(path):
        if path is None:
            return None
        with open(path, 'rb') as cred_file:
            return cred_file.read()

    return _read(ca_cert), _read(cert_key), _read(cert_cert)

560
aioetcd3/watch.py Normal file
View File

@ -0,0 +1,560 @@
import asyncio
from asyncio import CancelledError
from aioetcd3.base import StubMixin
from aioetcd3._etcdv3 import rpc_pb2 as rpc
from aioetcd3._etcdv3 import kv_pb2 as kv
from asyncio.queues import Queue, QueueEmpty, QueueFull
from aioetcd3.utils import put_key_range
from aioetcd3.kv import KVMetadata
import aioetcd3._etcdv3.rpc_pb2_grpc as stub
# Event type names surfaced to library users.
EVENT_TYPE_MODIFY = "MODIFY"
EVENT_TYPE_DELETE = "DELETE"
EVENT_TYPE_CREATE = "CREATE"


class Event(object):
    """One key change delivered by a watch.

    Classifies the raw protobuf event: a PUT with version 1 is the key's
    first write (CREATE), any later PUT is MODIFY, everything else is DELETE.
    """

    def __init__(self, event, revision):
        if event.type != kv.Event.PUT:
            self.type = EVENT_TYPE_DELETE
        elif event.kv.version == 1:
            self.type = EVENT_TYPE_CREATE
        else:
            self.type = EVENT_TYPE_MODIFY
        self.key = event.kv.key
        self.value = event.kv.value
        self.meta = KVMetadata(event.kv)
        # prev_kv is presumably only populated when the watch was created
        # with prev_kv=True; otherwise these come from empty proto defaults.
        self.pre_value = event.prev_kv.value
        self.pre_meta = KVMetadata(event.prev_kv)
        self.revision = revision

    def is_put(self):
        """True for CREATE and MODIFY events."""
        return self.type in (EVENT_TYPE_CREATE, EVENT_TYPE_MODIFY)

    def __str__(self):
        return f'{self.type} {self.key},{self.value}'
class WatchScope(object):
    """Async context manager wrapping a watch() async generator.

    Entering the scope consumes the generator's initial "created" item and
    hands the generator back for iteration; leaving it closes the generator.
    """

    def __init__(self, _iter):
        self._iter = _iter

    async def __aenter__(self):
        # Advance past the creation marker so the caller only sees real events.
        await self._iter.__anext__()
        return self._iter

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        try:
            await self._iter.aclose()
        except GeneratorExit:
            pass
class _Pipe(object):
"""
Selectable asyncio channel
"""
def __init__(self, maxsize=None, *, loop=None):
self._loop = loop
self._notify = asyncio.Event(loop=loop)
self._full_notify = asyncio.Event(loop=loop)
self._queue = []
self._full_notify.set()
if maxsize is None or maxsize <= 0:
self._maxsize = None
else:
self._maxsize = maxsize
self._last_watch_version = None
def is_empty(self):
return not self._notify.is_set()
def is_full(self):
return not self._full_notify.is_set()
async def put(self, value):
await self.wait_full()
self.put_nowait(value)
def put_nowait(self, value):
if self.is_full():
raise QueueFull
self._queue.append(value)
if self.is_empty():
self._notify.set()
if self._maxsize and len(self._queue) >= self._maxsize:
self._full_notify.clear()
async def get(self, value):
await self.wait_empty()
return self.get_nowait()
def get_nowait(self):
if self.is_empty():
raise QueueEmpty
if self._maxsize or len(self._queue) <= self._maxsize:
self._full_notify.set()
if len(self._queue) == 1:
self._notify.clear()
return self._queue.pop(0)
async def read(self, limit=None):
await self.wait_empty()
return self.read_nowait(limit)
def read_nowait(self, limit=None):
if self.is_empty():
raise QueueEmpty
if limit is None or limit <= 0:
read_size = len(self._queue)
else:
read_size = min(len(self._queue), limit)
result = self._queue[:read_size]
del self._queue[:read_size]
if not self._maxsize or len(self._queue) < self._maxsize:
self._full_notify.set()
if len(self._queue) == 0:
self._notify.clear()
return result
async def write(self, values):
await self.wait_full()
return self.write_nowait(values)
def write_nowait(self, values):
if self.is_full():
raise QueueFull
if self._maxsize is None:
write_size = len(values)
else:
write_size = min(len(values), self._maxsize - len(self._queue))
self._queue.extend(values[:write_size])
if len(self._queue) > 0:
self._notify.set()
if self._maxsize and len(self._queue) >= self._maxsize:
self._full_notify.clear()
return write_size
async def wait_full(self):
while self.is_full():
await self._full_notify.wait()
async def wait_empty(self):
while self.is_empty():
await self._notify.wait()
async def _select(pipes, futures, *, loop=None):
futures = [asyncio.ensure_future(f, loop=loop) for f in futures]
_, pending = await asyncio.wait([p.wait_empty() for p in pipes] + list(futures),
loop=loop, return_when=asyncio.FIRST_COMPLETED)
for p in pending:
if p not in futures:
p.cancel()
try:
await p
except asyncio.CancelledError:
pass
except Exception:
pass
return [p for p in pipes if not p.is_empty()], [f for f in futures if f.done()]
class WatchException(Exception):
    """Base class for errors delivered through a watch's event queue."""
    def _clone(self):
        # The same exception object may be raised from several places
        # (tracebacks would mix up), so each raise site gets a fresh copy.
        return type(self)(*self.args)
class CompactRevisonException(WatchException):
    """Raised when a watch refers to a revision that etcd has compacted away.

    ``revision`` holds the minimum revision still available on the server.
    """

    def __init__(self, revision):
        message = f"Watch on compact revision. Min revision is {revision}"
        super().__init__(message)
        self.revision = revision

    def _clone(self):
        return CompactRevisonException(self.revision)
class ServerCancelException(WatchException):
    """Raised when the etcd server cancels a watch on its own initiative.

    ``cancel_reason`` carries the server-provided explanation string.
    """

    def __init__(self, cancel_reason):
        message = f"Watch cancelled: {cancel_reason}"
        super().__init__(message)
        self.cancel_reason = cancel_reason

    def _clone(self):
        return ServerCancelException(self.cancel_reason)
class Watch(StubMixin):
def __init__(self, channel, timeout, username=None, password=None):
    """Set up the watch service's request queues before stub initialization.

    NOTE(review): ``self._loop`` is read before ``super().__init__`` runs —
    this assumes StubMixin exposes ``_loop`` as a class attribute (or the
    attribute otherwise exists at this point); confirm in aioetcd3.base.
    NOTE(review): the ``loop=`` keyword to asyncio primitives was removed in
    Python 3.10.
    """
    # Put (WatchCreateRequest, output_queue, done_future) to create a watch
    self._create_request_queue = _Pipe(5, loop=self._loop)
    # Put (output_queue, done_future) to cancel a watch
    self._cancel_request_queue = _Pipe(loop=self._loop)
    # Set by _update_channel to make the background task reconnect.
    self._reconnect_event = asyncio.Event(loop=self._loop)
    # Future for the running _watch_task, or None when no task is active.
    self._watch_task_running = None
    super().__init__(channel, timeout, username=username, password=password)
async def _watch_task(self, reconnect_event):
    """Background dispatcher multiplexing all watches over one gRPC stream.

    Consumes create requests from ``self._create_request_queue`` and cancel
    requests from ``self._cancel_request_queue``, forwards them on a single
    bidirectional Watch call, and fans responses out to per-watch output
    queues as ``(is_event, payload, revision)`` tuples.  Reconnects (outer
    ``while``) whenever *reconnect_event* is set or the call dies, restoring
    every registered watch from its last seen revision.

    NOTE(review): the ``loop=`` keyword passed to asyncio.Queue/sleep/
    ensure_future below was removed in Python 3.10 — this body only runs on
    Python <= 3.9 as written.
    """
    # Queue for WatchRequest
    async def input_iterator(input_queue):
        # Turns the asyncio.Queue of outgoing WatchRequests into the
        # request iterator of the bidirectional gRPC call; None terminates.
        while True:
            n = await input_queue.get()
            if n is None:
                break
            yield n

    async def watch_call(input_queue, watch_stub, output_pipe):
        # Runs the gRPC Watch stream, pumping every response into output_pipe.
        await self._authenticate_if_needed()
        async for r in watch_stub.Watch(
            input_iterator(input_queue), credentials=self._call_credentials, metadata=self._metadata
        ):
            await output_pipe.put(r)

    # Highest response.header.revision observed on this stream.
    last_received_revision = None
    # watch_id -> revision
    last_watches_revision = {}
    # watch_id -> (WatchCreateRequest, output_queue)
    registered_watches = {}
    # output_queue -> watch_id
    registered_queues = {}
    # A tuple (WatchCreateRequest, output_queue, done_future, cancel_future)
    pending_create_request = None
    # watch_id -> done_future
    pending_cancel_requests = {}
    # output_queue -> (WatchCreateRequest, done_future)
    restore_creates = {}
    quitting = False

    def _reconnect_revision(watch_id):
        # Revision from which a watch should be re-created after reconnect;
        # None means "start from the current head".
        if last_received_revision is None:
            return None
        else:
            if watch_id in last_watches_revision:
                last_revision = last_watches_revision[watch_id]
                return max(last_revision + 1, last_received_revision)
            else:
                return None
    try:
        while not quitting:  # Auto reconnect when failed or channel updated
            reconnect_event.clear()
            output_pipe = _Pipe(loop=self._loop)
            input_queue = asyncio.Queue(loop=self._loop)
            call_task = asyncio.ensure_future(watch_call(input_queue, self._watch_stub, output_pipe), loop=self._loop)
            try:
                # Restore registered watches
                for watch_id, (create_request, output_queue) in registered_watches.items():
                    if watch_id in pending_cancel_requests:
                        # Already cancelling
                        fut = pending_cancel_requests.pop(watch_id)
                        if not fut.done():
                            fut.set_result(True)
                        continue
                    r = rpc.WatchCreateRequest()
                    r.CopyFrom(create_request)
                    restore_revision = _reconnect_revision(watch_id)
                    if restore_revision is not None:
                        r.start_revision = restore_revision
                    restore_creates[output_queue] = (r, None)
                registered_watches.clear()
                registered_queues.clear()
                # Restore pending cancels - should already be processed though
                for watch_id, fut in pending_cancel_requests.items():
                    fut.set_result(True)
                pending_cancel_requests.clear()
                # Restore pending create request
                if pending_create_request is not None:
                    if pending_create_request[3] is not None:  # Cancelled
                        pending_create_request[1].put_nowait((False, None, None))
                        if pending_create_request[2] is not None and not pending_create_request[2].done():
                            pending_create_request[2].set_result(True)
                        if pending_create_request[3] is not None and not pending_create_request[3].done():
                            pending_create_request[3].set_result(True)
                    else:
                        restore_creates[pending_create_request[1]] = (pending_create_request[0], pending_create_request[2])
                    pending_create_request = None
                while True:
                    # Only one create request may be in flight at a time
                    # because created responses do not echo the request.
                    if pending_create_request is None:
                        if restore_creates:
                            q, (req, fut) = restore_creates.popitem()
                            # Send create request
                            pending_create_request = (req, q, fut, None)
                            input_queue.put_nowait(rpc.WatchRequest(create_request=req))
                    if pending_create_request is None:
                        select_pipes = [output_pipe, self._create_request_queue, self._cancel_request_queue]
                    else:
                        select_pipes = [output_pipe, self._cancel_request_queue]
                    reconn_wait = asyncio.ensure_future(reconnect_event.wait(), loop=self._loop)
                    select_futs = [reconn_wait, call_task]
                    # When completely idle, add a 2s timeout so the task can
                    # notice it has nothing to do and shut itself down.
                    if not pending_create_request and not registered_watches and \
                            not restore_creates:
                        select_futs.append(asyncio.sleep(2, loop=self._loop))
                    pipes, _ = await _select(select_pipes,
                                             select_futs,
                                             loop=self._loop)
                    reconn_wait.cancel()
                    if not pipes and not reconnect_event.is_set() and not call_task.done():
                        # No watch, stop the task
                        quitting = True
                        break
                    # Process cancel requests first
                    if self._cancel_request_queue in pipes:
                        cancel_requests = self._cancel_request_queue.read_nowait()
                        for output_queue, done_fut in cancel_requests:
                            if output_queue in pending_cancel_requests:
                                # Chain this future
                                pending_cancel_requests[output_queue].add_done_callback(
                                    lambda f, done_fut=done_fut: done_fut.set_result(True)
                                )
                            elif output_queue in restore_creates:
                                # Cancel a request which is not started
                                _, fut = restore_creates.pop(output_queue)
                                output_queue.put_nowait((False, None, None))
                                if fut is not None and not fut.done():
                                    fut.set_result(True)
                                if done_fut is not None and not done_fut.done():
                                    done_fut.set_result(True)
                            elif pending_create_request is not None and \
                                    pending_create_request[1] == output_queue:
                                # Cancel the pending create watch
                                if pending_create_request[3] is None:
                                    pending_create_request = pending_create_request[:3] + (done_fut,)
                                else:
                                    pending_create_request[3].add_done_callback(
                                        lambda f, done_fut=done_fut: done_fut.set_result(True))
                            else:
                                watch_id = registered_queues.get(output_queue)
                                if watch_id is None:
                                    done_fut.set_result(True)
                                else:
                                    # Send cancel request and save it to pending requests
                                    input_queue.put_nowait(
                                        rpc.WatchRequest(
                                            cancel_request=
                                            rpc.WatchCancelRequest(watch_id=watch_id)
                                        )
                                    )
                                    pending_cancel_requests[watch_id] = done_fut
                    # Process received events
                    if output_pipe in pipes:
                        outputs = output_pipe.read_nowait()
                        for response in outputs:
                            if response.created:
                                assert pending_create_request is not None
                                if response.compact_revision > 0:
                                    # Cancelled (Is it possible?)
                                    exc = CompactRevisonException(response.compact_revision)
                                    pending_create_request[1].put_nowait((False, exc, response.compact_revision))
                                    if pending_create_request[2] is not None and not \
                                            pending_create_request[2].done():
                                        pending_create_request[2].set_exception(exc)
                                    if pending_create_request[3] is not None and not \
                                            pending_create_request[3].done():
                                        pending_create_request[3].set_result(True)
                                else:
                                    registered_watches[response.watch_id] = pending_create_request[0:2]
                                    registered_queues[pending_create_request[1]] = response.watch_id
                                    if pending_create_request[2] is not None and not \
                                            pending_create_request[2].done():
                                        pending_create_request[2].set_result(True)
                                    if pending_create_request[3] is not None:
                                        # Immediately cancel the watch
                                        input_queue.put_nowait(
                                            rpc.WatchRequest(
                                                cancel_request=
                                                rpc.WatchCancelRequest(watch_id=response.watch_id)
                                            )
                                        )
                                        pending_cancel_requests[response.watch_id] = pending_create_request[3]
                                pending_create_request = None
                            if response.events:
                                last_received_revision = response.header.revision
                                last_watches_revision[response.watch_id] = last_received_revision
                                if response.watch_id in registered_watches:
                                    _, output_queue = registered_watches[response.watch_id]
                                    output_queue.put_nowait((True,
                                                             [Event(e, last_received_revision) for e in response.events],
                                                             last_received_revision))
                            if response.compact_revision > 0:
                                if response.watch_id in registered_watches:
                                    _, output_queue = registered_watches.pop(response.watch_id)
                                    exc = CompactRevisonException(response.compact_revision)
                                    output_queue.put_nowait((False, exc, response.compact_revision))
                                    del registered_queues[output_queue]
                                if response.watch_id in pending_cancel_requests:
                                    if not pending_cancel_requests[response.watch_id].done():
                                        pending_cancel_requests[response.watch_id].set_result(True)
                                    del pending_cancel_requests[response.watch_id]
                            if response.canceled:
                                # Cancel response
                                if response.watch_id in registered_watches:
                                    _, output_queue = registered_watches.pop(response.watch_id)
                                    if response.watch_id in pending_cancel_requests:
                                        # Normal cancel
                                        output_queue.put_nowait((False, None, None))
                                    else:
                                        output_queue.put_nowait((False, ServerCancelException(response.cancel_reason), _reconnect_revision(response.watch_id)))
                                    del registered_queues[output_queue]
                                if response.watch_id in pending_cancel_requests:
                                    if not pending_cancel_requests[response.watch_id].done():
                                        pending_cancel_requests[response.watch_id].set_result(True)
                                    del pending_cancel_requests[response.watch_id]
                    if self._create_request_queue in pipes:
                        while pending_create_request is None and not self._create_request_queue.is_empty():
                            create_req, output_queue, done_fut = self._create_request_queue.get_nowait()
                            if done_fut.done():
                                # Ignore cancelled create requests
                                output_queue.put_nowait((False, None, None))
                                continue
                            # Send create request
                            pending_create_request = (create_req, output_queue, done_fut, None)
                            input_queue.put_nowait(rpc.WatchRequest(create_request=create_req))
                    if reconnect_event.is_set():
                        # Reconnected
                        break
                    if call_task.done():
                        # Maybe not available
                        if call_task.exception() is not None:
                            await call_task
                        else:
                            break
            finally:
                # Terminate the request iterator and the gRPC call for this
                # connection attempt before reconnecting or quitting.
                input_queue.put_nowait(None)
                call_task.cancel()
                if quitting:
                    self._watch_task_running = None
                try:
                    await call_task
                except asyncio.CancelledError:
                    pass
                except Exception:
                    pass
    except Exception as exc:
        # Fatal failure: notify every waiter so no coroutine hangs forever.
        if registered_queues:
            for q, watch_id in registered_queues.items():
                q.put_nowait((False, exc, _reconnect_revision(watch_id)))
        if pending_create_request is not None:
            pending_create_request[1].put_nowait((False, exc, None))
            if pending_create_request[2] is not None and not pending_create_request[2].done():
                pending_create_request[2].set_exception(exc)
            if pending_create_request[3] is not None and not pending_create_request[3].done():
                pending_create_request[3].set_result(True)
        if pending_cancel_requests:
            for _, fut in pending_cancel_requests.items():
                if not fut.done():
                    fut.set_result(True)
        if restore_creates:
            for q, (_, fut) in restore_creates.items():
                if fut is not None and not fut.done():
                    # NOTE(review): set_result(exc) looks inconsistent with the
                    # set_exception(exc)/set_result(True) used elsewhere — confirm.
                    fut.set_result(exc)
                # NOTE(review): 'watch_id' here leaks from an earlier loop and
                # is unrelated to this queue (NameError if that loop never ran).
                q.put_nowait((False, exc, _reconnect_revision(watch_id)))
        if not self._create_request_queue.is_empty():
            create_requests = self._create_request_queue.read_nowait()
            for r in create_requests:
                r[1].put_nowait((False, exc, None))
                if r[2] is not None and not r[2].done():
                    r[2].set_exception(exc)
        if not self._cancel_request_queue.is_empty():
            cancel_requests = self._cancel_request_queue.read_nowait()
            for _, fut in cancel_requests:
                if fut is not None and not fut.done():
                    fut.set_result(True)
        # NOTE(review): 'exc is CancelledError' compares an instance to the
        # class and is always False; isinstance(exc, CancelledError) was
        # probably intended — confirm before changing.
        if exc is CancelledError:
            raise
    except asyncio.CancelledError:
        raise
    finally:
        self._watch_task_running = None
def _update_channel(self, channel):
    """Rebind the Watch stub to a new gRPC channel and trigger reconnection."""
    super()._update_channel(channel)
    self._watch_stub = stub.WatchStub(channel)
    # Wake the background task so it re-creates every watch on the new channel.
    self._reconnect_event.set()
def _ensure_watch_task(self):
    """Start the background watch dispatcher if it is not already running."""
    if self._watch_task_running is None:
        # The task clears _watch_task_running itself when it exits.
        self._watch_task_running = asyncio.ensure_future(self._watch_task(self._reconnect_event))
async def watch(self, key_range, start_revision=None, noput=False, nodelete=False, prev_kv=False,
                always_reconnect=False, ignore_compact=False, batch_events=False, create_event=False):
    """Async generator yielding Events (or tuples of Events) for *key_range*.

    key_range: a str/bytes key or a (key, range_end) pair (see put_key_range).
    start_revision: revision to start watching from; None means current head.
    noput / nodelete: server-side filters suppressing PUT / DELETE events.
    prev_kv: ask the server to include each event's previous key-value.
    always_reconnect: silently re-create the watch after any failure.
    ignore_compact: re-create the watch when the start revision was compacted.
    batch_events: yield each response's events as one tuple instead of singly.
    create_event: yield a single None once the watch is established
        (used by watch_scope to signal readiness).

    Raises WatchException subclasses delivered by the background task.
    NOTE(review): asyncio.Queue(loop=...) was removed in Python 3.10.
    """
    filters = []
    if noput:
        filters.append(rpc.WatchCreateRequest.NOPUT)
    if nodelete:
        filters.append(rpc.WatchCreateRequest.NODELETE)
    # Revision to resume from after a reconnect or server cancel.
    reconnect_revision = start_revision
    done_future = None
    try:
        while True:
            watch_request = rpc.WatchCreateRequest(start_revision=reconnect_revision,
                                                   filters=filters,
                                                   prev_kv=prev_kv)
            put_key_range(watch_request, key_range)
            self._ensure_watch_task()
            output_queue = asyncio.Queue(loop=self._loop)
            done_future = self._loop.create_future()
            await self._create_request_queue.put((watch_request, output_queue, done_future))
            try:
                # Resolved by the background task once the server confirms creation.
                await done_future
                if create_event:
                    yield None
                    create_event = False
                while True:
                    # Tuples are (is_event, events-or-exception, revision).
                    is_event, result, revision = await output_queue.get()
                    if not is_event:
                        if revision is not None:
                            reconnect_revision = revision
                        if result is None:
                            # Clean cancel from our side: leave the loop.
                            break
                        else:
                            # When an exception is raised in multiple positions
                            # the traceback will mix up, so clone the exception
                            # for each raise
                            if isinstance(result, WatchException):
                                raise result._clone() from result
                            else:
                                raise WatchException("Watch failed with server exception") from result
                    else:
                        reconnect_revision = revision + 1
                        if batch_events:
                            yield tuple(result)
                        else:
                            for e in result:
                                yield e
            except CompactRevisonException:
                if ignore_compact:
                    continue
                else:
                    raise
            except CancelledError:
                raise
            except Exception:
                if always_reconnect:
                    continue
                else:
                    raise
            else:
                break
    finally:
        # Best-effort cleanup: withdraw the create request and/or cancel the
        # server-side watch, but never block generator close on failures.
        if done_future is not None and not done_future.done():
            done_future.cancel()
        if self._watch_task_running:
            # NOTE(review): output_queue is unbound here if the first loop
            # iteration failed before it was assigned — confirm.
            done_future = self._loop.create_future()
            await self._cancel_request_queue.put((output_queue, done_future))
            if self._watch_task_running:
                try:
                    await done_future
                except Exception:
                    pass
def watch_scope(self, key_range, start_revision=None, noput=False, nodelete=False, prev_kv=False,
                always_reconnect=False, ignore_compact=False, batch_events=False):
    """Return an async context manager over watch().

    Entering the scope waits until the watch is established (via the
    create_event marker) and yields the event iterator; exiting closes it.
    """
    inner = self.watch(
        key_range,
        start_revision=start_revision,
        noput=noput,
        nodelete=nodelete,
        prev_kv=prev_kv,
        create_event=True,
        always_reconnect=always_reconnect,
        ignore_compact=ignore_compact,
        batch_events=batch_events,
    )
    return WatchScope(inner)

5
codecov.yml Normal file
View File

@ -0,0 +1,5 @@
coverage:
status:
project:
default:
threshold: 5

2
requirements.txt Normal file
View File

@ -0,0 +1,2 @@
aiogrpc>=1.4
protobuf

40
setup.py Normal file
View File

@ -0,0 +1,40 @@
#!/usr/bin/env python
"""Packaging script for aioetcd3, an asyncio wrapper for etcd v3."""
from setuptools import setup, find_packages

version = "1.13"

# Convert README.md to reStructuredText for PyPI when pypandoc is available;
# otherwise fall back to an empty long description.
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
    long_description = ""

setup(
    name="aioetcd3",
    version=version,
    author="gaopeiliang",
    author_email="964911957@qq.com",
    long_description=long_description,
    description="asyncio wrapper for etcd v3",
    license="Apache",
    classifiers=[
        # BUG FIX: this classifier said "MIT License" while license="Apache";
        # also removed the duplicated "Framework :: AsyncIO" entry below.
        'License :: OSI Approved :: Apache Software License',
        'Intended Audience :: Developers',
        'Framework :: AsyncIO',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development',
    ],
    url="https://github.com/gaopeiliang/aioetcd3",
    platforms=['any'],
    packages=find_packages(),
    python_requires='>=3.6',
    install_requires=[
        'aiogrpc>=1.4',
        'protobuf',
    ],
)

0
test/__init__.py Normal file
View File

23
test/cfssl/ca.pem Normal file
View File

@ -0,0 +1,23 @@
-----BEGIN CERTIFICATE-----
MIID3jCCAsagAwIBAgIUfLIjRt7LfB0n9SVKLCfJR54lUdYwDQYJKoZIhvcNAQEL
BQAwdTELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNV
BAcTAkNBMRgwFgYDVQQKEw9NeSBDb21wYW55IE5hbWUxEzARBgNVBAsTCk9yZyBV
bml0IDIxEjAQBgNVBAMTCU15IG93biBDQTAeFw0xNzA4MDEwODM3MDBaFw0yMjA3
MzEwODM3MDBaMHUxCzAJBgNVBAYTAlVTMRYwFAYDVQQIEw1TYW4gRnJhbmNpc2Nv
MQswCQYDVQQHEwJDQTEYMBYGA1UEChMPTXkgQ29tcGFueSBOYW1lMRMwEQYDVQQL
EwpPcmcgVW5pdCAyMRIwEAYDVQQDEwlNeSBvd24gQ0EwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDb8Ec7DVjLw74wmnG6Ke0DBXBKRxy2MVdQNw6a6vQ1
UPXcjPjctPVoy1IrDK6f7CH3LGeiAv/g2zbgDQdRT/f3b986DxvBQMRj/rmRCsp4
pcd+Nt0LtKBmKJCA7kk+urx/gmAS/9wa7RcC9kRg3husihIpa02AEMtd759Czjgy
JHlCtlIoBSqxCqrEkKzc0Zw8SfDI7zKtOGlfA9bia6lx/y3TMvdCuPrDAvf1FSSj
ECdXL70jYSSgA40VvhBVF5Nom/gsJ+/DmrYNwsGiA4klFp4ip4eKIyBcyk/Ni1uG
wzQoLSaB0UqUCXKvWCimCzEnl/I0IJZ/TcrFmNoauGuxAgMBAAGjZjBkMA4GA1Ud
DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBRsYfmUuh40
o+DpLTJauE06g098JzAfBgNVHSMEGDAWgBRsYfmUuh40o+DpLTJauE06g098JzAN
BgkqhkiG9w0BAQsFAAOCAQEAR/+35fG9b0Wt7jwoUF3L2A2OrZa36avb8ktkC/OO
qm7skzUBDb7iz+iznEDICgrvecjXwq+te5ob92H3weDs6YJz0+T8EXBnUtzN2+bu
eapdky5dZwweMqofr0FF2hLUVPWErgsZRj1gH1eLbFSirwtCbskmAzqK5TRKCtQL
cOZ/WlsgmCdETzHSLztdKKTau1l/qHJBdH7hIppG4iEISMueHlW+H9+yu9haKu2L
4J9feFOqC8G/aR+81og79WGwb2HJWgpw92ji8JxLvF5M1B++9AvwndkovkVgjFnk
JBDagrwsg/gr+FVi3uw5NnktLgtzzcMD0VWGCmAEn/R3Mg==
-----END CERTIFICATE-----

View File

@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIKIpaNwPGovuVxJM1GMGiJDHZJWaDzJyXeXPoIjGsJqfoAoGCCqGSM49
AwEHoUQDQgAEYwsP1I307m9u1wrsqjacF5xdSk67iifUGT/MqbSSBtGlRK05VDn2
87ghkIrsX1B7j/LJcUCDLnzmJVPjfa8lWg==
-----END EC PRIVATE KEY-----

View File

@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIMD5KcGoWPEneh+YMw36oJqC8wwJHyTlP1saFYuqUSKGoAoGCCqGSM49
AwEHoUQDQgAELMjGp4dyHA3vW3nU8XHh+JIT5B/bdIaSvVpJIoTgFPNoLpDspJmt
GAxStBczUE8rwmRLfNbk0aG8zn8EsZoqDA==
-----END EC PRIVATE KEY-----

View File

@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIC/TCCAeWgAwIBAgIUD8TzummshnrEovvmr4wqLzawxlowDQYJKoZIhvcNAQEL
BQAwdTELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNV
BAcTAkNBMRgwFgYDVQQKEw9NeSBDb21wYW55IE5hbWUxEzARBgNVBAsTCk9yZyBV
bml0IDIxEjAQBgNVBAMTCU15IG93biBDQTAeFw0xNzA4MDEwOTU5MDBaFw0yMjA3
MzEwOTU5MDBaMEExCzAJBgNVBAYTAlVTMRYwFAYDVQQIEw1TYW4gRnJhbmNpc2Nv
MQswCQYDVQQHEwJDQTENMAsGA1UEAxMEcm9vdDBZMBMGByqGSM49AgEGCCqGSM49
AwEHA0IABCzIxqeHchwN71t51PFx4fiSE+Qf23SGkr1aSSKE4BTzaC6Q7KSZrRgM
UrQXM1BPK8JkS3zW5NGhvM5/BLGaKgyjgYMwgYAwDgYDVR0PAQH/BAQDAgWgMBMG
A1UdJQQMMAoGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFJZWw4xW
lf9oa/ioycvblk/fzktLMB8GA1UdIwQYMBaAFGxh+ZS6HjSj4OktMlq4TTqDT3wn
MAsGA1UdEQQEMAKCADANBgkqhkiG9w0BAQsFAAOCAQEAo4N4uDXl6nIG8ndrOeoe
S2JpPTU+gkaez2fs21DpuGO3SLSEnIYLcaY3p6sdjU2m0m2yGweLKLfVmQLzHO0R
4sZtKQFY1sklhCAhmiU5YZbb98gyXMfPVaFXCy5IWnajDsmhmh0G0UbVV/zaWJw+
B+yzGVvWMBI4htG9Zz59yIt4Fku2TgqDudiFEzm9OB9LykYS+oKKLqb2DlLmWSdu
NOr0j+sSwzTiGNSstb4jaXhbO2f80mykg4Rs5oFbiqYMH7qOfk5uR/uSomu+l03v
cv9t71iCZ1/ss+ZgfS24crnsAUqUZBRmPGu1lrXRaVNzkK9BGZ5XAHT2eBEQAlqp
OA==
-----END CERTIFICATE-----

19
test/cfssl/client.pem Normal file
View File

@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIC/zCCAeegAwIBAgIUfo2E4cRuVDSGcUlQbc8VWD0EHZcwDQYJKoZIhvcNAQEL
BQAwdTELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNV
BAcTAkNBMRgwFgYDVQQKEw9NeSBDb21wYW55IE5hbWUxEzARBgNVBAsTCk9yZyBV
bml0IDIxEjAQBgNVBAMTCU15IG93biBDQTAeFw0xNzA4MDEwOTU4MDBaFw0yMjA3
MzEwOTU4MDBaMEMxCzAJBgNVBAYTAlVTMRYwFAYDVQQIEw1TYW4gRnJhbmNpc2Nv
MQswCQYDVQQHEwJDQTEPMA0GA1UEAxMGY2xpZW50MFkwEwYHKoZIzj0CAQYIKoZI
zj0DAQcDQgAEYwsP1I307m9u1wrsqjacF5xdSk67iifUGT/MqbSSBtGlRK05VDn2
87ghkIrsX1B7j/LJcUCDLnzmJVPjfa8lWqOBgzCBgDAOBgNVHQ8BAf8EBAMCBaAw
EwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUG6Sc
mNr0LfHR3Th2ER8mo8UFOSIwHwYDVR0jBBgwFoAUbGH5lLoeNKPg6S0yWrhNOoNP
fCcwCwYDVR0RBAQwAoIAMA0GCSqGSIb3DQEBCwUAA4IBAQAk5+gTElEnkaKRIuy2
Uf/8GRSFAmlCbuEVGGuQ4Iif1KWVt0sklUC4EpknJGCDBDfRlH/n/O0cIAhxHJVd
8HXRfYy+ynhr08gdE7lbueavEpvUb3QNFR8ZrODcqwvJgHyWsffk7f87hpsx9lr2
mfLvokau0UhpVlq+x7IQ5dKw/ZzKv/zjI/A2guwK1UWdk5vr0W7LE5XY4pa+9/Qy
y6FxHcKc4z8FJ6ClRGy/RGJrbg0VgCmrTMa7NCpIyZ/onn7RaxHatSn7cnQPVNKq
fBHHBVdl1LlsJSPRZar0zGkUS1UeCtcSW1aqSmkO39p18tJ9hDWM0xkY6FlI3A+4
shAQ
-----END CERTIFICATE-----

View File

@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIKiO/qjfQEIJS4OYW3rHUQodAEk2/PzNj+V6Oy/+JSnIoAoGCCqGSM49
AwEHoUQDQgAE2azbBMQaqhuTm5d+7rzIdqlXnBrv0LxwDLIQnUTosRYlrS19+gEg
AccoyJEyzGFzd3+Ot6OOX3nXUxDSVFzlLA==
-----END EC PRIVATE KEY-----

19
test/cfssl/server.pem Normal file
View File

@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDFjCCAf6gAwIBAgIUHeUTcT0g5zmaUzPKeqEi6AZMZ5wwDQYJKoZIhvcNAQEL
BQAwdTELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNV
BAcTAkNBMRgwFgYDVQQKEw9NeSBDb21wYW55IE5hbWUxEzARBgNVBAsTCk9yZyBV
bml0IDIxEjAQBgNVBAMTCU15IG93biBDQTAeFw0xNzA4MDIwODM4MDBaFw0yMjA4
MDEwODM4MDBaMEQxCzAJBgNVBAYTAlVTMRYwFAYDVQQIEw1TYW4gRnJhbmNpc2Nv
MQswCQYDVQQHEwJDQTEQMA4GA1UEAxMHbWVtYmVyMzBZMBMGByqGSM49AgEGCCqG
SM49AwEHA0IABNms2wTEGqobk5uXfu68yHapV5wa79C8cAyyEJ1E6LEWJa0tffoB
IAHHKMiRMsxhc3d/jrejjl9511MQ0lRc5SyjgZkwgZYwDgYDVR0PAQH/BAQDAgWg
MBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFKmG
Ok0gF06fFZlNPVR8QoF3i7/XMB8GA1UdIwQYMBaAFGxh+ZS6HjSj4OktMlq4TTqD
T3wnMCEGA1UdEQQaMBiHBH8AAAGHBGRJLgOHBGRJLgSHBGRJLgUwDQYJKoZIhvcN
AQELBQADggEBAMOUiyg+R6Qh6CFmNjxZJCk1hlRPrYucA/7phWRsqOI/PCFpak8d
LIOCQdPwTC20hqZxszN0GmJVdIZy1FEDa7yDZip2pYrmCR2ePAAwXtEhUAGBblIU
IAFiLtFps5GQqi3+f9SUto0HnriQiBt8fSf4KNeD84gkOt524qliAY2bIOdphJaY
Y+Qg6jkn8+lzoY+rD8tzOx37dgL+/V0dCyo7fBLnW6mOEdHk5sMpPPyWzmCYSdeb
SqNpp9+EDOVhz175CQuuq0WD+x3D210UIQsEFvyjg/W6Vinl9W/hOG6yxv01IVcM
yhngfepPKZ20b4njhHJv+geOn5yshCcTQjI=
-----END CERTIFICATE-----

279
test/test_auth.py Normal file
View File

@ -0,0 +1,279 @@
import unittest
import asyncio
import functools
from aioetcd3.client import client, ssl_client, set_grpc_cipher
from aioetcd3.help import range_all, PER_RW
from aioetcd3.exceptions import AuthError, Unauthenticated, PermissionDenied
from .utils import switch_auth_off, switch_auth_on
def asynctest(f):
    """Decorator: run the wrapped coroutine test method to completion.

    The coroutine's return value is discarded; unittest only cares about
    raised assertions.
    """
    @functools.wraps(f)
    def runner(self):
        loop = asyncio.get_event_loop()
        loop.run_until_complete(f(self))
    return runner
# Credentials and role name used by the auth test cases below.
TEST_USER_NAME = 'test'
TEST_USER_PASSWORD = "test"
TEST_ROLE_NAME = 'admin'
class AuthTest(unittest.TestCase):
    """User/role management tests against a plain endpoint and an SSL endpoint.

    Uses three clients: an unauthenticated one on :2379, plus a root and a
    limited SSL client on :2378 (certificate-authenticated).
    """

    @asynctest
    async def setUp(self):
        """Build the three clients and reset server-side auth state."""
        endpoints = "127.0.0.1:2379"
        self.client = client(endpoint=endpoints)
        set_grpc_cipher()
        auth_etcd_url = "127.0.0.1:2378"
        self.root_client = ssl_client(endpoint=auth_etcd_url, ca_file="test/cfssl/ca.pem",
                                      cert_file="test/cfssl/client-root.pem",
                                      key_file="test/cfssl/client-root-key.pem")
        self.client_client = ssl_client(endpoint=auth_etcd_url, ca_file="test/cfssl/ca.pem",
                                        cert_file="test/cfssl/client.pem",
                                        key_file="test/cfssl/client-key.pem")
        await self.cleanUp()

    @asynctest
    async def test_auth_1(self):
        """User lifecycle: add, list, inspect, change password, delete."""
        await self.client.user_add(username=TEST_USER_NAME, password='1234')
        users = await self.client.user_list()
        self.assertIn(TEST_USER_NAME, users)
        roles = await self.client.user_get(username=TEST_USER_NAME)
        self.assertEqual(len(roles), 0)
        await self.client.user_change_password(username=TEST_USER_NAME, password=TEST_USER_PASSWORD)
        await self.client.user_delete(username=TEST_USER_NAME)

    @asynctest
    async def test_auth_2(self):
        """Role lifecycle: add, list, fetch, delete."""
        await self.client.role_add(name=TEST_ROLE_NAME)
        roles = await self.client.role_list()
        self.assertIn(TEST_ROLE_NAME, roles)
        role_info = await self.client.role_get(name=TEST_ROLE_NAME)
        await self.client.role_delete(name=TEST_ROLE_NAME)

    @asynctest
    async def test_auth_3(self):
        """Grant/revoke roles and permissions; granting a missing role fails."""
        await self.client.user_add(username=TEST_USER_NAME, password=TEST_USER_PASSWORD)
        with self.assertRaises(Exception):
            # TEST_ROLE_NAME does not exist yet, so the grant must fail.
            await self.client.user_grant_role(username=TEST_USER_NAME, role=TEST_ROLE_NAME)
        await self.client.role_add(name=TEST_ROLE_NAME)
        await self.client.user_grant_role(username=TEST_USER_NAME, role=TEST_ROLE_NAME)
        await self.client.role_grant_permission(name=TEST_ROLE_NAME,
                                                key_range=range_all(),
                                                permission=PER_RW)
        await self.client.user_revoke_role(username=TEST_USER_NAME, role=TEST_ROLE_NAME)
        await self.client.role_revoke_permission(name=TEST_ROLE_NAME,
                                                 key_range=range_all())

    @asynctest
    async def test_auth_4(self):
        """With auth enabled, access requires an explicit permission grant."""
        await self.root_client.user_add(username='root', password='root')
        await self.root_client.role_add(name='root')
        await self.root_client.user_grant_role(username='root', role='root')
        await self.root_client.auth_enable()
        await self.root_client.user_add(username='client', password='client')
        await self.root_client.role_add(name='client')
        await self.root_client.put('/foo', '/foo')
        value, meta = await self.root_client.get('/foo')
        self.assertEqual(value, b'/foo')
        with self.assertRaises(Exception):
            # No permission granted to the limited client yet.
            await self.client_client.get('/foo')
        await self.root_client.role_grant_permission(name='client', key_range='/foo', permission=PER_RW)
        await self.root_client.user_grant_role(username='client', role='client')
        value, meta = await self.client_client.get('/foo')
        self.assertEqual(value, b'/foo')
        await self.client_client.put('/foo', 'ssss')

    async def delete_all_user(self):
        """Remove every user known to either endpoint."""
        for u in await self.client.user_list():
            await self.client.user_delete(username=u)
        for u in await self.root_client.user_list():
            await self.root_client.user_delete(username=u)

    async def delete_all_role(self):
        """Remove every role known to either endpoint."""
        for r in await self.client.role_list():
            await self.client.role_delete(name=r)
        for r in await self.root_client.role_list():
            await self.root_client.role_delete(name=r)

    async def cleanUp(self):
        """Wipe all keys, disable auth, and drop all users/roles."""
        await self.client.delete(range_all())
        await self.root_client.auth_disable()
        await self.delete_all_user()
        await self.delete_all_role()

    @asynctest
    async def tearDown(self):
        await self.cleanUp()
        await self.client.close()
class PasswordAuthTest(unittest.TestCase):
    """Username/password authentication tests over a plaintext connection."""

    @asynctest
    async def setUp(self):
        """Enable auth via the unauthenticated client, then log in as root/client."""
        self.endpoints = "127.0.0.1:2379"
        self.unauthenticated_client = client(endpoint=self.endpoints)
        await self.cleanUp()
        await switch_auth_on(self.unauthenticated_client)
        self.client_client = client(
            endpoint=self.endpoints, username="client", password="client"
        )
        self.root_client = client(endpoint=self.endpoints, username="root", password="root")

    async def create_kv_for_test(self):
        """Seed '/foo' as root and verify the write landed."""
        await self.root_client.put('/foo', '/foo')
        value, meta = await self.root_client.get('/foo')
        self.assertEqual(value, b'/foo')

    @asynctest
    async def test_auth_1(self):
        """Access is denied until root grants the client role a permission."""
        await self.create_kv_for_test()
        with self.assertRaises(PermissionDenied):
            await self.client_client.get('/foo')
        await self.root_client.role_grant_permission(name='client', key_range='/foo', permission=PER_RW)
        value, meta = await self.client_client.get('/foo')
        self.assertEqual(value, b'/foo')
        await self.client_client.put('/foo', 'ssss')

    @asynctest
    async def test_wrong_password(self):
        """A bad password surfaces as AuthError with a readable repr."""
        wrong_password_client = client(endpoint=self.endpoints, username="client", password="wrong_password")
        with self.assertRaises(AuthError) as exc:
            await wrong_password_client.get("/foo")
        assert repr(exc.exception) == "`{}`: reason: `{}`".format(exc.exception.code, exc.exception.details)

    @asynctest
    async def test_wrong_token(self):
        """A forged auth token surfaces as Unauthenticated."""
        await self.create_kv_for_test()
        await self.root_client.role_grant_permission(name='client', key_range='/foo', permission=PER_RW)
        new_client = client(endpoint=self.endpoints, username="client", password="client")
        value, meta = await self.client_client.get('/foo')
        self.assertEqual(value, b'/foo')
        # Put invalid token
        new_client._metadata = (("token", "invalid_token"),)
        with self.assertRaises(Unauthenticated) as exc:
            await new_client.get("/foo")

    async def cleanUp(self):
        await self.unauthenticated_client.delete(range_all())

    @asynctest
    async def tearDown(self):
        await switch_auth_off(self.root_client, self.unauthenticated_client)
        await self.cleanUp()
class PasswordAuthWithSslTest(unittest.TestCase):
    """Username/password authentication tests over TLS (server cert only)."""

    @asynctest
    async def setUp(self):
        """Enable auth through an anonymous TLS client, then log in as root/client."""
        self.endpoints = "127.0.0.1:2377"
        self.unauthenticated_client = ssl_client(
            endpoint=self.endpoints,
            ca_file="test/cfssl/ca.pem",
        )
        await self.cleanUp()
        await switch_auth_on(self.unauthenticated_client)
        self.root_client = ssl_client(endpoint=self.endpoints, ca_file="test/cfssl/ca.pem",
                                      username="root", password="root")
        self.client_client = ssl_client(endpoint=self.endpoints, ca_file="test/cfssl/ca.pem",
                                        username="client", password="client")

    async def create_kv_for_test(self):
        """Seed '/foo' as root and verify the write landed."""
        await self.root_client.put('/foo', '/foo')
        value, meta = await self.root_client.get('/foo')
        self.assertEqual(value, b'/foo')

    @asynctest
    async def test_auth_1(self):
        """Access is denied until root grants the client role a permission."""
        await self.create_kv_for_test()
        with self.assertRaises(PermissionDenied):
            await self.client_client.get('/foo')
        await self.root_client.role_grant_permission(name='client', key_range='/foo', permission=PER_RW)
        value, meta = await self.client_client.get('/foo')
        self.assertEqual(value, b'/foo')
        await self.client_client.put('/foo', 'ssss')

    @asynctest
    async def test_wrong_password(self):
        """A bad password surfaces as AuthError with a readable repr."""
        wrong_password_client = ssl_client(
            endpoint=self.endpoints, ca_file="test/cfssl/ca.pem",
            username="client", password="wrong_password"
        )
        with self.assertRaises(AuthError) as exc:
            await wrong_password_client.get("/foo")
        assert repr(exc.exception) == "`{}`: reason: `{}`".format(exc.exception.code, exc.exception.details)

    @asynctest
    async def test_wrong_token(self):
        """A forged auth token surfaces as Unauthenticated, even for root."""
        await self.create_kv_for_test()
        await self.root_client.role_grant_permission(name='client', key_range='/foo', permission=PER_RW)
        new_client = ssl_client(
            endpoint=self.endpoints, ca_file="test/cfssl/ca.pem",
            username="root", password="root"
        )
        value, meta = await new_client.get('/foo')
        self.assertEqual(value, b'/foo')
        # Put invalid token
        new_client._metadata = (("token", "invalid_token"),)
        with self.assertRaises(Unauthenticated) as exc:
            await new_client.get("/foo")

    async def cleanUp(self):
        await self.unauthenticated_client.delete(range_all())

    @asynctest
    async def tearDown(self):
        await switch_auth_off(self.root_client, self.unauthenticated_client)
        await self.cleanUp()

41
test/test_cluster.py Normal file
View File

@ -0,0 +1,41 @@
import unittest
import asyncio
import functools
from aioetcd3.client import client
from aioetcd3.help import range_all
def asynctest(f):
    """Decorator: drive the wrapped coroutine test method on the event loop.

    The coroutine's return value is discarded.
    """
    @functools.wraps(f)
    def runner(self):
        loop = asyncio.get_event_loop()
        loop.run_until_complete(f(self))
    return runner
class ClusterTest(unittest.TestCase):
    """Cluster membership and health-check tests."""

    def setUp(self):
        self.client = client(endpoint="127.0.0.1:2379")

    @asynctest
    async def test_member(self):
        """member_list returns at least one member; health checks pass."""
        members = await self.client.member_list()
        self.assertTrue(members)
        first = members[0]
        # Health check against an explicit member URL list...
        healthy, unhealthy = await self.client.member_healthy([first.clientURLs])
        self.assertTrue(healthy)
        self.assertFalse(unhealthy)
        # ...and against the default (all known members).
        healthy, unhealthy = await self.client.member_healthy()
        self.assertTrue(healthy)
        self.assertFalse(unhealthy)

    @asynctest
    async def tearDown(self):
        await self.client.close()

163
test/test_kv.py Normal file
View File

@ -0,0 +1,163 @@
import unittest
import asyncio
import functools
from aioetcd3.client import client
from aioetcd3.kv import KV
from aioetcd3.help import range_all, range_prefix, range_greater, range_greater_equal
from aioetcd3 import transaction
def asynctest(f):
    """Decorator: run the wrapped coroutine test method and return its result."""
    @functools.wraps(f)
    def runner(self):
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(f(self))
    return runner
class KVTest(unittest.TestCase):
    """Key/value operation tests: put/get/range/delete/pop and transactions."""

    @asynctest
    async def setUp(self):
        endpoints = "127.0.0.1:2379"
        self.client = client(endpoint=endpoints)
        # Also exercise replacing the server list with the same endpoints.
        self.client.update_server_list(endpoint=endpoints)
        await self.cleanUp()

    async def cleanUp(self):
        await self.client.delete(key_range=range_all())

    @asynctest
    async def tearDown(self):
        await self.cleanUp()
        await self.client.close()

    @asynctest
    async def test_put_get(self):
        """Round-trip writes, prev_kv semantics, ranges, counts, pop, delete."""
        for i in range(0, 10):
            key = '/test' + str(i)
            value, meta = await self.client.put(key, str(i))
            # Without prev_kv the previous value is not returned.
            self.assertIsNone(value)
            self.assertIsNone(meta)
        value, meta = await self.client.put('/test9', "10", prev_kv=True)
        self.assertEqual(value, b'9')
        self.assertIsNotNone(meta)
        value, meta = await self.client.put('/test9', "9", prev_kv=True, ignore_value=True)
        self.assertEqual(value, b'10')
        self.assertIsNotNone(meta)
        value, meta = await self.client.put('/test9', "9", prev_kv=True)
        self.assertEqual(value, b'10')
        self.assertIsNotNone(meta)
        count = await self.client.count(key_range=range_all())
        self.assertEqual(count, 10)
        value, meta = await self.client.get("/test9")
        self.assertEqual(value, b'9')
        self.assertIsNotNone(meta)
        keys_list = await self.client.range_keys(key_range=range_all())
        self.assertEqual(len(keys_list), 10)
        value_list = await self.client.range(key_range=range_all())
        self.assertEqual(len(value_list), 10)
        stored = sorted(v[1].decode('utf-8') for v in value_list)
        expected = [str(i) for i in range(0, 10)]
        self.assertEqual(stored, expected)
        value_list = await self.client.range(key_range=range_all(), limit=5)
        self.assertEqual(len(value_list), 5)
        value_list = await self.client.range(key_range=range_prefix('/'))
        self.assertEqual(len(value_list), 10)
        value_list = await self.client.range(key_range=range_prefix('/'), limit=11)
        self.assertEqual(len(value_list), 10)
        value_list = await self.client.range(key_range=range_greater_equal('/test8'))
        self.assertEqual(len(value_list), 2)
        self.assertEqual(value_list[0][1], b'8')
        self.assertEqual(value_list[1][1], b'9')
        value_list = await self.client.range(key_range=range_greater('/testa'))
        self.assertEqual(len(value_list), 0)
        await self.client.delete(key_range='/test9')
        value, meta = await self.client.get("/test9")
        self.assertIsNone(value)
        self.assertIsNone(meta)
        value_list = await self.client.pop(key_range='/test8')
        self.assertEqual(len(value_list), 1)
        self.assertEqual(value_list[0][0], b'/test8')
        self.assertEqual(value_list[0][1], b'8')
        value_list = await self.client.delete(key_range=range_prefix('/'), prev_kv=True)
        self.assertEqual(len(value_list), 8)

    @asynctest
    async def test_transaction(self):
        """Transactions: compare clauses, success/fail branches, prev_kv results."""
        await self.client.put('/trans1', 'trans1')
        await self.client.put('/trans2', 'trans2')
        is_success, response = await self.client.txn(compare=[
            transaction.Value('/trans1') == b'trans1',
            transaction.Value('/trans2') == b'trans2'
        ], success=[
            KV.get.txn('/trans1'),
            KV.range.txn('/trans2')
        ], fail=[
            KV.delete.txn('/trans1')
        ])
        self.assertEqual(is_success, True)
        self.assertEqual(len(response), 2)
        self.assertEqual(response[0][0], b'trans1')
        self.assertEqual(response[1][0][:2], (b'/trans2', b'trans2'))
        is_success, response = await self.client.txn(compare=[
            transaction.Value('/trans1') == b'trans1',
            transaction.Value('/trans2') == b'trans2'
        ], success=[
            KV.delete.txn('/trans1'),
            KV.put.txn('/trans2', 'trans2', prev_kv=True),
            KV.put.txn('/trans3', 'trans3', prev_kv=True)
        ], fail=[
            KV.delete.txn('/trans1')
        ])
        self.assertEqual(is_success, True)
        self.assertEqual(len(response), 3)
        del_response = response[0]
        self.assertEqual(del_response, 1)
        put_response = response[1]
        self.assertEqual(put_response[0], b'trans2')
        put_response = response[2]
        # /trans3 did not exist before, so there is no prev_kv — None.
        self.assertIsNone(put_response[0])
        is_success, response = await self.client.txn(compare=[
            transaction.Value('/trans3') != b'trans3',
            transaction.Version('/trans3') < 1000,
            transaction.Mod('/trans3') > 100,
            transaction.Create('/trans3') != 200
        ], success=[
        ], fail=[
            KV.delete.txn('/trans3', prev_kv=True)
        ])
        self.assertEqual(is_success, False)
        self.assertEqual(len(response), 1)
        self.assertEqual(len(response[0]), 1)
        self.assertEqual(response[0][0][:2], (b'/trans3', b'trans3'))
# Allow running this test module directly with `python test/test_kv.py`.
if __name__ == '__main__':
    unittest.main()

133
test/test_lease.py Normal file
View File

@ -0,0 +1,133 @@
import unittest
import asyncio
import functools
from aioetcd3.client import client
from aioetcd3.help import range_all, range_prefix, PER_RW
from .utils import switch_auth_on, switch_auth_off
def asynctest(f):
    """Decorator: drive the wrapped coroutine test method on the event loop.

    The coroutine's return value is discarded.
    """
    @functools.wraps(f)
    def runner(self):
        loop = asyncio.get_event_loop()
        loop.run_until_complete(f(self))
    return runner
class LeaseTest(unittest.TestCase):
    """Lease grant/refresh/revoke tests, run both with and without auth."""

    @asynctest
    async def setUp(self):
        self.endpoints = "127.0.0.1:2379"
        self.client = client(endpoint=self.endpoints)
        await self.cleanUp()

    async def _lease_1(self):
        """Lease lifecycle through the client API."""
        lease = await self.client.grant_lease(ttl=5)
        self.assertEqual(lease.ttl, 5)
        await asyncio.sleep(1)
        lease, keys = await self.client.get_lease_info(lease)
        self.assertLessEqual(lease.ttl, 4)
        self.assertEqual(len(keys), 0)
        lease = await self.client.refresh_lease(lease)
        self.assertEqual(lease.ttl, 5)
        await self.client.revoke_lease(lease)
        lease, keys = await self.client.get_lease_info(lease)
        # A revoked lease no longer exists.
        self.assertIsNone(lease)
        self.assertEqual(len(keys), 0)

    @asynctest
    async def test_lease_1(self):
        await self._lease_1()

    async def _lease_2(self):
        """Lease lifecycle through the lease object API and the scope helper."""
        lease = await self.client.grant_lease(ttl=5)
        self.assertEqual(lease.ttl, 5)
        await asyncio.sleep(1)
        lease, keys = await lease.info()
        self.assertLessEqual(lease.ttl, 4)
        self.assertEqual(len(keys), 0)
        lease = await lease.refresh()
        self.assertEqual(lease.ttl, 5)
        await lease.revoke()
        lease, keys = await lease.info()
        self.assertIsNone(lease)
        self.assertEqual(len(keys), 0)
        lease = None
        async with self.client.grant_lease_scope(ttl=5) as scoped:
            lease = scoped
            await asyncio.sleep(1)
        # Leaving the scope revokes the lease.
        lease, keys = await lease.info()
        self.assertIsNone(lease)
        self.assertEqual(len(keys), 0)

    @asynctest
    async def test_lease_2(self):
        await self._lease_2()

    async def _lease_3(self):
        """A key attached to an expired lease disappears with it."""
        lease = await self.client.grant_lease(ttl=5)
        self.assertEqual(lease.ttl, 5)
        await self.client.put("/testlease", "testlease", lease=lease)
        await asyncio.sleep(6)
        lease, keys = await lease.info()
        self.assertIsNone(lease, None)
        self.assertEqual(len(keys), 0)
        value, meta = await self.client.get('/testlease')
        self.assertIsNone(value)
        self.assertIsNone(meta)

    @asynctest
    async def test_lease_3(self):
        await self._lease_3()

    async def _run_test_with_auth(self, test):
        """Re-run *test* as the restricted 'client' user with auth enabled."""
        default_client = self.client
        await switch_auth_on(default_client)
        root_client = client(endpoint=self.endpoints, username="root", password="root")
        await root_client.role_grant_permission(name='client', key_range=range_prefix('/testlease'), permission=PER_RW)
        self.client = client(endpoint=self.endpoints, username="client", password="client")
        try:
            await test()
        finally:
            await switch_auth_off(
                root_client,
                default_client
            )
            await root_client.close()
            await self.client.close()
            self.client = default_client

    @asynctest
    async def test_lease_1_with_auth(self):
        await self._run_test_with_auth(self._lease_1)

    @asynctest
    async def test_lease_2_with_auth(self):
        await self._run_test_with_auth(self._lease_2)

    @asynctest
    async def test_lease_3_with_auth(self):
        await self._run_test_with_auth(self._lease_3)

    @asynctest
    async def tearDown(self):
        await self.cleanUp()
        await self.client.close()

    async def cleanUp(self):
        await self.client.delete(range_all())

410
test/test_watch.py Normal file
View File

@ -0,0 +1,410 @@
import unittest
import functools
import asyncio
from grpc import RpcError
from aioetcd3.client import client
from aioetcd3.help import range_all, range_prefix, PER_RW
from aioetcd3.watch import EVENT_TYPE_CREATE,EVENT_TYPE_DELETE,EVENT_TYPE_MODIFY,\
CompactRevisonException, WatchException
from .utils import switch_auth_off, switch_auth_on
def asynctest(f):
    """Decorator: run the wrapped coroutine test method and return its result."""
    @functools.wraps(f)
    def runner(self):
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(f(self))
    return runner
class WatchTest(unittest.TestCase):
    """Watch-stream tests: event filtering, reconnect, batching, compaction,
    and error propagation — each variant also re-run with auth enabled.

    NOTE(review): indentation reconstructed from a whitespace-stripped diff;
    nesting of the async closures should be confirmed against the original file.
    """

    @asynctest
    async def setUp(self):
        self.endpoints = "127.0.0.1:2379"
        self.client = client(endpoint=self.endpoints)
        await self.cleanUp()

    async def common_watch1(self):
        """Four concurrent watchers with different filters see the expected events."""
        f1 = asyncio.get_event_loop().create_future()

        async def watch_1():
            # Plain watch_scope: create, modify, delete in order.
            i = 0
            async with self.client.watch_scope('/foo') as response:
                f1.set_result(None)
                async for event in response:
                    i = i + 1
                    if i == 1:
                        self.assertEqual(event.type, EVENT_TYPE_CREATE)
                        self.assertEqual(event.key, b'/foo')
                        self.assertEqual(event.value, b'foo')
                    elif i == 2:
                        self.assertEqual(event.type, EVENT_TYPE_MODIFY)
                        self.assertEqual(event.key, b'/foo')
                        self.assertEqual(event.value, b'foo1')
                    elif i == 3:
                        self.assertEqual(event.type, EVENT_TYPE_DELETE)
                        self.assertEqual(event.key, b'/foo')
                        # delete event has no value
                        break

        f2 = asyncio.get_event_loop().create_future()

        async def watch_2():
            # prev_kv watch: a None sentinel signals the stream was created.
            i = 0
            async for event in self.client.watch('/foo', prev_kv=True, create_event=True):
                if event is None:
                    f2.set_result(None)
                    continue
                i = i + 1
                if i == 1:
                    self.assertEqual(event.type, EVENT_TYPE_CREATE)
                    self.assertEqual(event.key, b'/foo')
                    self.assertEqual(event.value, b'foo')
                elif i == 2:
                    self.assertEqual(event.type, EVENT_TYPE_MODIFY)
                    self.assertEqual(event.key, b'/foo')
                    self.assertEqual(event.value, b'foo1')
                    self.assertEqual(event.pre_value, b'foo')
                elif i == 3:
                    self.assertEqual(event.type, EVENT_TYPE_DELETE)
                    self.assertEqual(event.key, b'/foo')
                    break

        f3 = asyncio.get_event_loop().create_future()

        async def watch_3():
            # noput filter: only the delete is delivered.
            i = 0
            async for event in self.client.watch('/foo', prev_kv=True, noput=True, create_event=True):
                if event is None:
                    f3.set_result(None)
                    continue
                i = i + 1
                if i == 1:
                    self.assertEqual(event.type, EVENT_TYPE_DELETE)
                    self.assertEqual(event.key, b'/foo')
                    break

        f4 = asyncio.get_event_loop().create_future()

        async def watch_4():
            # nodelete filter: only create and modify are delivered.
            i = 0
            async for event in self.client.watch('/foo', prev_kv=True, nodelete=True, create_event=True):
                if event is None:
                    f4.set_result(None)
                    continue
                i = i + 1
                if i == 1:
                    self.assertEqual(event.type, EVENT_TYPE_CREATE)
                    self.assertEqual(event.key, b'/foo')
                    self.assertEqual(event.value, b'foo')
                elif i == 2:
                    self.assertEqual(event.type, EVENT_TYPE_MODIFY)
                    self.assertEqual(event.key, b'/foo')
                    self.assertEqual(event.value, b'foo1')
                    self.assertEqual(event.pre_value, b'foo')
                    break

        w1 = asyncio.ensure_future(watch_1())
        w2 = asyncio.ensure_future(watch_2())
        w3 = asyncio.ensure_future(watch_3())
        w4 = asyncio.ensure_future(watch_4())
        # Wait until all watchers are registered before generating events.
        await asyncio.wait_for(asyncio.wait([f1, f2, f3, f4]), 2)
        await self.client.put('/foo', 'foo')
        await self.client.put('/foo', 'foo1')
        await self.client.delete('/foo')
        done, pending = await asyncio.wait([w1, w2, w3, w4], timeout=20)
        for t in done:
            t.result()

    @asynctest
    async def test_watch_1(self):
        await self.common_watch1()

    async def watch_reconnect(self):
        """A watcher survives a server-list update mid-stream."""
        f1 = asyncio.get_event_loop().create_future()
        f2 = asyncio.get_event_loop().create_future()

        async def watch_1():
            i = 0
            async with self.client.watch_scope('/foo') as response:
                f1.set_result(None)
                async for event in response:
                    i = i + 1
                    if i == 1:
                        self.assertEqual(event.type, EVENT_TYPE_CREATE)
                        self.assertEqual(event.key, b'/foo')
                        self.assertEqual(event.value, b'foo')
                        f2.set_result(None)
                    elif i == 2:
                        self.assertEqual(event.type, EVENT_TYPE_MODIFY)
                        self.assertEqual(event.key, b'/foo')
                        self.assertEqual(event.value, b'foo1')
                    elif i == 3:
                        self.assertEqual(event.type, EVENT_TYPE_DELETE)
                        self.assertEqual(event.key, b'/foo')
                        # delete event has no value
                        break

        t1 = asyncio.ensure_future(watch_1())
        await f1
        await self.client.put('/foo', 'foo')
        await f2
        # Force a channel rebuild while the watch is active.
        self.client.update_server_list(self.endpoints)
        await self.client.put('/foo', 'foo1')
        await self.client.delete('/foo')
        await t1

    @asynctest
    async def test_watch_reconnect(self):
        await self.watch_reconnect()

    async def watch_create_cancel(self):
        """Mass-create and cancel watchers; background task winds down when idle."""

        async def watch_1():
            async with self.client.watch_scope('/foo') as _:
                pass

        async def watch_2():
            async with self.client.watch_scope('/foo') as _:
                await asyncio.sleep(5)

        for _ in range(0, 5):
            watches = [asyncio.ensure_future(watch_1() if i % 2 else watch_2()) for i in range(0, 200)]
            await asyncio.sleep(1)
            for w in watches[::3]:
                w.cancel()
            self.client.update_server_list(self.endpoints)
            await asyncio.sleep(0.01)
            for w in watches[1::3]:
                w.cancel()
            await asyncio.sleep(0.3)
            for w in watches[2::3]:
                w.cancel()
            await asyncio.wait_for(asyncio.wait(watches), 3)
            results = await asyncio.gather(*watches, return_exceptions=True)
            print("Finished:", len([r for r in results if r is None]), "Cancelled:", len([r for r in results if r is not None]))
        self.assertIsNotNone(self.client._watch_task_running)
        await asyncio.sleep(3)
        # With no watchers left the internal watch task should have stopped.
        self.assertIsNone(self.client._watch_task_running)

    @asynctest
    async def test_watch_create_cancel(self):
        await self.watch_create_cancel()

    async def batch_events(self):
        """batch_events=True groups events from one transaction into one batch."""
        f1 = asyncio.get_event_loop().create_future()
        f2 = asyncio.get_event_loop().create_future()

        def _check_event(e, criterias):
            # Each criteria slot may be None to skip that check.
            if criterias[0]:
                self.assertEqual(e.type, criterias[0])
            if criterias[1]:
                self.assertEqual(e.key, criterias[1])
            if criterias[2]:
                self.assertEqual(e.value, criterias[2])

        async def watch_1():
            asserts = [(EVENT_TYPE_CREATE, b'/foo/1', b'1'),
                       (EVENT_TYPE_CREATE, b'/foo/2', b'2'),
                       (EVENT_TYPE_MODIFY, b'/foo/1', b'2'),
                       (EVENT_TYPE_MODIFY, b'/foo/2', b'3'),
                       (EVENT_TYPE_DELETE, b'/foo/1', None),
                       (EVENT_TYPE_DELETE, b'/foo/2', None)]
            async with self.client.watch_scope(range_prefix('/foo/')) as response:
                f1.set_result(None)
                async for e in response:
                    _check_event(e, asserts.pop(0))
                    if not asserts:
                        break

        async def watch_2():
            asserts = [((EVENT_TYPE_CREATE, b'/foo/1', b'1'),
                        (EVENT_TYPE_CREATE, b'/foo/2', b'2'),),
                       ((EVENT_TYPE_MODIFY, b'/foo/1', b'2'),),
                       ((EVENT_TYPE_MODIFY, b'/foo/2', b'3'),),
                       ((EVENT_TYPE_DELETE, b'/foo/1', None),
                        (EVENT_TYPE_DELETE, b'/foo/2', None))]
            async with self.client.watch_scope(range_prefix('/foo/'), batch_events=True) \
                    as response:
                f2.set_result(None)
                async for es in response:
                    batch = asserts.pop(0)
                    self.assertEqual(len(es), len(batch))
                    for e, a in zip(es, batch):
                        _check_event(e, a)
                    if not asserts:
                        break

        t1 = asyncio.ensure_future(watch_1())
        t2 = asyncio.ensure_future(watch_2())
        await asyncio.wait_for(asyncio.wait([f1, f2]), 2)
        self.assertTrue((await self.client.txn([], [self.client.put.txn('/foo/1', '1'),
                                                    self.client.put.txn('/foo/2', '2')], []))[0])
        await self.client.put('/foo/1', '2')
        await self.client.put('/foo/2', '3')
        self.assertTrue((await self.client.txn([], [self.client.delete.txn('/foo/1'),
                                                    self.client.delete.txn('/foo/2')], []))[0])
        await asyncio.gather(t1, t2)

    @asynctest
    async def test_batch_events(self):
        await self.batch_events()

    async def compact_revision(self):
        """Watching from a compacted revision raises unless ignore_compact is set."""
        await self.client.put('/foo', '1')
        first_revision = self.client.last_response_info.revision
        await self.client.put('/foo', '2')
        await self.client.put('/foo', '3')
        await self.client.put('/foo', '4')
        await self.client.put('/foo', '5')
        compact_revision = self.client.last_response_info.revision
        await self.client.compact(compact_revision, True)

        async def watch_1():
            async with self.client.watch_scope('/foo', start_revision=first_revision) as response:
                with self.assertRaises(CompactRevisonException) as cm:
                    async for e in response:
                        raise ValueError("Not raised")
                self.assertEqual(cm.exception.revision, compact_revision)

        async def watch_2():
            async with self.client.watch_scope('/foo', ignore_compact=True, start_revision=first_revision) as responses:
                async for e in responses:
                    # Replay starts at the compacted revision's state.
                    self.assertEqual(e.type, EVENT_TYPE_MODIFY)
                    self.assertEqual(e.key, b'/foo')
                    self.assertEqual(e.value, b'5')
                    self.assertEqual(e.revision, compact_revision)
                    break

        await watch_1()
        await watch_2()

    @asynctest
    async def test_compact_revision(self):
        await self.compact_revision()

    async def watch_exception(self):
        """Connection loss raises WatchException unless always_reconnect is set."""
        f1 = asyncio.get_event_loop().create_future()
        f2 = asyncio.get_event_loop().create_future()

        async def watch_1():
            i = 0
            async with self.client.watch_scope('/foo') as response:
                f1.set_result(None)
                with self.assertRaises(WatchException):
                    async for event in response:
                        i = i + 1
                        if i == 1:
                            self.assertEqual(event.type, EVENT_TYPE_CREATE)
                            self.assertEqual(event.key, b'/foo')
                            self.assertEqual(event.value, b'foo')
                            f2.set_result(None)
                        elif i == 2:
                            raise ValueError("Not raised")

        f3 = asyncio.get_event_loop().create_future()
        f4 = asyncio.get_event_loop().create_future()

        async def watch_2():
            i = 0
            async with self.client.watch_scope('/foo', always_reconnect=True) as response:
                f3.set_result(None)
                async for event in response:
                    i = i + 1
                    if i == 1:
                        self.assertEqual(event.type, EVENT_TYPE_CREATE)
                        self.assertEqual(event.key, b'/foo')
                        self.assertEqual(event.value, b'foo')
                        f4.set_result(None)
                    elif i == 2:
                        self.assertEqual(event.type, EVENT_TYPE_MODIFY)
                        self.assertEqual(event.key, b'/foo')
                        self.assertEqual(event.value, b'foo1')
                    elif i == 3:
                        self.assertEqual(event.type, EVENT_TYPE_DELETE)
                        self.assertEqual(event.key, b'/foo')
                        # delete event has no value
                        break

        t1 = asyncio.ensure_future(watch_1())
        t2 = asyncio.ensure_future(watch_2())
        await f1
        await f3
        await self.client.put('/foo', 'foo')
        await f2
        await f4
        # Point the client at a dead endpoint to sever the streams.
        fake_endpoints = 'ipv4:///127.0.0.1:49999'
        self.client.update_server_list(fake_endpoints)
        await asyncio.sleep(2)
        self.client.update_server_list(self.endpoints)
        await self.client.put('/foo', 'foo1')
        await self.client.delete('/foo')
        await t1
        await t2

    @asynctest
    async def test_watch_exception(self):
        await self.watch_exception()

    async def _run_test_with_auth(self, test):
        """Re-run *test* as the restricted 'client' user with auth enabled."""
        default_client = self.client
        await switch_auth_on(default_client)
        root_client = client(endpoint=self.endpoints, username="root", password="root")
        await root_client.role_grant_permission(name='client', key_range=range_prefix('/foo'), permission=PER_RW)
        self.client = client(endpoint=self.endpoints, username="client", password="client")
        try:
            await test()
        finally:
            await switch_auth_off(
                root_client,
                default_client
            )
            await root_client.close()
            await self.client.close()
            self.client = default_client

    @asynctest
    async def test_watch1_with_auth(self):
        await self._run_test_with_auth(self.common_watch1)

    @asynctest
    async def test_watch_reconnect_with_auth(self):
        await self._run_test_with_auth(self.watch_reconnect)

    @asynctest
    async def test_watch_create_cancel_with_auth(self):
        await self._run_test_with_auth(self.watch_create_cancel)

    @asynctest
    async def test_batch_events_with_auth(self):
        await self._run_test_with_auth(self.batch_events)

    @asynctest
    async def test_compact_revision_with_auth(self):
        await self._run_test_with_auth(self.compact_revision)

    @asynctest
    async def test_watch_exception_with_auth(self):
        await self._run_test_with_auth(self.watch_exception)

    @asynctest
    async def tearDown(self):
        await self.cleanUp()
        await self.client.close()

    async def cleanUp(self):
        await self.client.delete(range_all())
# Allow running this test module directly with `python test/test_watch.py`.
if __name__ == '__main__':
    unittest.main()

17
test/utils.py Normal file
View File

@ -0,0 +1,17 @@
async def switch_auth_on(client):
    """Provision 'root' and 'client' users (with same-named roles) and enable auth.

    Must be called on a connection that is allowed to administer auth.
    """
    await client.user_add(username="root", password="root")
    await client.role_add(name="root")
    await client.user_grant_role(username="root", role="root")
    await client.user_add(username="client", password="client")
    await client.role_add(name="client")
    await client.user_grant_role(username="client", role="client")
    await client.auth_enable()
async def switch_auth_off(root_client, unautheticated_client):
    """Disable auth via *root_client*, then tear down the test users and roles.

    NOTE(review): the second parameter name is misspelled ('unautheticated');
    kept as-is because it is part of the public signature.
    """
    await root_client.auth_disable()
    await unautheticated_client.user_delete("client")
    await unautheticated_client.user_delete("root")
    await unautheticated_client.role_delete("client")
    await unautheticated_client.role_delete("root")