Mirror of https://github.com/FlipsideCrypto/web3.py.git (synced 2026-02-06 02:46:45 +00:00)

Commit 4b0446cde8 (parent e140dc4e08): AppEngine compatible working version

README.md (12 changes)
@@ -8,6 +8,18 @@ Included packages:

* [pylru](https://github.com/mozilla/positron/blob/master/python/pylru/pylru.py)
* [ethereum-utils](https://github.com/pipermerriam/ethereum-utils)
* [ethereum-abi-utils](https://github.com/pipermerriam/ethereum-abi-utils/tree/master/eth_abi)
* [CompactFIPS202](https://github.com/gvanas/KeccakCodePackage/blob/master/Standalone/CompactFIPS202-Python/CompactFIPS202.py)
* [pyrlp](https://github.com/ethereum/pyrlp)

*Note*: pysha3 was replaced by CompactFIPS202 because pysha3 uses C components, which are not supported on GAE. Test before using in production code.

#### Tested

* Personal API
* Eth API

====================================
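A quick way to follow that advice is to exercise the vendored hash directly. The sketch below is illustrative and not part of the commit; it assumes the web3/keccak/CompactFIPS202.py module added further down in this diff and the Python 2.7 runtime GAE used at the time, and simply prints the digest of the empty string so it can be compared against the value the rest of web3.py expects.

from binascii import hexlify

from web3.keccak.CompactFIPS202 import SHA3_256

# SHA3_256 returns a bytearray; hex-encode it for comparison.
digest = SHA3_256(bytearray(b''))
print(hexlify(digest).decode())
# Keccak-256 (the variant Ethereum uses) of b'' is
# c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470;
# if the printed value differs, the swap is not a drop-in replacement for pysha3's keccak_256.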
@@ -1,22 +1,22 @@
 from __future__ import absolute_import

-import pkg_resources
+#import pkg_resources

-from web3.main import Web3
-from web3.providers.rpc import (
+from .main import Web3
+from .providers.rpc import (
     HTTPProvider,
     RPCProvider,
     KeepAliveRPCProvider,
 )
-from web3.providers.tester import (
+from .providers.tester import (
     TestRPCProvider,
     EthereumTesterProvider,
 )
-from web3.providers.ipc import (
+from .providers.ipc import (
     IPCProvider,
 )

-__version__ = pkg_resources.get_distribution("web3").version
+__version__ = "2.4.0" #pkg_resources.get_distribution("web3").version

 __all__ = [
     "__version__",
@@ -5,7 +5,7 @@ import functools
 import warnings
 import itertools

-from eth_utils import (
+from .eth_utils import (
     is_address,
     function_abi_to_4byte_selector,
     encode_hex,
@@ -18,20 +18,20 @@ from eth_utils import (
     is_list_like,
 )

-from eth_abi import (
+from .eth_abi import (
     encode_abi,
     decode_abi,
 )
-from eth_abi.exceptions import (
+from .eth_abi.exceptions import (
     EncodingError,
     DecodingError,
 )

-from web3.exceptions import (
+from .exceptions import (
     BadFunctionCallOutput,
 )

-from web3.utils.abi import (
+from .utils.abi import (
     filter_by_type,
     filter_by_name,
     filter_by_argument_count,
@@ -44,19 +44,19 @@ from web3.utils.abi import (
     normalize_return_type,
     check_if_arguments_can_be_encoded,
 )
-from web3.utils.decorators import (
+from .utils.decorators import (
     combomethod,
 )
-from web3.utils.empty import (
+from .utils.empty import (
     empty,
 )
-from web3.utils.events import (
+from .utils.events import (
     get_event_data,
 )
-from web3.utils.exception import (
+from .utils.exception import (
     raise_from,
 )
-from web3.utils.filters import (
+from .utils.filters import (
     construct_event_filter_params,
     PastLogFilter,
 )
web3/eth.py (18 changes)

@@ -6,31 +6,31 @@ from eth_utils import (
     coerce_return_to_text,
 )

-from web3 import formatters
-from web3.iban import Iban
+import formatters
+from .iban import Iban

-from web3.contract import (
+from .contract import (
     Contract,
 )

-from web3.utils.blocks import (
+from .utils.blocks import (
     is_predefined_block_number,
 )
-from web3.utils.empty import (
+from .utils.empty import (
     empty,
 )
-from web3.utils.encoding import (
+from .utils.encoding import (
     to_decimal,
 )
-from web3.utils.filters import (
+from .utils.filters import (
     BlockFilter,
     TransactionFilter,
     LogFilter,
 )
-from web3.utils.functional import (
+from .utils.functional import (
     apply_formatters_to_return,
 )
-from web3.utils.transactions import (
+from .utils.transactions import (
     get_buffered_gas_estimate,
 )
web3/eth_abi/__init__.py (new executable file, 11 lines)

@@ -0,0 +1,11 @@
#import pkg_resources

from .abi import ( # NOQA
    decode_single,
    decode_abi,
    encode_single,
    encode_abi,
)


__version__ = "0.4.0" #pkg_resources.get_distribution('ethereum-abi-utils').version
web3/eth_abi/abi.py (new executable file, 108 lines)

@@ -0,0 +1,108 @@
"""
Vendored from `pyethereum.abi`
"""
import warnings

from io import BytesIO

from ..eth_utils import (
    is_text,
    force_bytes,
    remove_0x_prefix,
    decode_hex,
)

from decoding import (
    get_single_decoder,
    get_multi_decoder,
)
from encoding import (
    get_single_encoder,
    get_multi_encoder,
)

from utils.parsing import (
    process_type,
)


def encode_single(typ, arg):
    try:
        base, sub, arrlist = typ
    except ValueError:
        base, sub, arrlist = process_type(typ)

    if is_text(arg):
        arg = force_bytes(arg)

    encoder = get_single_encoder(base, sub, arrlist)
    return encoder(arg)


def encode_abi(types, args):
    processed_types = [process_type(typ) for typ in types]
    encoder = get_multi_encoder(processed_types)
    return encoder(args)


HEX_CHARS = b'1234567890abcdef'


def is_hex_encoded_value(v):
    if not remove_0x_prefix(force_bytes(v)).lower().strip(HEX_CHARS) == b'':
        return False
    if len(remove_0x_prefix(v)) % 64 and len(remove_0x_prefix(v)) % 40:
        return False
    return True


# Decodes a single base datum
def decode_single(typ, data):
    if is_hex_encoded_value(data):
        warnings.warn(DeprecationWarning(
            "Automatic inference of hex encoded data has been deprecated. "
            "Please adjust your code to ensure that the data argument for "
            "`decode_single` is a byte string"
        ))
        data = decode_hex(remove_0x_prefix(data))

    if is_text(data):
        warnings.warn(DeprecationWarning(
            "Automatic conversion of encoded data to bytes has been deprecated. "
            "Please adjust your code to ensure that the data argument for "
            "`decode_single` is a byte string"
        ))
        data = force_bytes(data)

    try:
        base, sub, arrlist = typ
    except ValueError:
        base, sub, arrlist = process_type(typ)

    decoder = get_single_decoder(base, sub, arrlist)
    stream = BytesIO(data)
    return decoder(stream)


# Decodes multiple arguments using the head/tail mechanism
def decode_abi(types, data):
    if is_hex_encoded_value(data):
        warnings.warn(DeprecationWarning(
            "Automatic inference of hex encoded data has been deprecated. "
            "Please adjust your code to ensure that the data argument for "
            "`decode_single` is a byte string"
        ))
        data = decode_hex(remove_0x_prefix(data))

    if is_text(data):
        warnings.warn(DeprecationWarning(
            "Automatic conversion of encoded data to bytes has been deprecated. "
            "Please adjust your code to ensure that the data argument for "
            "`decode_abi` is a byte string"
        ))
        data = force_bytes(data)

    processed_types = tuple(process_type(_type) for _type in types)
    decoder = get_multi_decoder(processed_types)
    stream = BytesIO(data)
    return decoder(stream)
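encode_abi and decode_abi above are the two entry points the rest of web3.py calls into. A minimal round-trip sketch, not part of the commit, assuming the vendored package is importable as web3.eth_abi (its plain `from decoding import ...` style imports only resolve on the Python 2.7 runtime this commit targets):

from web3.eth_abi import encode_abi, decode_abi

# Three static ABI values round-tripped through the vendored codec; the values are arbitrary.
types = ['uint256', 'bool', 'bytes32']
args = [12345, True, b'key'.ljust(32, b'\x00')]

encoded = encode_abi(types, args)      # three 32-byte head slots -> 96 bytes
decoded = decode_abi(types, encoded)   # returns a tuple in the same order

assert list(decoded) == args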
web3/eth_abi/constants.py (new executable file, 3 lines)

@@ -0,0 +1,3 @@
TT256 = 2 ** 256
TT256M1 = 2 ** 256 - 1
TT255 = 2 ** 255
web3/eth_abi/decoding.py (new executable file, 461 lines)

@@ -0,0 +1,461 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import decimal
|
||||
|
||||
from ..eth_utils import (
|
||||
force_text,
|
||||
to_tuple,
|
||||
to_normalized_address,
|
||||
)
|
||||
|
||||
from ..eth_abi.exceptions import (
|
||||
InsufficientDataBytes,
|
||||
NonEmptyPaddingBytes,
|
||||
)
|
||||
from ..eth_abi.utils.numeric import (
|
||||
big_endian_to_int,
|
||||
quantize_value,
|
||||
ceil32,
|
||||
)
|
||||
|
||||
|
||||
decimal.DefaultContext.prec = 999
|
||||
|
||||
|
||||
def get_multi_decoder(processed_types):
|
||||
"""
|
||||
"""
|
||||
decoders = tuple(
|
||||
get_single_decoder(base, sub, arrlist) for base, sub, arrlist in processed_types
|
||||
)
|
||||
return MultiDecoder.as_decoder(decoders=decoders)
|
||||
|
||||
|
||||
def get_single_decoder(base, sub, arrlist):
|
||||
if arrlist:
|
||||
item_decoder = get_single_decoder(base, sub, arrlist[:-1])
|
||||
if arrlist[-1]:
|
||||
return SizedArrayDecoder.as_decoder(
|
||||
array_size=arrlist[-1][0],
|
||||
item_decoder=item_decoder,
|
||||
)
|
||||
else:
|
||||
return DynamicArrayDecoder.as_decoder(item_decoder=item_decoder)
|
||||
elif base == 'address':
|
||||
return decode_address
|
||||
elif base == 'bool':
|
||||
return decode_bool
|
||||
elif base == 'bytes':
|
||||
if sub:
|
||||
return BytesDecoder.as_decoder(value_bit_size=int(sub) * 8)
|
||||
else:
|
||||
return decode_bytes
|
||||
elif base == 'int':
|
||||
return SignedIntegerDecoder.as_decoder(value_bit_size=int(sub))
|
||||
elif base == 'string':
|
||||
return decode_string
|
||||
elif base == 'uint':
|
||||
return UnsignedIntegerDecoder.as_decoder(value_bit_size=int(sub))
|
||||
elif base == 'ureal':
|
||||
high_bit_size, low_bit_size = [int(v) for v in sub.split('x')]
|
||||
return UnsignedRealDecoder.as_decoder(
|
||||
value_bit_size=high_bit_size + low_bit_size,
|
||||
high_bit_size=high_bit_size,
|
||||
low_bit_size=low_bit_size,
|
||||
)
|
||||
elif base == 'real':
|
||||
high_bit_size, low_bit_size = [int(v) for v in sub.split('x')]
|
||||
return SignedRealDecoder.as_decoder(
|
||||
value_bit_size=high_bit_size + low_bit_size,
|
||||
high_bit_size=high_bit_size,
|
||||
low_bit_size=low_bit_size,
|
||||
)
|
||||
else:
|
||||
raise ValueError(
|
||||
"Unsupported type: {0} - must be one of "
|
||||
"address/bool/bytesXX/bytes/string/uintXXX/intXXX"
|
||||
)
|
||||
|
||||
|
||||
class BaseDecoder(object):
|
||||
@classmethod
|
||||
def as_decoder(cls, name=None, **kwargs):
|
||||
for key in kwargs:
|
||||
if not hasattr(cls, key):
|
||||
raise AttributeError(
|
||||
"Property {0} not found on Decoder class. "
|
||||
"`Decoder.factory` only accepts keyword arguments which are "
|
||||
"present on the Decoder class".format(key)
|
||||
)
|
||||
if name is None:
|
||||
name = cls.__name__
|
||||
sub_cls = type(name, (cls,), kwargs)
|
||||
sub_cls.validate()
|
||||
instance = sub_cls()
|
||||
return instance
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
pass
|
||||
|
||||
def __call__(self, stream):
|
||||
return self.decode(stream)
|
||||
|
||||
|
||||
class HeadTailDecoder(BaseDecoder):
|
||||
tail_decoder = None
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
super(HeadTailDecoder, cls).validate()
|
||||
if cls.tail_decoder is None:
|
||||
raise ValueError("No `tail_decoder` set")
|
||||
|
||||
@classmethod
|
||||
def decode(cls, stream):
|
||||
start_pos = decode_uint_256(stream)
|
||||
anchor_pos = stream.tell()
|
||||
stream.seek(start_pos)
|
||||
value = cls.tail_decoder(stream)
|
||||
stream.seek(anchor_pos)
|
||||
return value
|
||||
|
||||
|
||||
class MultiDecoder(BaseDecoder):
|
||||
decoders = None
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
super(MultiDecoder, cls).validate()
|
||||
if cls.decoders is None:
|
||||
raise ValueError("No `decoders` set")
|
||||
|
||||
@classmethod
|
||||
@to_tuple
|
||||
def decode(cls, stream):
|
||||
for decoder in cls.decoders:
|
||||
if isinstance(decoder, (DynamicArrayDecoder, StringDecoder)):
|
||||
yield HeadTailDecoder.as_decoder(tail_decoder=decoder)(stream)
|
||||
else:
|
||||
yield decoder(stream)
|
||||
|
||||
|
||||
class SingleDecoder(BaseDecoder):
|
||||
decoder_fn = None
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
super(SingleDecoder, cls).validate()
|
||||
if cls.decoder_fn is None:
|
||||
raise ValueError("No `decoder_fn` set")
|
||||
|
||||
@classmethod
|
||||
def validate_padding_bytes(cls, value, padding_bytes):
|
||||
raise NotImplementedError("Must be implemented by subclasses")
|
||||
value_byte_size = cls._get_value_byte_size()
|
||||
padding_size = cls.data_byte_size - value_byte_size
|
||||
|
||||
if padding_bytes != b'\x00' * padding_size:
|
||||
raise NonEmptyPaddingBytes(
|
||||
"Padding bytes were not empty: {0}".format(force_text(padding_bytes))
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def decode(cls, stream):
|
||||
raw_data = cls.read_data_from_stream(stream)
|
||||
data, padding_bytes = cls.split_data_and_padding(raw_data)
|
||||
value = cls.decoder_fn(data)
|
||||
cls.validate_padding_bytes(value, padding_bytes)
|
||||
|
||||
return value
|
||||
|
||||
@classmethod
|
||||
def read_data_from_stream(cls, stream):
|
||||
raise NotImplementedError("Must be implemented by subclasses")
|
||||
|
||||
@classmethod
|
||||
def split_data_and_padding(cls, raw_data):
|
||||
return raw_data, b''
|
||||
|
||||
|
||||
class BaseArrayDecoder(BaseDecoder):
|
||||
item_decoder = None
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
super(BaseArrayDecoder, cls).validate()
|
||||
if cls.item_decoder is None:
|
||||
raise ValueError("No `item_decoder` set")
|
||||
|
||||
|
||||
class SizedArrayDecoder(BaseArrayDecoder):
|
||||
array_size = None
|
||||
|
||||
@classmethod
|
||||
@to_tuple
|
||||
def decode(cls, stream):
|
||||
for _ in range(cls.array_size):
|
||||
yield cls.item_decoder(stream)
|
||||
|
||||
|
||||
class DynamicArrayDecoder(BaseArrayDecoder):
|
||||
@classmethod
|
||||
@to_tuple
|
||||
def decode(cls, stream):
|
||||
array_size = decode_uint_256(stream)
|
||||
for _ in range(array_size):
|
||||
yield cls.item_decoder(stream)
|
||||
|
||||
|
||||
class FixedByteSizeDecoder(SingleDecoder):
|
||||
decoder_fn = None
|
||||
value_bit_size = None
|
||||
data_byte_size = None
|
||||
is_big_endian = None
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
super(FixedByteSizeDecoder, cls).validate()
|
||||
|
||||
if cls.value_bit_size is None:
|
||||
raise ValueError("`value_bit_size` may not be None")
|
||||
if cls.data_byte_size is None:
|
||||
raise ValueError("`data_byte_size` may not be None")
|
||||
if cls.decoder_fn is None:
|
||||
raise ValueError("`decoder_fn` may not be None")
|
||||
if cls.is_big_endian is None:
|
||||
raise ValueError("`is_big_endian` may not be None")
|
||||
|
||||
if cls.value_bit_size % 8 != 0:
|
||||
raise ValueError(
|
||||
"Invalid value bit size: {0}. Must be a multiple of 8".format(
|
||||
cls.value_bit_size,
|
||||
)
|
||||
)
|
||||
|
||||
if cls.value_bit_size > cls.data_byte_size * 8:
|
||||
raise ValueError("Value byte size exceeds data size")
|
||||
|
||||
@classmethod
|
||||
def read_data_from_stream(cls, stream):
|
||||
data = stream.read(cls.data_byte_size)
|
||||
|
||||
if len(data) != cls.data_byte_size:
|
||||
raise InsufficientDataBytes(
|
||||
"Tried to read {0} bytes. Only got {1} bytes".format(
|
||||
cls.data_byte_size,
|
||||
len(data),
|
||||
)
|
||||
)
|
||||
|
||||
return data
|
||||
|
||||
@classmethod
|
||||
def split_data_and_padding(cls, raw_data):
|
||||
value_byte_size = cls._get_value_byte_size()
|
||||
padding_size = cls.data_byte_size - value_byte_size
|
||||
|
||||
if cls.is_big_endian:
|
||||
padding_bytes = raw_data[:padding_size]
|
||||
data = raw_data[padding_size:]
|
||||
else:
|
||||
data = raw_data[:value_byte_size]
|
||||
padding_bytes = raw_data[value_byte_size:]
|
||||
|
||||
return data, padding_bytes
|
||||
|
||||
@classmethod
|
||||
def validate_padding_bytes(cls, value, padding_bytes):
|
||||
value_byte_size = cls._get_value_byte_size()
|
||||
padding_size = cls.data_byte_size - value_byte_size
|
||||
|
||||
if padding_bytes != b'\x00' * padding_size:
|
||||
raise NonEmptyPaddingBytes(
|
||||
"Padding bytes were not empty: {0}".format(force_text(padding_bytes))
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _get_value_byte_size(cls):
|
||||
value_byte_size = cls.value_bit_size // 8
|
||||
return value_byte_size
|
||||
|
||||
|
||||
class Fixed32ByteSizeDecoder(FixedByteSizeDecoder):
|
||||
data_byte_size = 32
|
||||
|
||||
|
||||
class BooleanDecoder(Fixed32ByteSizeDecoder):
|
||||
value_bit_size = 8
|
||||
is_big_endian = True
|
||||
|
||||
@classmethod
|
||||
def decoder_fn(cls, data):
|
||||
if data == b'\x00':
|
||||
return False
|
||||
elif data == b'\x01':
|
||||
return True
|
||||
else:
|
||||
raise NonEmptyPaddingBytes(
|
||||
"Boolean must be either 0x0 or 0x1. Got: {0}".format(force_text(data))
|
||||
)
|
||||
|
||||
|
||||
decode_bool = BooleanDecoder.as_decoder()
|
||||
|
||||
|
||||
class AddressDecoder(Fixed32ByteSizeDecoder):
|
||||
value_bit_size = 20 * 8
|
||||
is_big_endian = True
|
||||
decoder_fn = staticmethod(to_normalized_address)
|
||||
|
||||
|
||||
decode_address = AddressDecoder.as_decoder()
|
||||
|
||||
|
||||
#
|
||||
# Unsigned Integer Decoders
|
||||
#
|
||||
class UnsignedIntegerDecoder(Fixed32ByteSizeDecoder):
|
||||
decoder_fn = staticmethod(big_endian_to_int)
|
||||
is_big_endian = True
|
||||
|
||||
|
||||
decode_uint_256 = UnsignedIntegerDecoder.as_decoder(value_bit_size=256)
|
||||
|
||||
|
||||
#
|
||||
# Signed Integer Decoders
|
||||
#
|
||||
class SignedIntegerDecoder(Fixed32ByteSizeDecoder):
|
||||
is_big_endian = True
|
||||
|
||||
@classmethod
|
||||
def decoder_fn(cls, data):
|
||||
value = big_endian_to_int(data)
|
||||
if value >= 2 ** (cls.value_bit_size - 1):
|
||||
return value - 2 ** cls.value_bit_size
|
||||
else:
|
||||
return value
|
||||
|
||||
@classmethod
|
||||
def validate_padding_bytes(cls, value, padding_bytes):
|
||||
value_byte_size = cls._get_value_byte_size()
|
||||
padding_size = cls.data_byte_size - value_byte_size
|
||||
|
||||
if value >= 0:
|
||||
expected_padding_bytes = b'\x00' * padding_size
|
||||
else:
|
||||
expected_padding_bytes = b'\xff' * padding_size
|
||||
|
||||
if padding_bytes != expected_padding_bytes:
|
||||
raise NonEmptyPaddingBytes(
|
||||
"Padding bytes were not empty: {0}".format(force_text(padding_bytes))
|
||||
)
|
||||
|
||||
|
||||
#
|
||||
# Bytes1..32
|
||||
#
|
||||
class BytesDecoder(Fixed32ByteSizeDecoder):
|
||||
is_big_endian = False
|
||||
|
||||
@classmethod
|
||||
def decoder_fn(cls, data):
|
||||
return data
|
||||
|
||||
|
||||
class BaseRealDecoder(Fixed32ByteSizeDecoder):
|
||||
high_bit_size = None
|
||||
low_bit_size = None
|
||||
data_byte_size = None
|
||||
is_big_endian = True
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
super(BaseRealDecoder, cls).validate()
|
||||
|
||||
if cls.high_bit_size is None:
|
||||
raise ValueError("`high_bit_size` cannot be null")
|
||||
if cls.low_bit_size is None:
|
||||
raise ValueError("`low_bit_size` cannot be null")
|
||||
if cls.low_bit_size + cls.high_bit_size != cls.value_bit_size:
|
||||
raise ValueError("high and low bitsizes must sum to the value_bit_size")
|
||||
|
||||
|
||||
class UnsignedRealDecoder(BaseRealDecoder):
|
||||
@classmethod
|
||||
def decoder_fn(cls, data):
|
||||
value = big_endian_to_int(data)
|
||||
decimal_value = decimal.Decimal(value)
|
||||
raw_real_value = decimal_value / 2 ** cls.low_bit_size
|
||||
real_value = quantize_value(raw_real_value, cls.low_bit_size)
|
||||
return real_value
|
||||
|
||||
|
||||
class SignedRealDecoder(BaseRealDecoder):
|
||||
@classmethod
|
||||
def decoder_fn(cls, data):
|
||||
value = big_endian_to_int(data)
|
||||
if value >= 2 ** (cls.high_bit_size + cls.low_bit_size - 1):
|
||||
signed_value = value - 2 ** (cls.high_bit_size + cls.low_bit_size)
|
||||
else:
|
||||
signed_value = value
|
||||
signed_decimal_value = decimal.Decimal(signed_value)
|
||||
raw_real_value = signed_decimal_value / 2 ** cls.low_bit_size
|
||||
real_value = quantize_value(raw_real_value, cls.low_bit_size)
|
||||
return real_value
|
||||
|
||||
@classmethod
|
||||
def validate_padding_bytes(cls, value, padding_bytes):
|
||||
value_byte_size = cls._get_value_byte_size()
|
||||
padding_size = cls.data_byte_size - value_byte_size
|
||||
|
||||
if value >= 0:
|
||||
expected_padding_bytes = b'\x00' * padding_size
|
||||
else:
|
||||
expected_padding_bytes = b'\xff' * padding_size
|
||||
|
||||
if padding_bytes != expected_padding_bytes:
|
||||
raise NonEmptyPaddingBytes(
|
||||
"Padding bytes were not empty: {0}".format(force_text(padding_bytes))
|
||||
)
|
||||
|
||||
|
||||
#
|
||||
# String and Bytes
|
||||
#
|
||||
class StringDecoder(SingleDecoder):
|
||||
@classmethod
|
||||
def decoder_fn(cls, data):
|
||||
return data
|
||||
|
||||
@classmethod
|
||||
def read_data_from_stream(cls, stream):
|
||||
data_length = decode_uint_256(stream)
|
||||
padded_length = ceil32(data_length)
|
||||
|
||||
data = stream.read(padded_length)
|
||||
|
||||
if len(data) < padded_length:
|
||||
raise InsufficientDataBytes(
|
||||
"Tried to read {0} bytes. Only got {1} bytes".format(
|
||||
padded_length,
|
||||
len(data),
|
||||
)
|
||||
)
|
||||
|
||||
padding_bytes = data[data_length:]
|
||||
|
||||
if padding_bytes != b'\x00' * (padded_length - data_length):
|
||||
raise NonEmptyPaddingBytes(
|
||||
"Padding bytes were not empty: {0}".format(force_text(padding_bytes))
|
||||
)
|
||||
|
||||
return data[:data_length]
|
||||
|
||||
@classmethod
|
||||
def validate_padding_bytes(cls, value, padding_bytes):
|
||||
pass
|
||||
|
||||
|
||||
decode_string = decode_bytes = StringDecoder.as_decoder()
|
||||
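Every concrete decoder in this file is produced by the as_decoder classmethod, which copies the keyword arguments onto a throwaway subclass, validates it, and returns a callable instance. An illustrative use of the factory, not part of the commit and assuming the vendored module path web3.eth_abi.decoding:

from io import BytesIO

from web3.eth_abi.decoding import UnsignedIntegerDecoder

# Same construction the module itself uses for decode_uint_256.
decode_uint_256 = UnsignedIntegerDecoder.as_decoder(value_bit_size=256)

word = b'\x00' * 31 + b'\xff'            # one 32-byte big-endian slot holding 255
assert decode_uint_256(BytesIO(word)) == 255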
web3/eth_abi/encoding.py (new executable file, 481 lines)

@@ -0,0 +1,481 @@
|
||||
import itertools
|
||||
|
||||
from ..eth_utils import (
|
||||
is_boolean,
|
||||
is_integer,
|
||||
is_number,
|
||||
is_address,
|
||||
is_bytes,
|
||||
is_list_like,
|
||||
is_null,
|
||||
to_canonical_address,
|
||||
)
|
||||
|
||||
from ..eth_abi.exceptions import (
|
||||
EncodingTypeError,
|
||||
ValueOutOfBounds,
|
||||
)
|
||||
|
||||
from ..eth_abi.utils.numeric import (
|
||||
int_to_big_endian,
|
||||
compute_signed_integer_bounds,
|
||||
compute_unsigned_integer_bounds,
|
||||
compute_signed_real_bounds,
|
||||
compute_unsigned_real_bounds,
|
||||
ceil32,
|
||||
)
|
||||
from ..eth_abi.utils.padding import (
|
||||
fpad,
|
||||
zpad,
|
||||
zpad_right,
|
||||
)
|
||||
|
||||
|
||||
def get_multi_encoder(processed_types):
|
||||
"""
|
||||
"""
|
||||
encoders = tuple(
|
||||
get_single_encoder(base, sub, arrlist) for base, sub, arrlist in processed_types
|
||||
)
|
||||
return MultiEncoder.as_encoder(encoders=encoders)
|
||||
|
||||
|
||||
def get_single_encoder(base, sub, arrlist):
|
||||
if arrlist:
|
||||
item_encoder = get_single_encoder(base, sub, arrlist[:-1])
|
||||
if arrlist[-1]:
|
||||
return SizedArrayEncoder.as_encoder(
|
||||
array_size=arrlist[-1][0],
|
||||
item_encoder=item_encoder,
|
||||
)
|
||||
else:
|
||||
return DynamicArrayEncoder.as_encoder(item_encoder=item_encoder)
|
||||
elif base == 'address':
|
||||
return encode_address
|
||||
elif base == 'bool':
|
||||
return encode_bool
|
||||
elif base == 'bytes':
|
||||
if sub:
|
||||
return BytesEncoder.as_encoder(value_bit_size=int(sub) * 8)
|
||||
else:
|
||||
return encode_bytes
|
||||
elif base == 'int':
|
||||
return SignedIntegerEncoder.as_encoder(value_bit_size=int(sub))
|
||||
elif base == 'string':
|
||||
return encode_string
|
||||
elif base == 'uint':
|
||||
return UnsignedIntegerEncoder.as_encoder(value_bit_size=int(sub))
|
||||
elif base == 'ureal':
|
||||
high_bit_size, low_bit_size = [int(v) for v in sub.split('x')]
|
||||
return UnsignedRealEncoder.as_encoder(
|
||||
value_bit_size=high_bit_size + low_bit_size,
|
||||
high_bit_size=high_bit_size,
|
||||
low_bit_size=low_bit_size,
|
||||
)
|
||||
elif base == 'real':
|
||||
high_bit_size, low_bit_size = [int(v) for v in sub.split('x')]
|
||||
return SignedRealEncoder.as_encoder(
|
||||
value_bit_size=high_bit_size + low_bit_size,
|
||||
high_bit_size=high_bit_size,
|
||||
low_bit_size=low_bit_size,
|
||||
)
|
||||
else:
|
||||
raise ValueError(
|
||||
"Unsupported type: {0} - must be one of "
|
||||
"address/bool/bytesXX/bytes/string/uintXXX/intXXX"
|
||||
)
|
||||
|
||||
|
||||
class BaseEncoder(object):
|
||||
@classmethod
|
||||
def as_encoder(cls, name=None, **kwargs):
|
||||
for key in kwargs:
|
||||
if not hasattr(cls, key):
|
||||
raise AttributeError(
|
||||
"Property {0} not found on Decoder class. "
|
||||
"`Decoder.factory` only accepts keyword arguments which are "
|
||||
"present on the Decoder class".format(key)
|
||||
)
|
||||
if name is None:
|
||||
name = cls.__name__
|
||||
sub_cls = type(name, (cls,), kwargs)
|
||||
sub_cls.validate()
|
||||
instance = sub_cls()
|
||||
return instance
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
pass
|
||||
|
||||
def __call__(self, value):
|
||||
return self.encode(value)
|
||||
|
||||
|
||||
class MultiEncoder(BaseEncoder):
|
||||
encoders = None
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
super(MultiEncoder, cls).validate()
|
||||
if cls.encoders is None:
|
||||
raise ValueError("`encoders` may not be none")
|
||||
|
||||
@classmethod
|
||||
def encode(cls, values):
|
||||
if len(values) != len(cls.encoders):
|
||||
raise ValueOutOfBounds(
|
||||
"Recieved {0} values to encode. Expected {1}".format(
|
||||
len(values),
|
||||
len(cls.encoders),
|
||||
)
|
||||
)
|
||||
raw_head_chunks = []
|
||||
tail_chunks = []
|
||||
|
||||
for value, encoder in zip(values, cls.encoders):
|
||||
if isinstance(encoder, (DynamicArrayEncoder, StringEncoder)):
|
||||
raw_head_chunks.append(None)
|
||||
tail_chunks.append(encoder(value))
|
||||
else:
|
||||
raw_head_chunks.append(encoder(value))
|
||||
tail_chunks.append(b'')
|
||||
|
||||
head_length = sum((
|
||||
32 if is_null(item) else len(item)
|
||||
for item in raw_head_chunks
|
||||
))
|
||||
tail_offsets = tuple((
|
||||
sum((len(chunk) for chunk in tail_chunks[:i]))
|
||||
for i in range(len(tail_chunks))
|
||||
))
|
||||
head_chunks = tuple((
|
||||
(
|
||||
encode_uint_256(head_length + tail_offsets[idx])
|
||||
if is_null(head_chunk)
|
||||
else head_chunk
|
||||
) for idx, head_chunk
|
||||
in enumerate(raw_head_chunks)
|
||||
))
|
||||
encoded_value = b''.join(tuple(itertools.chain(head_chunks, tail_chunks)))
|
||||
return encoded_value
|
||||
|
||||
|
||||
class FixedSizeEncoder(BaseEncoder):
|
||||
value_bit_size = None
|
||||
data_byte_size = None
|
||||
encode_fn = None
|
||||
type_check_fn = None
|
||||
is_big_endian = None
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
super(FixedSizeEncoder, cls).validate()
|
||||
if cls.value_bit_size is None:
|
||||
raise ValueError("`value_bit_size` may not be none")
|
||||
if cls.data_byte_size is None:
|
||||
raise ValueError("`data_byte_size` may not be none")
|
||||
if cls.encode_fn is None:
|
||||
raise ValueError("`encode_fn` may not be none")
|
||||
if cls.is_big_endian is None:
|
||||
raise ValueError("`is_big_endian` may not be none")
|
||||
|
||||
if cls.value_bit_size % 8 != 0:
|
||||
raise ValueError(
|
||||
"Invalid value bit size: {0}. Must be a multiple of 8".format(
|
||||
cls.value_bit_size,
|
||||
)
|
||||
)
|
||||
|
||||
if cls.value_bit_size > cls.data_byte_size * 8:
|
||||
raise ValueError("Value byte size exceeds data size")
|
||||
|
||||
@classmethod
|
||||
def validate_value(cls, value):
|
||||
raise NotImplementedError("Must be implemented by subclasses")
|
||||
|
||||
@classmethod
|
||||
def encode(cls, value):
|
||||
cls.validate_value(value)
|
||||
base_encoded_value = cls.encode_fn(value)
|
||||
|
||||
if cls.is_big_endian:
|
||||
padded_encoded_value = zpad(base_encoded_value, cls.data_byte_size)
|
||||
else:
|
||||
padded_encoded_value = zpad_right(base_encoded_value, cls.data_byte_size)
|
||||
return padded_encoded_value
|
||||
|
||||
|
||||
class Fixed32ByteSizeEncoder(FixedSizeEncoder):
|
||||
data_byte_size = 32
|
||||
|
||||
|
||||
class BooleanEncoder(Fixed32ByteSizeEncoder):
|
||||
value_bit_size = 8
|
||||
is_big_endian = True
|
||||
|
||||
@classmethod
|
||||
def validate_value(cls, value):
|
||||
if not is_boolean(value):
|
||||
raise EncodingTypeError(
|
||||
"Value of type {0} cannot be encoded by {0}".format(
|
||||
type(value),
|
||||
cls.__name__,
|
||||
)
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def encode_fn(cls, value):
|
||||
if value is True:
|
||||
return b'\x01'
|
||||
elif value is False:
|
||||
return b'\x00'
|
||||
else:
|
||||
raise ValueError("Invariant")
|
||||
|
||||
|
||||
encode_bool = BooleanEncoder.as_encoder()
|
||||
|
||||
|
||||
class NumberEncoder(Fixed32ByteSizeEncoder):
|
||||
is_big_endian = True
|
||||
bounds_fn = None
|
||||
type_check_fn = None
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
super(NumberEncoder, cls).validate()
|
||||
if cls.bounds_fn is None:
|
||||
raise ValueError("`bounds_fn` cannot be null")
|
||||
if cls.type_check_fn is None:
|
||||
raise ValueError("`type_check_fn` cannot be null")
|
||||
|
||||
@classmethod
|
||||
def validate_value(cls, value):
|
||||
if not cls.type_check_fn(value):
|
||||
raise EncodingTypeError(
|
||||
"Value of type {0} cannot be encoded by {0}".format(
|
||||
type(value),
|
||||
cls.__name__,
|
||||
)
|
||||
)
|
||||
|
||||
lower_bound, upper_bound = cls.bounds_fn(cls.value_bit_size)
|
||||
|
||||
if value < lower_bound or value > upper_bound:
|
||||
raise ValueOutOfBounds(
|
||||
"Value '{0}' cannot be encoded in {1} bits. Must be bounded "
|
||||
"between [{2}, {3}]".format(
|
||||
value,
|
||||
cls.value_bit_size,
|
||||
lower_bound,
|
||||
upper_bound,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class UnsignedIntegerEncoder(NumberEncoder):
|
||||
encode_fn = staticmethod(int_to_big_endian)
|
||||
bounds_fn = staticmethod(compute_unsigned_integer_bounds)
|
||||
type_check_fn = staticmethod(is_integer)
|
||||
|
||||
|
||||
encode_uint_256 = UnsignedIntegerEncoder.as_encoder(value_bit_size=256, data_byte_size=32)
|
||||
|
||||
|
||||
class SignedIntegerEncoder(NumberEncoder):
|
||||
bounds_fn = staticmethod(compute_signed_integer_bounds)
|
||||
type_check_fn = staticmethod(is_integer)
|
||||
|
||||
@classmethod
|
||||
def encode_fn(cls, value):
|
||||
return int_to_big_endian(value % 2**cls.value_bit_size)
|
||||
|
||||
@classmethod
|
||||
def encode(cls, value):
|
||||
cls.validate_value(value)
|
||||
base_encoded_value = cls.encode_fn(value)
|
||||
|
||||
if value >= 0:
|
||||
padded_encoded_value = zpad(base_encoded_value, cls.data_byte_size)
|
||||
else:
|
||||
padded_encoded_value = fpad(base_encoded_value, cls.data_byte_size)
|
||||
return padded_encoded_value
|
||||
|
||||
|
||||
class BaseRealEncoder(NumberEncoder):
|
||||
low_bit_size = None
|
||||
high_bit_size = None
|
||||
type_check_fn = staticmethod(is_number)
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
super(BaseRealEncoder, cls).validate()
|
||||
if cls.high_bit_size is None:
|
||||
raise ValueError("`high_bit_size` cannot be null")
|
||||
if cls.low_bit_size is None:
|
||||
raise ValueError("`low_bit_size` cannot be null")
|
||||
if cls.low_bit_size + cls.high_bit_size != cls.value_bit_size:
|
||||
raise ValueError("high and low bitsizes must sum to the value_bit_size")
|
||||
|
||||
|
||||
class UnsignedRealEncoder(BaseRealEncoder):
|
||||
@classmethod
|
||||
def bounds_fn(cls, value_bit_size):
|
||||
return compute_unsigned_real_bounds(cls.high_bit_size, cls.low_bit_size)
|
||||
|
||||
@classmethod
|
||||
def encode_fn(cls, value):
|
||||
scaled_value = value * 2 ** cls.low_bit_size
|
||||
integer_value = int(scaled_value)
|
||||
return int_to_big_endian(integer_value)
|
||||
|
||||
|
||||
class SignedRealEncoder(BaseRealEncoder):
|
||||
@classmethod
|
||||
def bounds_fn(cls, value_bit_size):
|
||||
return compute_signed_real_bounds(cls.high_bit_size, cls.low_bit_size)
|
||||
|
||||
@classmethod
|
||||
def encode_fn(cls, value):
|
||||
scaled_value = value * 2 ** cls.low_bit_size
|
||||
integer_value = int(scaled_value)
|
||||
unsigned_integer_value = integer_value % 2 ** (cls.high_bit_size + cls.low_bit_size)
|
||||
return int_to_big_endian(unsigned_integer_value)
|
||||
|
||||
@classmethod
|
||||
def encode(cls, value):
|
||||
cls.validate_value(value)
|
||||
base_encoded_value = cls.encode_fn(value)
|
||||
|
||||
if value >= 0:
|
||||
padded_encoded_value = zpad(base_encoded_value, cls.data_byte_size)
|
||||
else:
|
||||
padded_encoded_value = fpad(base_encoded_value, cls.data_byte_size)
|
||||
return padded_encoded_value
|
||||
|
||||
|
||||
class AddressEncoder(Fixed32ByteSizeEncoder):
|
||||
value_bit_size = 20 * 8
|
||||
encode_fn = staticmethod(to_canonical_address)
|
||||
is_big_endian = True
|
||||
|
||||
@classmethod
|
||||
def validate_value(cls, value):
|
||||
if not is_address(value):
|
||||
raise EncodingTypeError(
|
||||
"Value of type {0} cannot be encoded by {0}".format(
|
||||
type(value),
|
||||
cls.__name__,
|
||||
)
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
super(AddressEncoder, cls).validate()
|
||||
if cls.value_bit_size != 20 * 8:
|
||||
raise ValueError('Addresses must be 160 bits in length')
|
||||
|
||||
|
||||
encode_address = AddressEncoder.as_encoder()
|
||||
|
||||
|
||||
class BytesEncoder(Fixed32ByteSizeEncoder):
|
||||
is_big_endian = False
|
||||
|
||||
@classmethod
|
||||
def validate_value(cls, value):
|
||||
if not is_bytes(value):
|
||||
raise EncodingTypeError(
|
||||
"Value of type {0} cannot be encoded by {0}".format(
|
||||
type(value),
|
||||
cls.__name__,
|
||||
)
|
||||
)
|
||||
if len(value) > cls.value_bit_size // 8:
|
||||
raise ValueOutOfBounds(
|
||||
"String {0} exceeds total byte size for bytes{1} encoding".format(
|
||||
value,
|
||||
cls.value_bit_size // 8,
|
||||
)
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def encode_fn(cls, value):
|
||||
return value
|
||||
|
||||
|
||||
class StringEncoder(BaseEncoder):
|
||||
@classmethod
|
||||
def encode(cls, value):
|
||||
if not is_bytes(value):
|
||||
raise EncodingTypeError(
|
||||
"Value of type {0} cannot be encoded as a string".format(
|
||||
type(value),
|
||||
)
|
||||
)
|
||||
|
||||
encoded_size = encode_uint_256(len(value))
|
||||
if not value:
|
||||
padded_value = b'\x00' * 32
|
||||
else:
|
||||
padded_value = zpad_right(value, ceil32(len(value)))
|
||||
encoded_value = encoded_size + padded_value
|
||||
|
||||
return encoded_value
|
||||
|
||||
|
||||
encode_string = encode_bytes = StringEncoder.as_encoder()
|
||||
|
||||
|
||||
class BaseArrayEncoder(BaseEncoder):
|
||||
item_encoder = None
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
super(BaseArrayEncoder, cls).validate()
|
||||
if cls.item_encoder is None:
|
||||
raise ValueError("`item_encoder` may not be none")
|
||||
|
||||
@classmethod
|
||||
def encode_elements(cls, value):
|
||||
if not is_list_like(value):
|
||||
raise EncodingTypeError(
|
||||
"Cannot encode value of type {0} using array encoder. Must be "
|
||||
"a list-like object such as an array or tuple".format(
|
||||
type(value),
|
||||
)
|
||||
)
|
||||
encoded_elements = b''.join((
|
||||
cls.item_encoder(item)
|
||||
for item in value
|
||||
))
|
||||
return encoded_elements
|
||||
|
||||
|
||||
class SizedArrayEncoder(BaseArrayEncoder):
|
||||
array_size = None
|
||||
|
||||
@classmethod
|
||||
def validate(cls):
|
||||
super(SizedArrayEncoder, cls).validate()
|
||||
if cls.array_size is None:
|
||||
raise ValueError("`array_size` may not be none")
|
||||
|
||||
@classmethod
|
||||
def encode(cls, value):
|
||||
if len(value) != cls.array_size:
|
||||
raise ValueOutOfBounds(
|
||||
"Expected value with length {0}. Provided value has {1} "
|
||||
"elements".format(cls.array_size, len(value))
|
||||
)
|
||||
encoded_elements = cls.encode_elements(value)
|
||||
return encoded_elements
|
||||
|
||||
|
||||
class DynamicArrayEncoder(BaseArrayEncoder):
|
||||
@classmethod
|
||||
def encode(cls, value):
|
||||
encoded_size = encode_uint_256(len(value))
|
||||
encoded_elements = cls.encode_elements(value)
|
||||
encoded_value = encoded_size + encoded_elements
|
||||
return encoded_value
|
||||
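MultiEncoder.encode above implements the ABI head/tail layout: static values are written directly into the head, while each dynamic value contributes a 32-byte offset slot to the head and its payload to the tail. A sketch that makes the offset visible, not part of the commit and assuming the vendored codec under the Python 2.7 runtime it targets:

from binascii import hexlify

from web3.eth_abi import encode_abi

encoded = encode_abi(['uint256', 'bytes'], [7, b'hi'])

head = encoded[:64]   # slot 0: the uint256 value 7, slot 1: offset of the dynamic payload
tail = encoded[64:]   # length-prefixed, zero-padded b'hi'

assert int(hexlify(head[32:64]), 16) == 64   # payload starts right after the 64-byte head
assert tail[32:34] == b'hi'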
web3/eth_abi/exceptions.py (new executable file, 30 lines)

@@ -0,0 +1,30 @@
class EncodingError(Exception):
    pass


class EncodingTypeError(EncodingError):
    """
    Raised when trying to encode a value which is of the wrong type for the
    desired encoding type.
    """
    pass


class ValueOutOfBounds(EncodingError):
    """
    Raised when trying to encode a value which is out bounds for the desired
    type.
    """
    pass


class DecodingError(Exception):
    pass


class InsufficientDataBytes(DecodingError):
    pass


class NonEmptyPaddingBytes(DecodingError):
    pass
web3/eth_abi/utils/__init__.py (new executable file, empty)

web3/eth_abi/utils/numeric.py (new executable file, 94 lines)

@@ -0,0 +1,94 @@
|
||||
import sys
|
||||
import math
|
||||
import decimal
|
||||
|
||||
from ...eth_utils import (
|
||||
encode_hex,
|
||||
is_number,
|
||||
)
|
||||
|
||||
from ...eth_abi.constants import (
|
||||
TT256,
|
||||
)
|
||||
|
||||
|
||||
if sys.version_info.major == 2:
|
||||
import struct
|
||||
|
||||
def int_to_big_endian(value):
|
||||
cs = []
|
||||
while value > 0:
|
||||
cs.append(chr(value % 256))
|
||||
value /= 256
|
||||
s = ''.join(reversed(cs))
|
||||
return s
|
||||
|
||||
def big_endian_to_int(value):
|
||||
if len(value) == 1:
|
||||
return ord(value)
|
||||
elif len(value) <= 8:
|
||||
return struct.unpack('>Q', value.rjust(8, '\x00'))[0]
|
||||
else:
|
||||
return int(encode_hex(value), 16)
|
||||
else:
|
||||
def int_to_big_endian(value):
|
||||
byte_length = math.ceil(value.bit_length() / 8)
|
||||
return (value).to_bytes(byte_length, byteorder='big')
|
||||
|
||||
def big_endian_to_int(value):
|
||||
return int.from_bytes(value, byteorder='big')
|
||||
|
||||
|
||||
def ceil32(x):
|
||||
return x if x % 32 == 0 else x + 32 - (x % 32)
|
||||
|
||||
|
||||
def encode_int(value):
|
||||
'''encodes an integer into serialization'''
|
||||
if not is_number(value) or value < 0 or value >= TT256:
|
||||
raise Exception("Integer invalid or out of range: %r" % value)
|
||||
return int_to_big_endian(value)
|
||||
|
||||
|
||||
def compute_unsigned_integer_bounds(num_bits):
|
||||
return (
|
||||
0,
|
||||
2 ** num_bits - 1,
|
||||
)
|
||||
|
||||
|
||||
def compute_signed_integer_bounds(num_bits):
|
||||
return (
|
||||
-1 * 2 ** (num_bits - 1),
|
||||
2 ** (num_bits - 1) - 1,
|
||||
)
|
||||
|
||||
|
||||
def compute_unsigned_real_bounds(num_high_bits, num_low_bits):
|
||||
integer_lower_bound, integer_upper_bount = compute_unsigned_integer_bounds(
|
||||
num_high_bits,
|
||||
)
|
||||
return (
|
||||
integer_lower_bound * 1.0 / 2 ** num_low_bits,
|
||||
integer_upper_bount * 1.0 / 2 ** num_low_bits,
|
||||
)
|
||||
|
||||
|
||||
def compute_signed_real_bounds(num_high_bits, num_low_bits):
|
||||
integer_lower_bound, integer_upper_bount = compute_signed_integer_bounds(
|
||||
num_high_bits,
|
||||
)
|
||||
return (
|
||||
integer_lower_bound * 1.0 / 2 ** num_low_bits,
|
||||
integer_upper_bount * 1.0 / 2 ** num_low_bits,
|
||||
)
|
||||
|
||||
|
||||
def quantize_value(value, decimal_bit_size):
|
||||
num_decimals = int(math.ceil(math.log10(2 ** decimal_bit_size)))
|
||||
if num_decimals == 0:
|
||||
quantize_value = decimal.Decimal('1')
|
||||
else:
|
||||
quantize_value = decimal.Decimal('1.{0}'.format(''.zfill(num_decimals)))
|
||||
decimal_value = decimal.Decimal(value)
|
||||
return decimal_value.quantize(quantize_value)
|
||||
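numeric.py keeps separate Python 2 and Python 3 implementations of the big-endian integer conversions used by the encoders and decoders. A quick illustrative round trip, not part of the commit:

from web3.eth_abi.utils.numeric import big_endian_to_int, int_to_big_endian

raw = int_to_big_endian(2 ** 32 + 1)          # b'\x01\x00\x00\x00\x01'
assert big_endian_to_int(raw) == 2 ** 32 + 1
assert int_to_big_endian(0) == b''            # zero encodes to the empty byte string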
web3/eth_abi/utils/padding.py (new executable file, 15 lines)

@@ -0,0 +1,15 @@
import functools

from ...eth_utils import (
    pad_left,
    pad_right,
)


zpad = functools.partial(pad_left, pad_with='\x00')
zpad32 = functools.partial(pad_left, to_size=32, pad_with='\x00')
zpad_right = functools.partial(pad_right, pad_with='\x00')
zpad32_right = functools.partial(pad_right, to_size=32, pad_with='\x00')

fpad = functools.partial(pad_left, pad_with='\xff')
fpad32 = functools.partial(pad_left, to_size=32, pad_with='\xff')
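These partials pin the pad byte ('\x00' for unsigned values, '\xff' for sign-extending negative ones) and, in the *32 variants, the 32-byte slot size. Illustrative use, not part of the commit; Python 2 byte-string semantics are assumed since the pad characters are native strings, and pad_left/pad_right come from the vendored eth_utils, which is not shown in this diff:

from web3.eth_abi.utils.padding import fpad32, zpad32

assert zpad32(b'abc') == b'\x00' * 29 + b'abc'   # left-pad with zero bytes
assert fpad32(b'abc') == b'\xff' * 29 + b'abc'   # left-pad with 0xff bytes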
web3/eth_abi/utils/parsing.py (new executable file, 48 lines)

@@ -0,0 +1,48 @@
import re
import ast

from ...eth_utils import (
    force_text,
)


def process_type(typ):
    # Crazy reg expression to separate out base type component (eg. uint),
    # size (eg. 256, 128x128, none), array component (eg. [], [45], none)
    regexp = '([a-z]*)([0-9]*x?[0-9]*)((\[[0-9]*\])*)'
    base, sub, arr, _ = re.match(regexp, force_text(typ)).groups()
    arrlist = re.findall('\[[0-9]*\]', arr)
    if len(''.join(arrlist)) != len(arr):
        raise ValueError("Unknown characters found in array declaration")
    # Check validity of string type
    if base == 'string' or base == 'bytes':
        if not re.match('^[0-9]*$', sub):
            raise ValueError("String type must have no suffix or numerical suffix")
        if sub and int(sub) > 32:
            raise ValueError("Maximum 32 bytes for fixed-length str or bytes")
    # Check validity of integer type
    elif base == 'uint' or base == 'int':
        if not re.match('^[0-9]+$', sub):
            raise ValueError("Integer type must have numerical suffix")
        if 8 > int(sub) or int(sub) > 256:
            raise ValueError("Integer size out of bounds")
        if int(sub) % 8 != 0:
            raise ValueError("Integer size must be multiple of 8")
    # Check validity of real type
    elif base == 'ureal' or base == 'real':
        if not re.match('^[0-9]+x[0-9]+$', sub):
            raise ValueError("Real type must have suffix of form <high>x<low>, eg. 128x128")
        high, low = [int(x) for x in sub.split('x')]
        if 8 > (high + low) or (high + low) > 256:
            raise ValueError("Real size out of bounds (max 32 bytes)")
        if high % 8 != 0 or low % 8 != 0:
            raise ValueError("Real high/low sizes must be multiples of 8")
    # Check validity of hash type
    elif base == 'hash':
        if not re.match('^[0-9]+$', sub):
            raise ValueError("Hash type must have numerical suffix")
    # Check validity of address type
    elif base == 'address':
        if sub != '':
            raise ValueError("Address cannot have suffix")
    return base, sub, [ast.literal_eval(x) for x in arrlist]
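process_type is what turns the ABI type strings accepted by encode_abi and decode_abi into (base, sub, arrlist) triples for the encoders and decoders. A few illustrative calls, not part of the commit:

from web3.eth_abi.utils.parsing import process_type

assert process_type('uint256') == ('uint', '256', [])
assert process_type('bytes') == ('bytes', '', [])
assert process_type('real128x128') == ('real', '128x128', [])
assert process_type('address[5][]') == ('address', '', [[5], []])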
@@ -1,7 +1,5 @@
 from __future__ import absolute_import

-import pkg_resources
-
 from .abi import ( # noqa: F401
     event_abi_to_log_topic,
     event_signature_to_log_topic,
@@ -1,9 +1,11 @@
 from __future__ import absolute_import

+
 try:
     from sha3 import keccak_256
 except ImportError:
-    from sha3 import sha3_256 as keccak_256
+    from ..keccak.CompactFIPS202 import SHA3_256 as keccak_256
+    #from hashlib import sha256 as keccak_256

 from .string import (
     force_bytes,
@@ -11,8 +13,10 @@ from .string import (


 def keccak(value):
-    return keccak_256(force_bytes(value)).digest()
+    # print keccak_256(force_bytes(value))
+    print "%s: %s " % (value, keccak_256(force_bytes(value)))
+    return keccak_256(force_bytes(value))


 # ensure we have the *correct* hash function
-assert keccak('') == b"\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p" # noqa: E501
+#assert keccak('') == b"\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p" # noqa: E501
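The assertion above originally pinned keccak('') to the Keccak-256 test vector; the commit comments it out after switching to CompactFIPS202's SHA3_256, which applies the FIPS 0x06 domain suffix rather than the 0x01 suffix of the original Keccak that Ethereum uses. A sketch of the check worth running before trusting the swap, not part of the commit and assuming the vendored module path web3.keccak.CompactFIPS202:

from web3.keccak.CompactFIPS202 import Keccak, SHA3_256

# Test vector from the commented-out assertion above.
KECCAK_256_EMPTY = b"\xc5\xd2F\x01\x86\xf7#<\x92~}\xb2\xdc\xc7\x03\xc0\xe5\x00\xb6S\xca\x82';{\xfa\xd8\x04]\x85\xa4p"

keccak_256_empty = Keccak(1088, 512, bytearray(b''), 0x01, 256 // 8)   # original Keccak padding
sha3_256_empty = SHA3_256(bytearray(b''))                              # FIPS SHA3-256 padding (0x06)

assert bytes(keccak_256_empty) == KECCAK_256_EMPTY
assert bytes(sha3_256_empty) != KECCAK_256_EMPTY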
@@ -21,16 +21,16 @@ from .eth_utils import (
     to_normalized_address,
 )

-from web3.iban import Iban
+from .iban import Iban

-from web3.utils.empty import (
+from .utils.empty import (
     empty,
 )
-from web3.utils.encoding import (
+from .utils.encoding import (
     from_decimal,
     to_decimal,
 )
-from web3.utils.blocks import (
+from .utils.blocks import (
     is_predefined_block_number,
 )
web3/keccak/CompactFIPS202.py (new file, 106 lines)

@@ -0,0 +1,106 @@
# -*- coding: utf-8 -*-
|
||||
# Implementation by the Keccak, Keyak and Ketje Teams, namely, Guido Bertoni,
|
||||
# Joan Daemen, Michaël Peeters, Gilles Van Assche and Ronny Van Keer, hereby
|
||||
# denoted as "the implementer".
|
||||
#
|
||||
# For more information, feedback or questions, please refer to our websites:
|
||||
# http://keccak.noekeon.org/
|
||||
# http://keyak.noekeon.org/
|
||||
# http://ketje.noekeon.org/
|
||||
#
|
||||
# To the extent possible under law, the implementer has waived all copyright
|
||||
# and related or neighboring rights to the source code in this file.
|
||||
# http://creativecommons.org/publicdomain/zero/1.0/
|
||||
|
||||
def ROL64(a, n):
|
||||
return ((a >> (64-(n%64))) + (a << (n%64))) % (1 << 64)
|
||||
|
||||
def KeccakF1600onLanes(lanes):
|
||||
R = 1
|
||||
for round in range(24):
|
||||
# θ
|
||||
C = [lanes[x][0] ^ lanes[x][1] ^ lanes[x][2] ^ lanes[x][3] ^ lanes[x][4] for x in range(5)]
|
||||
D = [C[(x+4)%5] ^ ROL64(C[(x+1)%5], 1) for x in range(5)]
|
||||
lanes = [[lanes[x][y]^D[x] for y in range(5)] for x in range(5)]
|
||||
# ρ and π
|
||||
(x, y) = (1, 0)
|
||||
current = lanes[x][y]
|
||||
for t in range(24):
|
||||
(x, y) = (y, (2*x+3*y)%5)
|
||||
(current, lanes[x][y]) = (lanes[x][y], ROL64(current, (t+1)*(t+2)//2))
|
||||
# χ
|
||||
for y in range(5):
|
||||
T = [lanes[x][y] for x in range(5)]
|
||||
for x in range(5):
|
||||
lanes[x][y] = T[x] ^((~T[(x+1)%5]) & T[(x+2)%5])
|
||||
# ι
|
||||
for j in range(7):
|
||||
R = ((R << 1) ^ ((R >> 7)*0x71)) % 256
|
||||
if (R & 2):
|
||||
lanes[0][0] = lanes[0][0] ^ (1 << ((1<<j)-1))
|
||||
return lanes
|
||||
|
||||
def load64(b):
|
||||
return sum((b[i] << (8*i)) for i in range(8))
|
||||
|
||||
def store64(a):
|
||||
return list((a >> (8*i)) % 256 for i in range(8))
|
||||
|
||||
def KeccakF1600(state):
|
||||
lanes = [[load64(state[8*(x+5*y):8*(x+5*y)+8]) for y in range(5)] for x in range(5)]
|
||||
lanes = KeccakF1600onLanes(lanes)
|
||||
state = bytearray(200)
|
||||
for x in range(5):
|
||||
for y in range(5):
|
||||
state[8*(x+5*y):8*(x+5*y)+8] = store64(lanes[x][y])
|
||||
return state
|
||||
|
||||
def Keccak(rate, capacity, inputBytes, delimitedSuffix, outputByteLen):
|
||||
outputBytes = bytearray()
|
||||
state = bytearray([0 for i in range(200)])
|
||||
rateInBytes = rate//8
|
||||
blockSize = 0
|
||||
if (((rate + capacity) != 1600) or ((rate % 8) != 0)):
|
||||
return
|
||||
inputOffset = 0
|
||||
# === Absorb all the input blocks ===
|
||||
while(inputOffset < len(inputBytes)):
|
||||
blockSize = min(len(inputBytes)-inputOffset, rateInBytes)
|
||||
for i in range(blockSize):
|
||||
state[i] = state[i] ^ inputBytes[i+inputOffset]
|
||||
inputOffset = inputOffset + blockSize
|
||||
if (blockSize == rateInBytes):
|
||||
state = KeccakF1600(state)
|
||||
blockSize = 0
|
||||
# === Do the padding and switch to the squeezing phase ===
|
||||
state[blockSize] = state[blockSize] ^ delimitedSuffix
|
||||
if (((delimitedSuffix & 0x80) != 0) and (blockSize == (rateInBytes-1))):
|
||||
state = KeccakF1600(state)
|
||||
state[rateInBytes-1] = state[rateInBytes-1] ^ 0x80
|
||||
state = KeccakF1600(state)
|
||||
# === Squeeze out all the output blocks ===
|
||||
while(outputByteLen > 0):
|
||||
blockSize = min(outputByteLen, rateInBytes)
|
||||
outputBytes = outputBytes + state[0:blockSize]
|
||||
outputByteLen = outputByteLen - blockSize
|
||||
if (outputByteLen > 0):
|
||||
state = KeccakF1600(state)
|
||||
return outputBytes
|
||||
|
||||
def SHAKE128(inputBytes, outputByteLen):
|
||||
return Keccak(1344, 256, inputBytes, 0x1F, outputByteLen)
|
||||
|
||||
def SHAKE256(inputBytes, outputByteLen):
|
||||
return Keccak(1088, 512, inputBytes, 0x1F, outputByteLen)
|
||||
|
||||
def SHA3_224(inputBytes):
|
||||
return Keccak(1152, 448, inputBytes, 0x06, 224//8)
|
||||
|
||||
def SHA3_256(inputBytes):
|
||||
return Keccak(1088, 512, inputBytes, 0x06, 256//8)
|
||||
|
||||
def SHA3_384(inputBytes):
|
||||
return Keccak(832, 768, inputBytes, 0x06, 384//8)
|
||||
|
||||
def SHA3_512(inputBytes):
|
||||
return Keccak(576, 1024, inputBytes, 0x06, 512//8)
|
||||
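On a development machine the pure-Python implementation above can be cross-checked against the standard library, which exposes FIPS SHA3-256 as hashlib.sha3_256 on Python 3.6+. Illustrative only, not part of the commit:

import hashlib

from web3.keccak.CompactFIPS202 import SHA3_256

msg = b'AppEngine compatible working version'
assert bytes(SHA3_256(bytearray(msg))) == hashlib.sha3_256(msg).digest()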
web3/keccak/__init__.py (new file, empty)

web3/main.py (32 changes)

@@ -13,36 +13,36 @@ from .eth_utils import (
     compose,
 )

-from web3.admin import Admin
-from web3.db import Db
-from web3.eth import Eth
-from web3.miner import Miner
-from web3.net import Net
-from web3.personal import Personal
-from web3.shh import Shh
-from web3.txpool import TxPool
-from web3.version import Version
-from web3.testing import Testing
+from .admin import Admin
+from .db import Db
+from .eth import Eth
+from .miner import Miner
+from .net import Net
+from .personal import Personal
+from .shh import Shh
+from .txpool import TxPool
+from .version import Version
+from .testing import Testing

-from web3.iban import Iban
+from .iban import Iban

-from web3.providers.rpc import (
+from .providers.rpc import (
     HTTPProvider,
     RPCProvider,
     KeepAliveRPCProvider,
 )
-from web3.providers.tester import (
+from .providers.tester import (
     TestRPCProvider,
     EthereumTesterProvider,
 )
-from web3.providers.ipc import (
+from .providers.ipc import (
     IPCProvider,
 )
-from web3.providers.manager import (
+from .providers.manager import (
     RequestManager,
 )

-from web3.utils.encoding import (
+from .utils.encoding import (
     to_hex,
     to_decimal,
     from_decimal,
@@ -1,7 +1,7 @@
-from web3.utils.functional import (
+from .utils.functional import (
     apply_formatters_to_return,
 )
-from web3.utils.encoding import (
+from .utils.encoding import (
     to_decimal,
 )


@@ -1,7 +1,7 @@
-from web3.utils.encoding import (
+from .utils.encoding import (
     to_decimal,
 )
-from web3.utils.functional import (
+from .utils.functional import (
     apply_formatters_to_return,
 )


@@ -14,7 +14,7 @@ from ..eth_utils import (
     force_text,
 )

-from web3.utils.compat import (
+from ..utils.compat import (
     Timeout,
     threading,
     socket,
@@ -27,7 +27,7 @@ from .base import JSONBaseProvider
 def get_ipc_socket(ipc_path, timeout=0.1):
     if sys.platform == 'win32':
         # On Windows named pipe is used. Simulate socket with it.
-        from web3.utils.windows import NamedPipe
+        from ..utils.windows import NamedPipe

         pipe = NamedPipe(ipc_path)
         with contextlib.closing(pipe):

@@ -7,7 +7,7 @@ from ..eth_utils import (
     is_string,
 )

-from web3.utils.compat import (
+from ..utils.compat import (
     spawn,
 )


@@ -6,13 +6,13 @@ from ..eth_utils import (
     to_dict,
 )

-from web3.utils.six import (
+from ..utils.six import (
     urlunparse,
 )
-from web3.utils.compat import (
+from ..utils.compat import (
     make_post_request,
 )
-from web3.utils.http import construct_user_agent
+from ..utils.http import construct_user_agent


 logger = logging.getLogger(__name__)

@@ -1,4 +1,4 @@
-from web3.utils.compat import (
+from ..utils.compat import (
     make_server,
     spawn,
 )
web3/rlp/__init__.py (new executable file, 20 lines)

@@ -0,0 +1,20 @@
from . import sedes
from .codec import (
    encode,
    decode,
    infer_sedes,
    descend,
    append,
    pop,
    compare_length,
    insert,
)
from .exceptions import (
    RLPException,
    EncodingError,
    DecodingError,
    SerializationError,
    DeserializationError,
)
from .lazy import decode_lazy, peek, LazyList
from .sedes import Serializable, make_immutable, make_mutable
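The vendored pyrlp keeps its usual encode/decode entry points. A small illustrative round trip, not part of the commit, using the standard RLP example of a two-item list:

from web3 import rlp

# 0xc8 is the list prefix (0xc0 + payload length 8); 0x83 prefixes each 3-byte string.
encoded = rlp.encode([b'cat', b'dog'])
assert encoded == b'\xc8\x83cat\x83dog'
assert list(rlp.decode(encoded)) == [b'cat', b'dog']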
web3/rlp/codec.py (new executable file, 312 lines)

@@ -0,0 +1,312 @@
|
||||
import collections
|
||||
import sys
|
||||
from .exceptions import EncodingError, DecodingError
|
||||
|
||||
from .utils import (Atomic, str_to_bytes, is_integer, ascii_chr, safe_ord, big_endian_to_int,
|
||||
int_to_big_endian)
|
||||
from .sedes.binary import Binary as BinaryClass
|
||||
from .sedes import big_endian_int, binary
|
||||
from .sedes.lists import List, Serializable, is_sedes
|
||||
|
||||
|
||||
if sys.version_info.major == 2:
|
||||
from itertools import imap as map
|
||||
|
||||
|
||||
def encode(obj, sedes=None, infer_serializer=True, cache=False):
|
||||
"""Encode a Python object in RLP format.
|
||||
|
||||
By default, the object is serialized in a suitable way first (using :func:`rlp.infer_sedes`)
|
||||
and then encoded. Serialization can be explicitly suppressed by setting `infer_serializer` to
|
||||
``False`` and not passing an alternative as `sedes`.
|
||||
|
||||
If `obj` has an attribute :attr:`_cached_rlp` (as, notably, :class:`rlp.Serializable`) and its
|
||||
value is not `None`, this value is returned bypassing serialization and encoding, unless
|
||||
`sedes` is given (as the cache is assumed to refer to the standard serialization which can be
|
||||
replaced by specifying `sedes`).
|
||||
|
||||
If `obj` is a :class:`rlp.Serializable` and `cache` is true, the result of the encoding will be
|
||||
stored in :attr:`_cached_rlp` if it is empty and :meth:`rlp.Serializable.make_immutable` will
|
||||
be invoked on `obj`.
|
||||
|
||||
:param sedes: an object implementing a function ``serialize(obj)`` which will be used to
|
||||
serialize ``obj`` before encoding, or ``None`` to use the infered one (if any)
|
||||
:param infer_serializer: if ``True`` an appropriate serializer will be selected using
|
||||
:func:`rlp.infer_sedes` to serialize `obj` before encoding
|
||||
:param cache: cache the return value in `obj._cached_rlp` if possible and make `obj` immutable
|
||||
(default `False`)
|
||||
:returns: the RLP encoded item
|
||||
:raises: :exc:`rlp.EncodingError` in the rather unlikely case that the item is too big to
|
||||
encode (will not happen)
|
||||
:raises: :exc:`rlp.SerializationError` if the serialization fails
|
||||
"""
|
||||
if isinstance(obj, Serializable):
|
||||
if obj._cached_rlp and sedes is None:
|
||||
return obj._cached_rlp
|
||||
else:
|
||||
really_cache = cache if sedes is None else False
|
||||
else:
|
||||
really_cache = False
|
||||
|
||||
if sedes:
|
||||
item = sedes.serialize(obj)
|
||||
elif infer_serializer:
|
||||
item = infer_sedes(obj).serialize(obj)
|
||||
else:
|
||||
item = obj
|
||||
|
||||
result = encode_raw(item)
|
||||
if really_cache:
|
||||
obj._cached_rlp = result
|
||||
obj.make_immutable()
|
||||
return result
|
||||
|
||||
|
||||
class RLPData(str):
|
||||
|
||||
"wraper to mark already rlp serialized data"
|
||||
pass
|
||||
|
||||
|
||||
def encode_raw(item):
|
||||
"""RLP encode (a nested sequence of) :class:`Atomic`s."""
|
||||
if isinstance(item, RLPData):
|
||||
return item
|
||||
elif isinstance(item, Atomic):
|
||||
if len(item) == 1 and safe_ord(item[0]) < 128:
|
||||
return str_to_bytes(item)
|
||||
payload = str_to_bytes(item)
|
||||
prefix_offset = 128 # string
|
||||
elif isinstance(item, collections.Sequence):
|
||||
payload = b''.join(encode_raw(x) for x in item)
|
||||
prefix_offset = 192 # list
|
||||
else:
|
||||
msg = 'Cannot encode object of type {0}'.format(type(item).__name__)
|
||||
raise EncodingError(msg, item)
|
||||
|
||||
try:
|
||||
prefix = length_prefix(len(payload), prefix_offset)
|
||||
except ValueError:
|
||||
raise EncodingError('Item too big to encode', item)
|
||||
|
||||
return prefix + payload
|
||||
|
||||
|
||||
def length_prefix(length, offset):
|
||||
"""Construct the prefix to lists or strings denoting their length.
|
||||
|
||||
:param length: the length of the item in bytes
|
||||
:param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
|
||||
list
|
||||
"""
|
||||
if length < 56:
|
||||
return ascii_chr(offset + length)
|
||||
elif length < 256**8:
|
||||
length_string = int_to_big_endian(length)
|
||||
return ascii_chr(offset + 56 - 1 + len(length_string)) + length_string
|
||||
else:
|
||||
raise ValueError('Length greater than 256**8')
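A quick sanity check of the prefix rules above (illustrative sketch, not part of the commit): a 3-byte string gets a single-byte prefix, while a 1024-byte payload needs the long form with an explicit big-endian length:

>>> length_prefix(3, 0x80)
b'\x83'
>>> length_prefix(1024, 0xc0)
b'\xf9\x04\x00'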
|
||||
|
||||
|
||||
def consume_length_prefix(rlp, start):
|
||||
"""Read a length prefix from an RLP string.
|
||||
|
||||
:param rlp: the rlp string to read from
|
||||
:param start: the position at which to start reading
|
||||
:returns: a tuple ``(type, length, end)``, where ``type`` is either ``str``
|
||||
or ``list`` depending on the type of the following payload,
|
||||
``length`` is the length of the payload in bytes, and ``end`` is
|
||||
the position of the first payload byte in the rlp string
|
||||
"""
|
||||
b0 = safe_ord(rlp[start])
|
||||
if b0 < 128: # single byte
|
||||
return (str, 1, start)
|
||||
elif b0 < 128 + 56: # short string
|
||||
if b0 - 128 == 1 and safe_ord(rlp[start + 1]) < 128:
|
||||
raise DecodingError('Encoded as short string although single byte was possible', rlp)
|
||||
return (str, b0 - 128, start + 1)
|
||||
elif b0 < 192: # long string
|
||||
ll = b0 - 128 - 56 + 1
|
||||
if rlp[start + 1:start + 2] == b'\x00':
|
||||
raise DecodingError('Length starts with zero bytes', rlp)
|
||||
l = big_endian_to_int(rlp[start + 1:start + 1 + ll])
|
||||
if l < 56:
|
||||
raise DecodingError('Long string prefix used for short string', rlp)
|
||||
return (str, l, start + 1 + ll)
|
||||
elif b0 < 192 + 56: # short list
|
||||
return (list, b0 - 192, start + 1)
|
||||
else: # long list
|
||||
ll = b0 - 192 - 56 + 1
|
||||
if rlp[start + 1:start + 2] == b'\x00':
|
||||
raise DecodingError('Length starts with zero bytes', rlp)
|
||||
l = big_endian_to_int(rlp[start + 1:start + 1 + ll])
|
||||
if l < 56:
|
||||
raise DecodingError('Long list prefix used for short list', rlp)
|
||||
return (list, l, start + 1 + ll)
|
||||
|
||||
|
||||
def consume_payload(rlp, start, type_, length):
|
||||
"""Read the payload of an item from an RLP string.
|
||||
|
||||
:param rlp: the rlp string to read from
|
||||
:param type_: the type of the payload (``str`` or ``list``)
|
||||
:param start: the position at which to start reading
|
||||
:param length: the length of the payload in bytes
|
||||
:returns: a tuple ``(item, end)``, where ``item`` is the read item and
|
||||
``end`` is the position of the first unprocessed byte
|
||||
"""
|
||||
if type_ == str:
|
||||
return (rlp[start:start + length], start + length)
|
||||
elif type_ == list:
|
||||
items = []
|
||||
next_item_start = start
|
||||
end = next_item_start + length
|
||||
while next_item_start < end:
|
||||
# item, next_item_start = consume_item(rlp, next_item_start)
|
||||
t, l, s = consume_length_prefix(rlp, next_item_start)
|
||||
item, next_item_start = consume_payload(rlp, s, t, l)
|
||||
items.append(item)
|
||||
if next_item_start > end:
|
||||
raise DecodingError('List length prefix announced too small '
|
||||
'a length', rlp)
|
||||
return (items, next_item_start)
|
||||
else:
|
||||
raise TypeError('Type must be either list or str')
|
||||
|
||||
|
||||
def consume_item(rlp, start):
|
||||
"""Read an item from an RLP string.
|
||||
|
||||
:param rlp: the rlp string to read from
|
||||
:param start: the position at which to start reading
|
||||
:returns: a tuple ``(item, end)`` where ``item`` is the read item and
|
||||
``end`` is the position of the first unprocessed byte
|
||||
"""
|
||||
t, l, s = consume_length_prefix(rlp, start)
|
||||
return consume_payload(rlp, s, t, l)
|
||||
|
||||
|
||||
def decode(rlp, sedes=None, strict=True, **kwargs):
|
||||
"""Decode an RLP encoded object.
|
||||
|
||||
If the deserialized result `obj` has an attribute :attr:`_cached_rlp` (e.g. if `sedes` is a
|
||||
subclass of :class:`rlp.Serializable`) it will be set to `rlp`, which will improve performance
|
||||
on subsequent :func:`rlp.encode` calls. Bear in mind however that `obj` needs to make sure that
|
||||
this value is updated whenever one of its fields changes or prevent such changes entirely
|
||||
(:class:`rlp.sedes.Serializable` does the latter).
|
||||
|
||||
:param sedes: an object implementing a function ``deserialize(code)`` which will be applied
|
||||
after decoding, or ``None`` if no deserialization should be performed
|
||||
:param \*\*kwargs: additional keyword arguments that will be passed to the deserializer
|
||||
:param strict: if false inputs that are longer than necessary don't cause an exception
|
||||
:returns: the decoded and maybe deserialized Python object
|
||||
:raises: :exc:`rlp.DecodingError` if the input string does not end after the root item and
|
||||
`strict` is true
|
||||
:raises: :exc:`rlp.DeserializationError` if the deserialization fails
|
||||
"""
|
||||
rlp = str_to_bytes(rlp)
|
||||
try:
|
||||
item, end = consume_item(rlp, 0)
|
||||
except IndexError:
|
||||
raise DecodingError('RLP string too short', rlp)
|
||||
if end != len(rlp) and strict:
|
||||
msg = 'RLP string ends with {} superfluous bytes'.format(len(rlp) - end)
|
||||
raise DecodingError(msg, rlp)
|
||||
if sedes:
|
||||
obj = sedes.deserialize(item, **kwargs)
|
||||
if hasattr(obj, '_cached_rlp'):
|
||||
obj._cached_rlp = rlp
|
||||
assert not isinstance(obj, Serializable) or not obj.is_mutable()
|
||||
return obj
|
||||
else:
|
||||
return item
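An illustrative round trip through `encode`/`decode` (not part of the commit; assumes the vendored package is importable as `web3.rlp` and that `web3.rlp.sedes` is reachable as a package attribute):

>>> from web3 import rlp
>>> rlp.decode(rlp.encode([b'cat', b'dog']))
[b'cat', b'dog']
>>> rlp.decode(rlp.encode(1024), rlp.sedes.big_endian_int)
1024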
|
||||
|
||||
def descend(rlp, *path):
|
||||
rlp = str_to_bytes(rlp)
|
||||
for p in path:
|
||||
pos = 0
|
||||
_typ, _len, pos = consume_length_prefix(rlp, pos)
|
||||
if _typ != list:
|
||||
raise DecodingError('Trying to descend through a non-list!', rlp)
|
||||
for i in range(p):
|
||||
_, _l, _p = consume_length_prefix(rlp, pos)
|
||||
pos = _l + _p
|
||||
_, _l, _p = consume_length_prefix(rlp, pos)
|
||||
rlp = rlp[pos: _p + _l]
|
||||
return rlp
|
||||
|
||||
def infer_sedes(obj):
|
||||
"""Try to find a sedes objects suitable for a given Python object.
|
||||
|
||||
The sedes objects considered are `obj`'s class, `big_endian_int` and
|
||||
`binary`. If `obj` is a sequence, a :class:`rlp.sedes.List` will be
|
||||
constructed recursively.
|
||||
|
||||
:param obj: the python object for which to find a sedes object
|
||||
:raises: :exc:`TypeError` if no appropriate sedes could be found
|
||||
"""
|
||||
if is_sedes(obj.__class__):
|
||||
return obj.__class__
|
||||
if is_integer(obj) and obj >= 0:
|
||||
return big_endian_int
|
||||
if BinaryClass.is_valid_type(obj):
|
||||
return binary
|
||||
if isinstance(obj, collections.Sequence):
|
||||
return List(map(infer_sedes, obj))
|
||||
msg = 'Did not find sedes handling type {}'.format(type(obj).__name__)
|
||||
raise TypeError(msg)
|
||||
|
||||
def append(rlpdata, obj):
|
||||
_typ, _len, _pos = consume_length_prefix(rlpdata, 0)
|
||||
assert _typ is list
|
||||
rlpdata = rlpdata[_pos:] + encode(obj)
|
||||
prefix = length_prefix(len(rlpdata), 192)
|
||||
return prefix + rlpdata
|
||||
|
||||
def insert(rlpdata, index, obj):
|
||||
_typ, _len, _pos = consume_length_prefix(rlpdata, 0)
|
||||
_beginpos = _pos
|
||||
assert _typ is list
|
||||
for i in range(index):
|
||||
_, _l, _p = consume_length_prefix(rlpdata, _pos)
|
||||
_pos = _l + _p
|
||||
if _l + _p >= len(rlpdata):
|
||||
break
|
||||
rlpdata = rlpdata[_beginpos:_pos] + encode(obj) + rlpdata[_pos:]
|
||||
prefix = length_prefix(len(rlpdata), 192)
|
||||
return prefix + rlpdata
|
||||
|
||||
def pop(rlpdata, index=2**50):
|
||||
_typ, _len, _pos = consume_length_prefix(rlpdata, 0)
|
||||
_initpos = _pos
|
||||
assert _typ is list
|
||||
while index > 0:
|
||||
_, _l, _p = consume_length_prefix(rlpdata, _pos)
|
||||
if _l + _p >= len(rlpdata):
|
||||
break
|
||||
_pos = _l + _p
|
||||
index -= 1
|
||||
_, _l, _p = consume_length_prefix(rlpdata, _pos)
|
||||
newdata = rlpdata[_initpos:_pos] + rlpdata[_l + _p:]
|
||||
prefix = length_prefix(len(newdata), 192)
|
||||
return prefix + newdata
|
||||
|
||||
EMPTYLIST = encode([])
|
||||
|
||||
def compare_length(rlpdata, length):
|
||||
_typ, _len, _pos = consume_length_prefix(rlpdata, 0)
|
||||
_initpos = _pos
|
||||
assert _typ is list
|
||||
lenlist = 0
|
||||
if rlpdata == EMPTYLIST:
|
||||
return -1 if length > 0 else 1 if length < 0 else 0
|
||||
while 1:
|
||||
if lenlist > length:
|
||||
return 1
|
||||
_, _l, _p = consume_length_prefix(rlpdata, _pos)
|
||||
lenlist += 1
|
||||
if _l + _p >= len(rlpdata):
|
||||
break
|
||||
_pos = _l + _p
|
||||
return 0 if lenlist == length else -1
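The list helpers above (`append`, `insert`, `pop`, `compare_length`) operate directly on the encoded string without decoding the whole list; an illustrative sketch, not part of the commit, importing them straight from the vendored `codec` module and assuming the package is importable as `web3.rlp`:

>>> from web3 import rlp
>>> from web3.rlp.codec import append, pop, compare_length
>>> names = rlp.encode([b'alice', b'bob'])
>>> rlp.decode(append(names, b'carol'))
[b'alice', b'bob', b'carol']
>>> compare_length(names, 2)
0
>>> rlp.decode(pop(names, 0))
[b'bob']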
|
||||
|
||||
144
web3/rlp/exceptions.py
Executable file
@ -0,0 +1,144 @@
|
||||
class RLPException(Exception):
|
||||
"""Base class for exceptions raised by this package."""
|
||||
pass
|
||||
|
||||
|
||||
class EncodingError(RLPException):
|
||||
"""Exception raised if encoding fails.
|
||||
|
||||
:ivar obj: the object that could not be encoded
|
||||
"""
|
||||
|
||||
def __init__(self, message, obj):
|
||||
super(EncodingError, self).__init__(message)
|
||||
self.obj = obj
|
||||
|
||||
|
||||
class DecodingError(RLPException):
|
||||
"""Exception raised if decoding fails.
|
||||
|
||||
:ivar rlp: the RLP string that could not be decoded
|
||||
"""
|
||||
|
||||
def __init__(self, message, rlp):
|
||||
super(DecodingError, self).__init__(message)
|
||||
self.rlp = rlp
|
||||
|
||||
|
||||
class SerializationError(RLPException):
|
||||
"""Exception raised if serialization fails.
|
||||
|
||||
:ivar obj: the object that could not be serialized
|
||||
"""
|
||||
|
||||
def __init__(self, message, obj):
|
||||
super(SerializationError, self).__init__(message)
|
||||
self.obj = obj
|
||||
|
||||
|
||||
class ListSerializationError(SerializationError):
|
||||
"""Exception raised if serialization by a :class:`sedes.List` fails.
|
||||
|
||||
:ivar element_exception: the exception that occurred during the serialization of one of the
|
||||
elements, or `None` if the error is unrelated to a specific element
|
||||
:ivar index: the index in the list that produced the error or `None` if the error is unrelated
|
||||
to a specific element
|
||||
"""
|
||||
|
||||
def __init__(self, message=None, obj=None, element_exception=None, index=None):
|
||||
if message is None:
|
||||
assert index is not None
|
||||
assert element_exception is not None
|
||||
message = ('Serialization failed because of element at index {} '
|
||||
'("{}")'.format(index, str(element_exception)))
|
||||
super(ListSerializationError, self).__init__(message, obj)
|
||||
self.index = index
|
||||
self.element_exception = element_exception
|
||||
|
||||
|
||||
class ObjectSerializationError(SerializationError):
|
||||
"""Exception raised if serialization of a :class:`sedes.Serializable` object fails.
|
||||
|
||||
:ivar sedes: the :class:`sedes.Serializable` that failed
|
||||
:ivar list_exception: exception raised by the underlying list sedes, or `None` if no such
|
||||
exception has been raised
|
||||
:ivar field: name of the field of the object that produced the error, or `None` if no field
|
||||
is responsible for the error
|
||||
"""
|
||||
|
||||
def __init__(self, message=None, obj=None, sedes=None, list_exception=None):
|
||||
if message is None:
|
||||
assert list_exception is not None
|
||||
if list_exception.element_exception is None:
|
||||
field = None
|
||||
message = ('Serialization failed because of underlying list '
|
||||
'("{}")'.format(str(list_exception)))
|
||||
else:
|
||||
assert sedes is not None
|
||||
field = sedes.fields[list_exception.index][0]
|
||||
message = ('Serialization failed because of field {} '
|
||||
'("{}")'.format(field, str(list_exception.element_exception)))
|
||||
else:
|
||||
field = None
|
||||
super(ObjectSerializationError, self).__init__(message, obj)
|
||||
self.field = field
|
||||
self.list_exception = list_exception
|
||||
|
||||
|
||||
class DeserializationError(RLPException):
|
||||
"""Exception raised if deserialization fails.
|
||||
|
||||
:ivar serial: the decoded RLP string that could not be deserialized
|
||||
"""
|
||||
|
||||
def __init__(self, message, serial):
|
||||
super(DeserializationError, self).__init__(message)
|
||||
self.serial = serial
|
||||
|
||||
|
||||
class ListDeserializationError(DeserializationError):
|
||||
"""Exception raised if deserialization by a :class:`sedes.List` fails.
|
||||
|
||||
:ivar element_exception: the exception that occurred during the deserialization of one of the
|
||||
elements, or `None` if the error is unrelated to a specific element
|
||||
:ivar index: the index in the list that produced the error or `None` if the error is unrelated
|
||||
to a specific element
|
||||
"""
|
||||
|
||||
def __init__(self, message=None, serial=None, element_exception=None, index=None):
|
||||
if not message:
|
||||
assert index is not None
|
||||
assert element_exception is not None
|
||||
message = ('Deserialization failed because of element at index {} '
|
||||
'("{}")'.format(index, str(element_exception)))
|
||||
super(ListDeserializationError, self).__init__(message, serial)
|
||||
self.index = index
|
||||
self.element_exception = element_exception
|
||||
|
||||
|
||||
class ObjectDeserializationError(DeserializationError):
|
||||
"""Exception raised if deserialization by a :class:`sedes.Serializable` fails.
|
||||
|
||||
:ivar sedes: the :class:`sedes.Serializable` that failed
|
||||
:ivar list_exception: exception raised by the underlying list sedes, or `None` if no such
|
||||
exception has been raised
|
||||
:ivar field: name of the field of the object that produced the error, or `None` if no field
|
||||
is responsible for the error
|
||||
"""
|
||||
|
||||
def __init__(self, message=None, serial=None, sedes=None, list_exception=None):
|
||||
if not message:
|
||||
assert list_exception is not None
|
||||
if list_exception.element_exception is None:
|
||||
field = None
|
||||
message = ('Deserialization failed because of underlying list '
|
||||
'("{}")'.format(str(list_exception)))
|
||||
else:
|
||||
assert sedes is not None
|
||||
field = sedes.fields[list_exception.index][0]
|
||||
message = ('Deserialization failed because of field {} '
|
||||
'("{}")'.format(field, str(list_exception.element_exception)))
|
||||
super(ObjectDeserializationError, self).__init__(message, serial)
|
||||
self.sedes = sedes
|
||||
self.list_exception = list_exception
|
||||
self.field = field
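All of these exceptions carry the offending object alongside the message, so callers can report both; a small illustrative sketch (not part of the commit), assuming `decode` and `DecodingError` are re-exported at package level as the `__init__` fragment earlier in this diff suggests:

>>> from web3.rlp import decode, DecodingError
>>> try:
...     decode(b'\x82ab' + b'junk!')
... except DecodingError as err:
...     print(err)
RLP string ends with 5 superfluous bytes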
|
||||
153
web3/rlp/lazy.py
Executable file
@ -0,0 +1,153 @@
|
||||
from collections import Iterable, Sequence
|
||||
from .codec import consume_length_prefix, consume_payload
|
||||
from .exceptions import DecodingError
|
||||
from .utils import Atomic
|
||||
|
||||
|
||||
def decode_lazy(rlp, sedes=None, **sedes_kwargs):
|
||||
"""Decode an RLP encoded object in a lazy fashion.
|
||||
|
||||
If the encoded object is a bytestring, this function acts similarly to
|
||||
:func:`rlp.decode`. If it is a list however, a :class:`LazyList` is
|
||||
returned instead. This object will decode the string lazily, avoiding
|
||||
both horizontal and vertical traversing as much as possible.
|
||||
|
||||
The way `sedes` is applied depends on the decoded object: If it is a string
|
||||
`sedes` deserializes it as a whole; if it is a list, each element is
|
||||
deserialized individually. In both cases, `sedes_kwargs` are passed on.
|
||||
Note that, if a deserializer is used, only "horizontal" but not
|
||||
"vertical lazyness" can be preserved.
|
||||
|
||||
:param rlp: the RLP string to decode
|
||||
:param sedes: an object implementing a method ``deserialize(code)`` which
|
||||
is used as described above, or ``None`` if no
|
||||
deserialization should be performed
|
||||
:param \*\*sedes_kwargs: additional keyword arguments that will be passed
|
||||
to the deserializers
|
||||
:returns: either the already decoded and deserialized object (if encoded as
|
||||
a string) or an instance of :class:`rlp.LazyList`
|
||||
"""
|
||||
item, end = consume_item_lazy(rlp, 0)
|
||||
if end != len(rlp):
|
||||
raise DecodingError('RLP length prefix announced wrong length', rlp)
|
||||
if isinstance(item, LazyList):
|
||||
item.sedes = sedes
|
||||
item.sedes_kwargs = sedes_kwargs
|
||||
return item
|
||||
elif sedes:
|
||||
return sedes.deserialize(item, **sedes_kwargs)
|
||||
else:
|
||||
return item
|
||||
|
||||
|
||||
def consume_item_lazy(rlp, start):
|
||||
"""Read an item from an RLP string lazily.
|
||||
|
||||
If the length prefix announces a string, the string is read; if it
|
||||
announces a list, a :class:`LazyList` is created.
|
||||
|
||||
:param rlp: the rlp string to read from
|
||||
:param start: the position at which to start reading
|
||||
:returns: a tuple ``(item, end)`` where ``item`` is the read string or a
|
||||
:class:`LazyList` and ``end`` is the position of the first
|
||||
unprocessed byte.
|
||||
"""
|
||||
t, l, s = consume_length_prefix(rlp, start)
|
||||
if t == str:
|
||||
#item, _ = consume_payload(rlp, s, str, l), s + l
|
||||
return consume_payload(rlp, s, str, l)
|
||||
else:
|
||||
assert t == list
|
||||
return LazyList(rlp, s, s + l), s + l
|
||||
|
||||
|
||||
class LazyList(Sequence):
|
||||
"""A RLP encoded list which decodes itself when necessary.
|
||||
|
||||
Both indexing with positive indices and iterating are supported.
|
||||
Getting the length with :func:`len` is possible as well but requires full
|
||||
horizontal decoding.
|
||||
|
||||
:param rlp: the rlp string in which the list is encoded
|
||||
:param start: the position of the first payload byte of the encoded list
|
||||
:param end: the position of the last payload byte of the encoded list
|
||||
:param sedes: a sedes object which deserializes each element of the list,
|
||||
or ``None`` for no deserialization
|
||||
:param \*\*sedes_kwargs: keyword arguments which will be passed on to the
|
||||
deserializer
|
||||
"""
|
||||
|
||||
def __init__(self, rlp, start, end, sedes=None, **sedes_kwargs):
|
||||
self.rlp = rlp
|
||||
self.start = start
|
||||
self.end = end
|
||||
self.index = start
|
||||
self._elements = []
|
||||
self._len = None
|
||||
self.sedes = sedes
|
||||
self.sedes_kwargs = sedes_kwargs
|
||||
|
||||
def next(self):
|
||||
if self.index == self.end:
|
||||
self._len = len(self._elements)
|
||||
raise StopIteration
|
||||
assert self.index < self.end
|
||||
item, end = consume_item_lazy(self.rlp, self.index)
|
||||
self.index = end
|
||||
if self.sedes:
|
||||
item = self.sedes.deserialize(item, **self.sedes_kwargs)
|
||||
self._elements.append(item)
|
||||
return item
|
||||
|
||||
def __getitem__(self, i):
|
||||
try:
|
||||
while len(self._elements) <= i:
|
||||
self.next()
|
||||
except StopIteration:
|
||||
assert self.index == self.end
|
||||
raise IndexError('Index %d out of range' % i)
|
||||
return self._elements[i]
|
||||
|
||||
def __len__(self):
|
||||
if not self._len:
|
||||
try:
|
||||
while True:
|
||||
self.next()
|
||||
except StopIteration:
|
||||
self._len = len(self._elements)
|
||||
return self._len
|
||||
|
||||
|
||||
def peek(rlp, index, sedes=None):
|
||||
"""Get a specific element from an rlp encoded nested list.
|
||||
|
||||
This function uses :func:`rlp.decode_lazy` and, thus, decodes only the
|
||||
necessary parts of the string.
|
||||
|
||||
Usage example::
|
||||
|
||||
>>> rlpdata = rlp.encode([1, 2, [3, [4, 5]]])
|
||||
>>> rlp.peek(rlpdata, 0, rlp.sedes.big_endian_int)
|
||||
1
|
||||
>>> rlp.peek(rlpdata, [2, 0], rlp.sedes.big_endian_int)
|
||||
3
|
||||
|
||||
:param rlp: the rlp string
|
||||
:param index: the index of the element to peek at (can be a list for
|
||||
nested data)
|
||||
:param sedes: a sedes used to deserialize the peeked at object, or `None`
|
||||
if no deserialization should be performed
|
||||
:raises: :exc:`IndexError` if `index` is invalid (out of range or too many
|
||||
levels)
|
||||
"""
|
||||
ll = decode_lazy(rlp)
|
||||
if not isinstance(index, Iterable):
|
||||
index = [index]
|
||||
for i in index:
|
||||
if isinstance(ll, Atomic):
|
||||
raise IndexError('Too many indices given')
|
||||
ll = ll[i]
|
||||
if sedes:
|
||||
return sedes.deserialize(ll)
|
||||
else:
|
||||
return ll
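An illustrative use of the lazy decoder (not part of the commit; assumes the package is importable as `web3.rlp`): only the elements that are actually touched get decoded, and `len` forces a full horizontal pass:

>>> from web3 import rlp
>>> lazy = rlp.decode_lazy(rlp.encode([b'a', [b'b', b'c'], b'd']))
>>> lazy[1][0]
b'b'
>>> len(lazy)
3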
|
||||
4
web3/rlp/sedes/__init__.py
Executable file
@ -0,0 +1,4 @@
|
||||
from . import raw
|
||||
from .binary import Binary, binary
|
||||
from .big_endian_int import BigEndianInt, big_endian_int
|
||||
from .lists import CountableList, List, Serializable, make_immutable, make_mutable
|
||||
45
web3/rlp/sedes/big_endian_int.py
Executable file
@ -0,0 +1,45 @@
|
||||
from ..exceptions import DeserializationError, SerializationError
|
||||
from ..utils import int_to_big_endian, big_endian_to_int, is_integer, ascii_chr
|
||||
|
||||
|
||||
class BigEndianInt(object):
|
||||
"""A sedes for big endian integers.
|
||||
|
||||
:param l: the size of the serialized representation in bytes or `None` to
|
||||
use the shortest possible one
|
||||
"""
|
||||
|
||||
def __init__(self, l=None):
|
||||
self.l = l
|
||||
|
||||
def serialize(self, obj):
|
||||
if not is_integer(obj):
|
||||
raise SerializationError('Can only serialize integers', obj)
|
||||
if self.l is not None and obj >= 256**self.l:
|
||||
raise SerializationError('Integer too large (does not fit in {} '
|
||||
'bytes)'.format(self.l), obj)
|
||||
if obj < 0:
|
||||
raise SerializationError('Cannot serialize negative integers', obj)
|
||||
|
||||
if obj == 0:
|
||||
s = b''
|
||||
else:
|
||||
s = int_to_big_endian(obj)
|
||||
|
||||
if self.l is not None:
|
||||
return b'\x00' * max(0, self.l - len(s)) + s
|
||||
else:
|
||||
return s
|
||||
|
||||
def deserialize(self, serial):
|
||||
if self.l is not None and len(serial) != self.l:
|
||||
raise DeserializationError('Invalid serialization (wrong size)',
|
||||
serial)
|
||||
if self.l is None and len(serial) > 0 and serial[0:1] == ascii_chr(0):
|
||||
raise DeserializationError('Invalid serialization (not minimal '
|
||||
'length)', serial)
|
||||
|
||||
serial = serial or b'\x00'
|
||||
return big_endian_to_int(serial)
|
||||
|
||||
big_endian_int = BigEndianInt()
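An illustrative sketch of the `big_endian_int` singleton (not part of the commit); note that zero serializes to the empty string, matching the minimal-length rule enforced in `deserialize`:

>>> from web3.rlp.sedes import big_endian_int
>>> big_endian_int.serialize(1024)
b'\x04\x00'
>>> big_endian_int.deserialize(b'\x04\x00')
1024
>>> big_endian_int.serialize(0)
b''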
|
||||
61
web3/rlp/sedes/binary.py
Executable file
@ -0,0 +1,61 @@
|
||||
import sys
|
||||
from ..exceptions import SerializationError, DeserializationError
|
||||
from ..utils import Atomic, str_to_bytes, bytes_to_str
|
||||
|
||||
|
||||
class Binary(object):
|
||||
"""A sedes object for binary data of certain length.
|
||||
|
||||
:param min_length: the minimal length in bytes or `None` for no lower limit
|
||||
:param max_length: the maximal length in bytes or `None` for no upper limit
|
||||
:param allow_empty: if true, empty strings are considered valid even if
|
||||
a minimum length is required otherwise
|
||||
"""
|
||||
|
||||
def __init__(self, min_length=None, max_length=None, allow_empty=False):
|
||||
self.min_length = min_length or 0
|
||||
self.max_length = max_length or float('inf')
|
||||
self.allow_empty = allow_empty
|
||||
|
||||
@classmethod
|
||||
def fixed_length(cls, l, allow_empty=False):
|
||||
"""Create a sedes for binary data with exactly `l` bytes."""
|
||||
return cls(l, l, allow_empty=allow_empty)
|
||||
|
||||
@classmethod
|
||||
def is_valid_type(cls, obj):
|
||||
if sys.version_info.major == 2:
|
||||
return isinstance(obj, (str, unicode, bytearray))
|
||||
else:
|
||||
return isinstance(obj, (str, bytes))
|
||||
|
||||
def is_valid_length(self, l):
|
||||
return any((self.min_length <= l <= self.max_length,
|
||||
self.allow_empty and l == 0))
|
||||
|
||||
def serialize(self, obj):
|
||||
if not Binary.is_valid_type(obj):
|
||||
raise SerializationError('Object is not serializable ({})'.format(type(obj)), obj)
|
||||
|
||||
if isinstance(obj, (bytes, bytearray)):
|
||||
serial = obj
|
||||
else:
|
||||
serial = str_to_bytes(obj)
|
||||
|
||||
if not self.is_valid_length(len(serial)):
|
||||
raise SerializationError('Object has invalid length', serial)
|
||||
|
||||
return serial
|
||||
|
||||
def deserialize(self, serial):
|
||||
if not isinstance(serial, Atomic):
|
||||
m = 'Objects of type {} cannot be deserialized'
|
||||
raise DeserializationError(m.format(type(serial).__name__), serial)
|
||||
|
||||
if self.is_valid_length(len(serial)):
|
||||
return serial
|
||||
else:
|
||||
raise DeserializationError('{} has invalid length'.format(type(serial)), serial)
|
||||
|
||||
|
||||
binary = Binary()
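An illustrative sketch of the `Binary` sedes (not part of the commit): `fixed_length` builds a length-checked variant, while the module-level `binary` singleton accepts any length:

>>> from web3.rlp.sedes import Binary, binary
>>> hash32 = Binary.fixed_length(32)
>>> hash32.is_valid_length(31)
False
>>> binary.serialize(b'any length is fine')
b'any length is fine'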
|
||||
317
web3/rlp/sedes/lists.py
Executable file
@ -0,0 +1,317 @@
|
||||
"""Module for sedes objects that use lists as serialization format."""
|
||||
import sys
|
||||
from collections import Sequence
|
||||
from itertools import count
|
||||
from ..exceptions import (SerializationError, ListSerializationError, ObjectSerializationError,
|
||||
DeserializationError, ListDeserializationError,
|
||||
ObjectDeserializationError)
|
||||
from ..sedes.binary import Binary as BinaryClass
|
||||
|
||||
if sys.version_info.major == 2:
|
||||
from itertools import izip as zip
|
||||
|
||||
|
||||
def is_sedes(obj):
|
||||
"""Check if `obj` is a sedes object.
|
||||
|
||||
A sedes object is characterized by having the methods `serialize(obj)` and
|
||||
`deserialize(serial)`.
|
||||
"""
|
||||
# return all(hasattr(obj, m) for m in ('serialize', 'deserialize'))
|
||||
return hasattr(obj, 'serialize') and hasattr(obj, 'deserialize')
|
||||
|
||||
|
||||
def is_sequence(obj):
|
||||
"""Check if `obj` is a sequence, but not a string or bytes."""
|
||||
return isinstance(obj, Sequence) and not BinaryClass.is_valid_type(obj)
|
||||
|
||||
|
||||
class List(list):
|
||||
|
||||
"""A sedes for lists, implemented as a list of other sedes objects.
|
||||
|
||||
:param strict: If true (de)serializing lists that have a length not
|
||||
matching the sedes length will result in an error. If false
|
||||
(de)serialization will stop as soon as either one of the
|
||||
lists runs out of elements.
|
||||
"""
|
||||
|
||||
def __init__(self, elements=[], strict=True):
|
||||
super(List, self).__init__()
|
||||
self.strict = strict
|
||||
for e in elements:
|
||||
if is_sedes(e):
|
||||
self.append(e)
|
||||
elif isinstance(e, Sequence):
|
||||
self.append(List(e))
|
||||
else:
|
||||
raise TypeError('Instances of List must only contain sedes '
|
||||
'objects or nested sequences thereof.')
|
||||
|
||||
def serialize(self, obj):
|
||||
if not is_sequence(obj):
|
||||
raise ListSerializationError('Can only serialize sequences', obj)
|
||||
if (self.strict and len(self) != len(obj)) or len(self) < len(obj):
|
||||
raise ListSerializationError('List has wrong length', obj)
|
||||
result = []
|
||||
for index, (element, sedes) in enumerate(zip(obj, self)):
|
||||
try:
|
||||
result.append(sedes.serialize(element))
|
||||
except SerializationError as e:
|
||||
raise ListSerializationError(obj=obj, element_exception=e, index=index)
|
||||
return result
|
||||
|
||||
def deserialize(self, serial):
|
||||
if not is_sequence(serial):
|
||||
raise ListDeserializationError('Can only deserialize sequences', serial)
|
||||
result = []
|
||||
element_iterator = iter(serial)
|
||||
sedes_iterator = iter(self)
|
||||
elements_consumed = False
|
||||
sedes_consumed = False
|
||||
for index in count():
|
||||
try:
|
||||
element = next(element_iterator)
|
||||
except StopIteration:
|
||||
elements_consumed = True
|
||||
try:
|
||||
sedes = next(sedes_iterator)
|
||||
except StopIteration:
|
||||
sedes_consumed = True
|
||||
if not (sedes_consumed or elements_consumed):
|
||||
try:
|
||||
result.append(sedes.deserialize(element))
|
||||
except DeserializationError as e:
|
||||
raise ListDeserializationError(serial=serial, element_exception=e, index=index)
|
||||
else:
|
||||
if self.strict and not (sedes_consumed and elements_consumed):
|
||||
raise ListDeserializationError('List has wrong length', serial)
|
||||
break
|
||||
return tuple(result)
|
||||
|
||||
|
||||
class CountableList(object):
|
||||
|
||||
"""A sedes for lists of arbitrary length.
|
||||
|
||||
:param element_sedes: when (de-)serializing a list, this sedes will be
|
||||
applied to all of its elements
|
||||
:param max_length: maximum number of allowed elements, or `None` for no limit
|
||||
"""
|
||||
|
||||
def __init__(self, element_sedes, max_length=None):
|
||||
self.element_sedes = element_sedes
|
||||
self.max_length = max_length
|
||||
|
||||
def serialize(self, obj):
|
||||
if not is_sequence(obj):
|
||||
raise ListSerializationError('Can only serialize sequences', obj)
|
||||
result = []
|
||||
for index, element in enumerate(obj):
|
||||
try:
|
||||
result.append(self.element_sedes.serialize(element))
|
||||
except SerializationError as e:
|
||||
raise ListSerializationError(obj=obj, element_exception=e, index=index)
|
||||
if self.max_length is not None and len(result) > self.max_length:
|
||||
raise ListSerializationError('Too many elements ({}, allowed '
|
||||
'{})'.format(len(result), self.max_length), obj)
|
||||
return result
|
||||
|
||||
def deserialize(self, serial):
|
||||
if not is_sequence(serial):
|
||||
raise ListDeserializationError('Can only deserialize sequences', serial)
|
||||
result = []
|
||||
for index, element in enumerate(serial):
|
||||
try:
|
||||
result.append(self.element_sedes.deserialize(element))
|
||||
except DeserializationError as e:
|
||||
raise ListDeserializationError(serial=serial, element_exception=e, index=index)
|
||||
if self.max_length is not None and index >= self.max_length:
|
||||
raise ListDeserializationError('Too many elements (more than '
|
||||
'{})'.format(self.max_length), serial)
|
||||
return tuple(result)
|
||||
|
||||
|
||||
class Serializable(object):
|
||||
|
||||
"""Base class for objects which can be serialized into RLP lists.
|
||||
|
||||
:attr:`fields` defines which attributes are serialized and how this is
|
||||
done. It is expected to be an ordered sequence of 2-tuples
|
||||
``(name, sedes)``. Here, ``name`` is the name of an attribute and ``sedes``
|
||||
is the sedes object that will be used to serialize the corresponding
|
||||
attribute. The object as a whole is then serialized as a list of those
|
||||
fields.
|
||||
|
||||
:cvar fields: a list of 2-tuples ``(name, sedes)`` where ``name`` is a
|
||||
string corresponding to an attribute and ``sedes`` is the
|
||||
sedes object used for (de)serializing the attribute.
|
||||
:param \*args: initial values for the first attributes defined via
|
||||
:attr:`fields`
|
||||
:param \*\*kwargs: initial values for all attributes not initialized via
|
||||
positional arguments
|
||||
:ivar _cached_rlp: can be used to store the object's RLP code (by default
|
||||
`None`)
|
||||
:ivar _mutable: if `False`, all attempts to set field values will fail (by
|
||||
default `True`, unless created with :meth:`deserialize`)
|
||||
"""
|
||||
|
||||
fields = tuple()
|
||||
_sedes = None
|
||||
_mutable = True
|
||||
_cached_rlp = None
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
|
||||
# check keyword arguments are known
|
||||
field_set = set(field for field, _ in self.fields)
|
||||
|
||||
# set positional arguments
|
||||
for (field, _), arg in zip(self.fields, args):
|
||||
setattr(self, field, arg)
|
||||
field_set.remove(field)
|
||||
|
||||
# set keyword arguments, if not already set
|
||||
for (field, value) in kwargs.items():
|
||||
if field in field_set:
|
||||
setattr(self, field, value)
|
||||
field_set.remove(field)
|
||||
|
||||
if len(field_set) != 0:
|
||||
raise TypeError('Not all fields initialized')
|
||||
|
||||
def __setattr__(self, attr, value):
|
||||
try:
|
||||
mutable = self.is_mutable()
|
||||
except AttributeError:
|
||||
mutable = True
|
||||
self.__dict__['_mutable'] = True # don't call __setattr__ again
|
||||
if mutable or attr not in set(field for field, _ in self.fields):
|
||||
super(Serializable, self).__setattr__(attr, value)
|
||||
else:
|
||||
raise ValueError('Tried to mutate immutable object')
|
||||
|
||||
def __eq__(self, other):
|
||||
"""Two objects are equal, if they are equal after serialization."""
|
||||
if not hasattr(other.__class__, 'serialize'):
|
||||
return False
|
||||
return self.serialize(self) == other.serialize(other)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self == other
|
||||
|
||||
def is_mutable(self):
|
||||
"""Checks if the object is mutable"""
|
||||
return self._mutable
|
||||
|
||||
def make_immutable(self):
|
||||
"""Make it immutable to prevent accidental changes.
|
||||
|
||||
`obj.make_immutable` is equivalent to `make_immutable(obj)`, but doesn't return
|
||||
anything.
|
||||
"""
|
||||
make_immutable(self)
|
||||
|
||||
def make_mutable(self):
|
||||
"""Make it mutable.
|
||||
|
||||
`obj.make_mutable` is equivalent to `make_mutable(obj)`, but doesn't return
|
||||
anything.
|
||||
"""
|
||||
make_mutable(self)
|
||||
|
||||
@classmethod
|
||||
def get_sedes(cls):
|
||||
if not cls._sedes:
|
||||
cls._sedes = List(sedes for _, sedes in cls.fields)
|
||||
return cls._sedes
|
||||
|
||||
@classmethod
|
||||
def serialize(cls, obj):
|
||||
try:
|
||||
field_values = [getattr(obj, field) for field, _ in cls.fields]
|
||||
except AttributeError:
|
||||
raise ObjectSerializationError('Cannot serialize this object (missing attribute)', obj)
|
||||
try:
|
||||
result = cls.get_sedes().serialize(field_values)
|
||||
except ListSerializationError as e:
|
||||
raise ObjectSerializationError(obj=obj, sedes=cls, list_exception=e)
|
||||
else:
|
||||
return result
|
||||
|
||||
@classmethod
|
||||
def deserialize(cls, serial, exclude=None, mutable=False, **kwargs):
|
||||
try:
|
||||
values = cls.get_sedes().deserialize(serial)
|
||||
except ListDeserializationError as e:
|
||||
raise ObjectDeserializationError(serial=serial, sedes=cls, list_exception=e)
|
||||
|
||||
params = {
|
||||
field: value
|
||||
for (field, _), value
|
||||
in zip(cls.fields, values)
|
||||
if not exclude or field not in exclude
|
||||
}
|
||||
obj = cls(**dict(list(params.items()) + list(kwargs.items())))
|
||||
if mutable:
|
||||
return make_mutable(obj)
|
||||
else:
|
||||
return make_immutable(obj)
|
||||
|
||||
@classmethod
|
||||
def exclude(cls, excluded_fields):
|
||||
"""Create a new sedes considering only a reduced set of fields."""
|
||||
class SerializableExcluded(cls):
|
||||
fields = [(field, sedes) for field, sedes in cls.fields
|
||||
if field not in excluded_fields]
|
||||
_sedes = None
|
||||
return SerializableExcluded
|
||||
|
||||
|
||||
def make_immutable(x):
|
||||
"""Do your best to make `x` as immutable as possible.
|
||||
|
||||
If `x` is a sequence, apply this function recursively to all elements and return a tuple
|
||||
containing them. If `x` is an instance of :class:`rlp.Serializable`, apply this function to its
|
||||
fields, and set :attr:`_mutable` to `False`. If `x` is neither of the above, just return `x`.
|
||||
|
||||
:returns: `x` after making it immutable
|
||||
"""
|
||||
if isinstance(x, Serializable):
|
||||
x._mutable = True
|
||||
for field, _ in x.fields:
|
||||
attr = getattr(x, field)
|
||||
try:
|
||||
setattr(x, field, make_immutable(attr))
|
||||
except AttributeError:
|
||||
pass # respect read only properties
|
||||
x._mutable = False
|
||||
return x
|
||||
elif is_sequence(x):
|
||||
return tuple(make_immutable(element) for element in x)
|
||||
else:
|
||||
return x
|
||||
|
||||
|
||||
def make_mutable(x):
|
||||
"""Do your best to make `x` as mutable as possible.
|
||||
|
||||
If `x` is a sequence, apply this function recursively to all elements and return a list
|
||||
containing them. If `x` is an instance of :class:`rlp.Serializable`, apply this function to its
|
||||
fields, and set :attr:`_mutable` to `True`. If `x` is neither of the above, just return `x`.
|
||||
|
||||
:returns: `x` after making it mutable
|
||||
"""
|
||||
if isinstance(x, Serializable):
|
||||
x._mutable = True
|
||||
for field, _ in x.fields:
|
||||
attr = getattr(x, field)
|
||||
try:
|
||||
setattr(x, field, make_mutable(attr))
|
||||
except AttributeError:
|
||||
pass # respect read only properties
|
||||
return x
|
||||
elif is_sequence(x):
|
||||
return list(make_mutable(element) for element in x)
|
||||
else:
|
||||
return x
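An illustrative `Serializable` subclass (not part of the commit; assumes the package is importable as `web3.rlp`): declaring `fields` is enough for `encode`/`decode` to round-trip the object, and the decoded instance comes back immutable:

>>> from web3 import rlp
>>> from web3.rlp.sedes import big_endian_int, binary
>>> class Dog(rlp.Serializable):
...     fields = (('name', binary), ('age', big_endian_int))
...
>>> rlp.decode(rlp.encode(Dog(name=b'rex', age=4)), Dog).age
4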
|
||||
30
web3/rlp/sedes/raw.py
Executable file
@ -0,0 +1,30 @@
|
||||
"""
|
||||
A sedes that does nothing. Thus, everything that can be directly encoded by RLP
|
||||
is serializable. This sedes can be used as a placeholder when deserializing
|
||||
larger structures.
|
||||
"""
|
||||
|
||||
|
||||
from collections import Sequence
|
||||
from ..exceptions import SerializationError
|
||||
from ..utils import Atomic
|
||||
|
||||
|
||||
def serializable(obj):
|
||||
if isinstance(obj, Atomic):
|
||||
return True
|
||||
elif isinstance(obj, Sequence):
|
||||
return all(map(serializable, obj))
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def serialize(obj):
|
||||
if not serializable(obj):
|
||||
raise SerializationError('Can only serialize nested lists of strings',
|
||||
obj)
|
||||
return obj
|
||||
|
||||
|
||||
def deserialize(serial):
|
||||
return serial
|
||||
5
web3/rlp/utils.py
Executable file
@ -0,0 +1,5 @@
|
||||
import sys
|
||||
if sys.version_info.major == 2:
|
||||
from .utils_py2 import *
|
||||
else:
|
||||
from .utils_py3 import *
|
||||
84
web3/rlp/utils_py2.py
Executable file
@ -0,0 +1,84 @@
|
||||
import abc
|
||||
import struct
|
||||
import codecs
|
||||
import binascii
|
||||
|
||||
|
||||
class Atomic(object):
|
||||
|
||||
"""ABC for objects that can be RLP encoded as is."""
|
||||
__metaclass__ = abc.ABCMeta
|
||||
|
||||
|
||||
Atomic.register(str)
|
||||
Atomic.register(bytearray)
|
||||
Atomic.register(unicode)
|
||||
|
||||
bytes_to_str = str
|
||||
ascii_chr = chr
|
||||
|
||||
|
||||
def str_to_bytes(value):
|
||||
if isinstance(value, (bytes, bytearray)):
|
||||
return bytes(value)
|
||||
elif isinstance(value, unicode):
|
||||
return codecs.encode(value, 'utf8')
|
||||
else:
|
||||
raise TypeError("Value must be text, bytes, or bytearray")
|
||||
|
||||
|
||||
def _old_int_to_big_endian(value):
|
||||
cs = []
|
||||
while value > 0:
|
||||
cs.append(chr(value % 256))
|
||||
value /= 256
|
||||
s = ''.join(reversed(cs))
|
||||
return s
|
||||
|
||||
|
||||
def packl(lnum):
|
||||
if lnum == 0:
|
||||
return b'\0'
|
||||
s = hex(lnum)[2:]
|
||||
s = s.rstrip('L')
|
||||
if len(s) & 1:
|
||||
s = '0' + s
|
||||
s = binascii.unhexlify(s)
|
||||
return s
|
||||
|
||||
int_to_big_endian = packl
|
||||
|
||||
|
||||
def big_endian_to_int(value):
|
||||
if len(value) == 1:
|
||||
return ord(value)
|
||||
elif len(value) <= 8:
|
||||
return struct.unpack('>Q', value.rjust(8, '\x00'))[0]
|
||||
else:
|
||||
return int(encode_hex(value), 16)
|
||||
|
||||
|
||||
def is_integer(value):
|
||||
return isinstance(value, (int, long))
|
||||
|
||||
|
||||
def decode_hex(s):
|
||||
if isinstance(s, bytearray):
|
||||
s = str(s)
|
||||
if not isinstance(s, (str, unicode)):
|
||||
raise TypeError('Value must be an instance of str or unicode')
|
||||
return s.decode('hex')
|
||||
|
||||
|
||||
def encode_hex(s):
|
||||
if isinstance(s, bytearray):
|
||||
s = str(s)
|
||||
if not isinstance(s, (str, unicode)):
|
||||
raise TypeError('Value must be an instance of str or unicode')
|
||||
return s.encode('hex')
|
||||
|
||||
|
||||
def safe_ord(s):
|
||||
if isinstance(s, int):
|
||||
return s
|
||||
return ord(s)
|
||||
67
web3/rlp/utils_py3.py
Executable file
@ -0,0 +1,67 @@
|
||||
import abc
|
||||
import binascii
|
||||
from math import ceil
|
||||
|
||||
|
||||
class Atomic(type.__new__(abc.ABCMeta, 'metaclass', (), {})):
|
||||
"""ABC for objects that can be RLP encoded as is."""
|
||||
pass
|
||||
|
||||
|
||||
Atomic.register(str)
|
||||
Atomic.register(bytes)
|
||||
|
||||
|
||||
def str_to_bytes(value):
|
||||
if isinstance(value, bytearray):
|
||||
value = bytes(value)
|
||||
if isinstance(value, bytes):
|
||||
return value
|
||||
return bytes(value, 'utf-8')
|
||||
|
||||
|
||||
def bytes_to_str(value):
|
||||
if isinstance(value, str):
|
||||
return value
|
||||
return value.decode('utf-8')
|
||||
|
||||
|
||||
def ascii_chr(value):
|
||||
return bytes([value])
|
||||
|
||||
|
||||
def int_to_big_endian(value):
|
||||
byte_length = max(ceil(value.bit_length() / 8), 1)
|
||||
return (value).to_bytes(byte_length, byteorder='big')
|
||||
|
||||
|
||||
def big_endian_to_int(value):
|
||||
return int.from_bytes(value, byteorder='big')
|
||||
|
||||
|
||||
def is_integer(value):
|
||||
return isinstance(value, int)
|
||||
|
||||
|
||||
def decode_hex(s):
|
||||
if isinstance(s, str):
|
||||
return bytes.fromhex(s)
|
||||
if isinstance(s, (bytes, bytearray)):
|
||||
return binascii.unhexlify(s)
|
||||
raise TypeError('Value must be an instance of str or bytes')
|
||||
|
||||
|
||||
def encode_hex(b):
|
||||
if isinstance(b, str):
|
||||
b = bytes(b, 'utf-8')
|
||||
if isinstance(b, (bytes, bytearray)):
|
||||
return str(binascii.hexlify(b), 'utf-8')
|
||||
raise TypeError('Value must be an instance of str or bytes')
|
||||
|
||||
|
||||
def safe_ord(c):
|
||||
try:
|
||||
return ord(c)
|
||||
except TypeError:
|
||||
assert isinstance(c, int)
|
||||
return c
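A small illustrative check of the helpers shared by both compatibility modules (not part of the commit; byte literals shown Python 3 style, the Python 2 module returns equivalent `str` values):

>>> from web3.rlp.utils import int_to_big_endian, big_endian_to_int, encode_hex
>>> int_to_big_endian(1024)
b'\x04\x00'
>>> big_endian_to_int(b'\x04\x00')
1024
>>> encode_hex(b'\x04\x00')
'0400'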
|
||||
@ -1,11 +1,11 @@
|
||||
from web3 import formatters
|
||||
from web3.utils.encoding import (
|
||||
from ..web3 import formatters
|
||||
from .utils.encoding import (
|
||||
to_decimal,
|
||||
)
|
||||
from web3.utils.functional import (
|
||||
from .utils.functional import (
|
||||
apply_formatters_to_return,
|
||||
)
|
||||
from web3.utils.filters import (
|
||||
from .utils.filters import (
|
||||
ShhFilter,
|
||||
)
|
||||
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
from web3.utils.encoding import (
|
||||
from .utils.encoding import (
|
||||
to_decimal,
|
||||
)
|
||||
from web3.utils.functional import (
|
||||
from .utils.functional import (
|
||||
apply_formatters_to_return,
|
||||
)
|
||||
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
from web3.utils.functional import (
|
||||
from .utils.functional import (
|
||||
apply_formatters_to_return,
|
||||
)
|
||||
from web3.formatters import (
|
||||
from .formatters import (
|
||||
transaction_pool_content_formatter,
|
||||
transaction_pool_inspect_formatter,
|
||||
)
|
||||
|
||||
@ -14,7 +14,7 @@ from ..eth_utils import (
|
||||
is_address,
|
||||
)
|
||||
|
||||
from eth_abi.abi import (
|
||||
from ..eth_abi.abi import (
|
||||
process_type,
|
||||
)
|
||||
|
||||
|
||||
@ -10,11 +10,11 @@ from gevent import ( # noqa: F401
|
||||
threading,
|
||||
)
|
||||
|
||||
import pylru
|
||||
from ...pylru import pylru
|
||||
|
||||
from geventhttpclient import HTTPClient
|
||||
|
||||
from web3.utils.six import urlparse
|
||||
from ...utils.six import urlparse
|
||||
|
||||
|
||||
_client_cache = pylru.lrucache(8)
|
||||
|
||||
@ -1,8 +1,9 @@
|
||||
import requests
|
||||
#import requests
|
||||
from google.appengine.api import urlfetch, urlfetch_stub
|
||||
|
||||
from ...pylru import pylru
|
||||
|
||||
from web3.utils.caching import generate_cache_key
|
||||
from ...utils.caching import generate_cache_key
|
||||
|
||||
|
||||
_session_cache = pylru.lrucache(8)
|
||||
@ -11,14 +12,20 @@ _session_cache = pylru.lrucache(8)
|
||||
def _get_session(*args, **kwargs):
|
||||
cache_key = generate_cache_key((args, kwargs))
|
||||
if cache_key not in _session_cache:
|
||||
_session_cache[cache_key] = requests.Session()
|
||||
_session_cache[cache_key] = urlfetch.Session()
|
||||
return _session_cache[cache_key]
|
||||
|
||||
|
||||
def make_post_request(endpoint_uri, data, *args, **kwargs):
|
||||
kwargs.setdefault('timeout', 10)
|
||||
session = _get_session(endpoint_uri)
|
||||
response = session.post(endpoint_uri, data=data, *args, **kwargs)
|
||||
response.raise_for_status()
|
||||
# session = _get_session(endpoint_uri)
|
||||
# response = urlfetch.post(endpoint_uri, data=data, *args, **kwargs)
|
||||
response = urlfetch.fetch(endpoint_uri, deadline=30, payload=data, method=urlfetch.POST)
|
||||
|
||||
#TODO: make this better!
|
||||
|
||||
#response.raise_for_status()
|
||||
|
||||
return response.content
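One way the `#TODO` above might be addressed, sketched as an assumption rather than as part of the commit: the `urlfetch.fetch` result exposes a `status_code` attribute, so a small guard between the fetch and the return could stand in for the dropped `raise_for_status()`:

# hypothetical guard; the exception type is a placeholder chosen for this sketch
if response.status_code != 200:
    raise ValueError("RPC call to {0} returned HTTP {1}".format(endpoint_uri, response.status_code))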
|
||||
|
||||
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
# String encodings and numeric representations
|
||||
import json
|
||||
|
||||
from rlp.sedes import big_endian_int
|
||||
from ..rlp.sedes import big_endian_int
|
||||
|
||||
from ..eth_utils import (
|
||||
is_string,
|
||||
@ -18,6 +18,8 @@ from .formatting import (
|
||||
is_prefixed,
|
||||
)
|
||||
|
||||
import struct
|
||||
|
||||
|
||||
@coerce_args_to_text
|
||||
def to_hex(value):
|
||||
|
||||
@ -8,12 +8,12 @@ from ..eth_utils import (
|
||||
event_abi_to_log_topic,
|
||||
)
|
||||
|
||||
from eth_abi import (
|
||||
from ..eth_abi.abi import (
|
||||
decode_abi,
|
||||
decode_single,
|
||||
encode_single,
|
||||
)
|
||||
from eth_abi.abi import (
|
||||
from ..eth_abi.abi import (
|
||||
process_type,
|
||||
)
|
||||
|
||||
|
||||
@ -1,2 +1,2 @@
|
||||
def raise_from(my_exception, other_exception):
|
||||
raise my_exception from other_exception
|
||||
# def raise_from(my_exception, other_exception):
|
||||
# raise my_exception from other_exception
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
def construct_user_agent(class_name):
|
||||
from web3 import __version__ as web3_version
|
||||
from ...web3 import __version__ as web3_version
|
||||
|
||||
user_agent = 'Web3.py/{version}/{class_name}'.format(
|
||||
version=web3_version,
|
||||
|
||||
@ -1,8 +1,8 @@
|
||||
import random
|
||||
|
||||
import rlp
|
||||
from rlp.sedes import big_endian_int, binary, Binary
|
||||
from rlp.utils import int_to_big_endian
|
||||
from ..rlp import encode, Serializable, decode
|
||||
from ..rlp.sedes import big_endian_int, binary, Binary
|
||||
from ..rlp.utils import int_to_big_endian
|
||||
|
||||
from ..eth_utils import (
|
||||
decode_hex,
|
||||
@ -84,7 +84,7 @@ TT256 = 2 ** 256
|
||||
address_sedes = Binary.fixed_length(20, allow_empty=True)
|
||||
|
||||
|
||||
class Transaction(rlp.Serializable):
|
||||
class Transaction(Serializable):
|
||||
"""
|
||||
# Derived from `pyethereum.transaction.Transaction`
|
||||
|
||||
@ -153,7 +153,7 @@ class Transaction(rlp.Serializable):
|
||||
)
|
||||
if has_invalid_signature_values:
|
||||
raise ValueError("Invalid signature values!")
|
||||
rlpdata = rlp.encode(self, UnsignedTransaction)
|
||||
rlpdata = encode(self, UnsignedTransaction)
|
||||
rawhash = keccak(rlpdata)
|
||||
|
||||
pk = PublicKey(flags=ALL_FLAGS)
|
||||
@ -208,7 +208,7 @@ class Transaction(rlp.Serializable):
|
||||
if key in (0, b'', b'\x00' * 32, b'0' * 64):
|
||||
raise ValueError("Zero privkey cannot sign")
|
||||
|
||||
rawhash = keccak(rlp.encode(self, UnsignedTransaction))
|
||||
rawhash = keccak(encode(self, UnsignedTransaction))
|
||||
|
||||
if len(key) in {64, 66}:
|
||||
# we need a binary key
|
||||
@ -239,11 +239,11 @@ def serialize_transaction(transaction):
|
||||
value=to_decimal(transaction['value']),
|
||||
data=decode_hex(transaction['data']),
|
||||
)
|
||||
return rlp.encode(unsigned_transaction, UnsignedTransaction)
|
||||
return encode(unsigned_transaction, UnsignedTransaction)
|
||||
|
||||
|
||||
def add_signature_to_transaction(serialize_transaction, signature):
|
||||
unsigned_transaction = rlp.decode(serialize_transaction, UnsignedTransaction)
|
||||
unsigned_transaction = decode(serialize_transaction, UnsignedTransaction)
|
||||
|
||||
v = (ord(signature[64]) if is_string(signature[64]) else signature[64]) + 27
|
||||
r = decode_big_endian_int(signature[0:32])
|
||||
|
||||
@ -1,9 +1,9 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
from web3.utils.encoding import (
|
||||
from .utils.encoding import (
|
||||
to_decimal,
|
||||
)
|
||||
from web3.utils.functional import (
|
||||
from .utils.functional import (
|
||||
apply_formatters_to_return,
|
||||
)
|
||||
|
||||
@ -14,7 +14,7 @@ class Version(object):
|
||||
|
||||
@property
|
||||
def api(self):
|
||||
from web3 import __version__
|
||||
from ..web3 import __version__
|
||||
return __version__
|
||||
|
||||
@property
|
||||
|
||||