'''This module implements specialized container datatypes providing
alternatives to Python's general purpose built-in containers, dict,
list, set, and tuple.

* namedtuple   factory function for creating tuple subclasses with named fields
* deque        list-like container with fast appends and pops on either end
* ChainMap     dict-like class for creating a single view of multiple mappings
* Counter      dict subclass for counting hashable objects
* OrderedDict  dict subclass that remembers the order entries were added
* defaultdict  dict subclass that calls a factory function to supply missing values
* UserDict     wrapper around dictionary objects for easier dict subclassing
* UserList     wrapper around list objects for easier list subclassing
* UserString   wrapper around string objects for easier string subclassing

'''
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict', 'ChainMap']
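
# A rough, illustrative sketch of a few of the containers listed above,
# written as an interpreter session (comments only, nothing runs at import
# time; only this module's public names are assumed):
#
#     >>> from collections import deque, Counter, defaultdict
#     >>> d = deque('abc')
#     >>> d.appendleft('z')
#     >>> d.pop()
#     'c'
#     >>> Counter('aabbbcccc').most_common(2)
#     [('c', 4), ('b', 3)]
#     >>> dd = defaultdict(list)
#     >>> dd['missing'].append(1)
#     >>> dd
#     defaultdict(<class 'list'>, {'missing': [1]})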

import _collections_abc
import heapq as _heapq
import sys as _sys

from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
from _weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr

from _collections import deque
_collections_abc.MutableSequence.register(deque)
from _collections import defaultdict

def __getattr__(name):
    # For backwards compatibility, continue to make the collections ABCs
    # through Python 3.6 available through the collections module.
    # Note, no new collections ABCs were added in Python 3.7
    if name in _collections_abc.__all__:
        obj = getattr(_collections_abc, name)
        import warnings
        warnings.warn("Using or importing the ABCs from 'collections' instead "
                      "of from 'collections.abc' is deprecated since Python 3.3, "
                      "and in 3.10 it will stop working",
                      DeprecationWarning, stacklevel=2)
        globals()[name] = obj
        return obj
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
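
# The effect of the module-level __getattr__ above, sketched as a session
# (illustrative only, on the Python versions covered by the warning text):
#
#     >>> import collections, collections.abc
#     >>> collections.Mapping is collections.abc.Mapping   # lazy lookup, warns
#     True
#     >>> collections.abc.Mapping                          # the supported spelling
#     <class 'collections.abc.Mapping'>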

################################################################################
### OrderedDict
################################################################################

class _OrderedDictKeysView(_collections_abc.KeysView):

    def __reversed__(self):
        yield from reversed(self._mapping)

class _OrderedDictItemsView(_collections_abc.ItemsView):

    def __reversed__(self):
        for key in reversed(self._mapping):
            yield (key, self._mapping[key])

class _OrderedDictValuesView(_collections_abc.ValuesView):

    def __reversed__(self):
        for key in reversed(self._mapping):
            yield self._mapping[key]

class _Link(object):
    __slots__ = 'prev', 'next', 'key', '__weakref__'

class OrderedDict(dict):
    'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The sentinel is in self.__hardroot with a weakref proxy in self.__root.
# The prev links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedDict.
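
    # For example, after OrderedDict([('a', 1), ('b', 2)]) the ring described
    # above looks like this (a sketch, with weakref proxies marked "~>"):
    #
    #     root.next -> link_a,  link_a.next -> link_b,  link_b.next -> root
    #     root.prev ~> link_b,  link_b.prev ~> link_a,  link_a.prev ~> root
    #
    # and self.__map is {'a': link_a, 'b': link_b}; the values 1 and 2 live
    # only in the inherited dict.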

    def __init__(self, other=(), /, **kwds):
        '''Initialize an ordered dictionary.  The signature is the same as
        regular dictionaries.  Keyword argument order is preserved.
        '''
        try:
            self.__root
        except AttributeError:
            self.__hardroot = _Link()
            self.__root = root = _proxy(self.__hardroot)
            root.prev = root.next = root
            self.__map = {}
        self.__update(other, **kwds)

    def __setitem__(self, key, value,
                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            self.__map[key] = link = Link()
            root = self.__root
            last = root.prev
            link.prev, link.next, link.key = last, root, key
            last.next = link
            root.prev = proxy(link)
        dict_setitem(self, key, value)
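
    # Illustrative consequence of the scheme above: a new key is linked at the
    # end, while assigning to an existing key only touches the inherited dict,
    # so its position is unchanged.
    #
    #     >>> od = OrderedDict()
    #     >>> od['a'] = 1; od['b'] = 2
    #     >>> od['a'] = 10               # existing key keeps its position
    #     >>> list(od.items())
    #     [('a', 10), ('b', 2)]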

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link = self.__map.pop(key)
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
        link.prev = None
        link.next = None

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root.next
        while curr is not root:
            yield curr.key
            curr = curr.next

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root.prev
        while curr is not root:
            yield curr.key
            curr = curr.prev

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        root = self.__root
        root.prev = root.next = root
        self.__map.clear()
        dict.clear(self)

    def popitem(self, last=True):
        '''Remove and return a (key, value) pair from the dictionary.

        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            link = root.prev
            link_prev = link.prev
            link_prev.next = root
            root.prev = link_prev
        else:
            link = root.next
            link_next = link.next
            root.next = link_next
            link_next.prev = root
        key = link.key
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
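
    # A short sketch of the LIFO/FIFO behavior documented above:
    #
    #     >>> od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    #     >>> od.popitem()
    #     ('c', 3)
    #     >>> od.popitem(last=False)
    #     ('a', 1)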

    def move_to_end(self, key, last=True):
        '''Move an existing element to the end (or beginning if last is false).

        Raise KeyError if the element does not exist.
        '''
        link = self.__map[key]
        link_prev = link.prev
        link_next = link.next
        soft_link = link_next.prev
        link_prev.next = link_next
        link_next.prev = link_prev
        root = self.__root
        if last:
            last = root.prev
            link.prev = last
            link.next = root
            root.prev = soft_link
            last.next = link
        else:
            first = root.next
            link.prev = root
            link.next = first
            first.prev = soft_link
            root.next = link
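
    # A short usage sketch:
    #
    #     >>> od = OrderedDict.fromkeys('abcde')
    #     >>> od.move_to_end('b')
    #     >>> ''.join(od)
    #     'acdeb'
    #     >>> od.move_to_end('b', last=False)
    #     >>> ''.join(od)
    #     'bacde'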

    def __sizeof__(self):
        sizeof = _sys.getsizeof
        n = len(self) + 1                       # number of links including root
        size = sizeof(self.__dict__)            # instance dictionary
        size += sizeof(self.__map) * 2          # internal dict and inherited dict
        size += sizeof(self.__hardroot) * n     # link objects
        size += sizeof(self.__root) * n         # proxy objects
        return size

    update = __update = _collections_abc.MutableMapping.update
"D.keys() -> a set-like object providing a view on D's keys"
return _OrderedDictKeysView(self)
"D.items() -> a set-like object providing a view on D's items"
return _OrderedDictItemsView(self)
"D.values() -> an object providing a view on D's values"
return _OrderedDictValuesView(self)
__ne__ = _collections_abc.MutableMapping.__ne__

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value.  If key is not found, d is returned if given, otherwise KeyError
        is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        '''Insert key with a value of default if key is not in the dictionary.

        Return the value for key if key is in the dictionary, else default.
        '''
        if key in self:
            return self[key]
        self[key] = default
        return default

    @_recursive_repr()
    def __repr__(self):
        'od.__repr__() <==> repr(od)'
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))

    def __reduce__(self):
        'Return state information for pickling'
        inst_dict = vars(self).copy()
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        return self.__class__, (), inst_dict or None, None, iter(self.items())

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''Create a new ordered dictionary with keys from iterable and values set to value.
        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return dict.__eq__(self, other) and all(map(_eq, self, other))
        return dict.__eq__(self, other)
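
    # The order-sensitivity described above, as a sketch:
    #
    #     >>> OrderedDict([('a', 1), ('b', 2)]) == OrderedDict([('b', 2), ('a', 1)])
    #     False
    #     >>> OrderedDict([('a', 1), ('b', 2)]) == {'b': 2, 'a': 1}
    #     True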

try:
    from _collections import OrderedDict
except ImportError:
    # Leave the pure Python version in place.
    pass

################################################################################
### namedtuple
################################################################################

try:
    from _collections import _tuplegetter
except ImportError:
    _tuplegetter = lambda index, doc: property(_itemgetter(index), doc=doc)

def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', ['x', 'y'])
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessible by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)

    """
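
    # The keyword-only parameters are not exercised above; a brief sketch of
    # 'defaults' and 'rename' (the type names here are arbitrary examples):
    #
    #     >>> Account = namedtuple('Account', ['owner', 'balance'], defaults=[0])
    #     >>> Account('alice')                # defaults apply to the rightmost fields
    #     Account(owner='alice', balance=0)
    #     >>> Row = namedtuple('Row', ['id', 'class'], rename=True)
    #     >>> Row._fields                     # invalid name 'class' becomes '_1'
    #     ('id', '_1')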

    # Validate the field names.  At the user's option, either generate an error
    # message or automatically replace the field name with a valid name.
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    typename = _sys.intern(str(typename))

    if rename:
        seen = set()
        for index, name in enumerate(field_names):
            if (not name.isidentifier()
                or _iskeyword(name)
                or name.startswith('_')
                or name in seen):
                field_names[index] = f'_{index}'
            seen.add(name)

    for name in [typename] + field_names:
        if type(name) is not str:
            raise TypeError('Type names and field names must be strings')
        if not name.isidentifier():
            raise ValueError('Type names and field names must be valid '
                             f'identifiers: {name!r}')
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a '
                             f'keyword: {name!r}')

    seen = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: '
                             f'{name!r}')
        if name in seen:
            raise ValueError(f'Encountered duplicate field name: {name!r}')
        seen.add(name)

    field_defaults = {}
    if defaults is not None:
        defaults = tuple(defaults)
        if len(defaults) > len(field_names):
            raise TypeError('Got more default values than field names')
        field_defaults = dict(reversed(list(zip(reversed(field_names),
                                                reversed(defaults)))))
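
    # For instance, field_names ['x', 'y', 'z'] with defaults (1, 2) produces
    # field_defaults {'y': 1, 'z': 2}: defaults attach to the rightmost fields.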

    # Variables used in the methods and docstrings
    field_names = tuple(map(_sys.intern, field_names))
    num_fields = len(field_names)
    arg_list = repr(field_names).replace("'", "")[1:-1]
    repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
    tuple_new = tuple.__new__
    _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip

    # Create all the named tuple methods to be added to the class namespace
    s = f'def __new__(_cls, {arg_list}): return _tuple_new(_cls, ({arg_list}))'
    namespace = {'_tuple_new': tuple_new, '__name__': f'namedtuple_{typename}'}
    # Note: exec() has the side-effect of interning the field names
    exec(s, namespace)
    __new__ = namespace['__new__']
    __new__.__doc__ = f'Create new instance of {typename}({arg_list})'
    if defaults is not None:
        __new__.__defaults__ = defaults
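
    # For a two-field type such as namedtuple('Point', ['x', 'y']), the source
    # string built above is simply:
    #
    #     def __new__(_cls, x, y): return _tuple_new(_cls, (x, y))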

    @classmethod
    def _make(cls, iterable):
        result = tuple_new(cls, iterable)
        if _len(result) != num_fields:
            raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
        return result

    _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
                              'or iterable')

    def _replace(self, /, **kwds):
        result = self._make(_map(kwds.pop, field_names, self))
        if kwds:
            raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
        return result

    _replace.__doc__ = (f'Return a new {typename} object replacing specified '
                        'fields with new values')

    def __repr__(self):
        'Return a nicely formatted representation string'
        return self.__class__.__name__ + repr_fmt % self

    def _asdict(self):
        'Return a new dict which maps field names to their values.'
        return _dict(_zip(self._fields, self))

    def __getnewargs__(self):
        'Return self as a plain tuple.  Used by copy and pickle.'
        return _tuple(self)
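
    # _make is not exercised in the factory docstring above; a sketch:
    #
    #     >>> Point = namedtuple('Point', ['x', 'y'])
    #     >>> Point._make([11, 22])          # build from an existing iterable
    #     Point(x=11, y=22)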

    # Modify function metadata to help with introspection and debugging
    for method in (__new__, _make.__func__, _replace,
                   __repr__, _asdict, __getnewargs__):
        method.__qualname__ = f'{typename}.{method.__name__}'

    # Build-up the class namespace dictionary
    # and use type() to build the result class
    class_namespace = {
        '__doc__': f'{typename}({arg_list})',
        '__slots__': (),
        '_fields': field_names,
        '_field_defaults': field_defaults,
        # alternate spelling for backward compatibility
        '_fields_defaults': field_defaults,
        '__new__': __new__,
        '_make': _make,
        '_replace': _replace,
        '__repr__': __repr__,
        '_asdict': _asdict,
        '__getnewargs__': __getnewargs__,
    }
    for index, name in enumerate(field_names):
        doc = _sys.intern(f'Alias for field number {index}')
        class_namespace[name] = _tuplegetter(index, doc)

    result = type(typename, (tuple,), class_namespace)

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython), or where the user has
    # specified a particular module.
    if module is None:
        try:
            module = _sys._getframe(1).f_globals.get('__name__', '__main__')
        except (AttributeError, ValueError):
            pass
    if module is not None:
        result.__module__ = module

    return result

########################################################################
### Counter
########################################################################

def _count_elements(mapping, iterable):
    'Tally elements from the iterable.'
    mapping_get = mapping.get
    for elem in iterable:
        mapping[elem] = mapping_get(elem, 0) + 1

try:                                    # Load C helper function if available
    from _collections import _count_elements
except ImportError:
    pass
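
# A sketch of the pure-Python fallback's behavior (the C helper, when it is
# importable, is a drop-in replacement with the same signature, used by
# Counter.update):
#
#     >>> d = {}
#     >>> _count_elements(d, 'abracadabra')
#     >>> sorted(d.items())
#     [('a', 5), ('b', 2), ('c', 1), ('d', 1), ('r', 2)]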

class Counter(dict):
    '''Dict subclass for counting hashable items.  Sometimes called a bag
    or multiset.  Elements are stored as dictionary keys and their counts
    are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions