redis 客户端采用 RedisCluster 与 StrictRedis 两个类,二者的区别在于:前者用于 redis 集群,后者用于非集群(单机)环境。
因此,当在配置文件内定义 is_colony=True 时会采用 RedisCluster(集群模式),否则采用 StrictRedis(非集群模式)。
RedisCluster 的入参及使用说明详见 redis.RedisCluster,以下为其源码的部分摘录(仅含签名与文档字符串,非完整实现):
class Redis(object):"""Implementation of the Redis protocol.This abstract class provides a Python interface to all Redis commandsand an implementation of the Redis protocol.Connection and Pipeline derive from this, implementing howthe commands are sent and received to the Redis server"""RESPONSE_CALLBACKS = dict_merge(string_keys_to_dict('AUTH EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST ''PSETEX RENAMENX SISMEMBER SMOVE SETEX SETNX',bool),string_keys_to_dict('BITCOUNT BITPOS DECRBY DEL EXISTS GEOADD GETBIT HDEL HLEN ''HSTRLEN INCRBY LINSERT LLEN LPUSHX PFADD PFCOUNT RPUSHX SADD ''SCARD SDIFFSTORE SETBIT SETRANGE SINTERSTORE SREM STRLEN ''SUNIONSTORE UNLINK XACK XDEL XLEN XTRIM ZCARD ZLEXCOUNT ZREM ''ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE',int),string_keys_to_dict('INCRBYFLOAT HINCRBYFLOAT',float),string_keys_to_dict(# these return OK, or int if redis-server is >=1.3.4'LPUSH RPUSH',lambda r: isinstance(r, (long, int)) and r or nativestr(r) == 'OK'),string_keys_to_dict('SORT', sort_return_tuples),string_keys_to_dict('ZSCORE ZINCRBY GEODIST', float_or_none),string_keys_to_dict('FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE READONLY READWRITE ''RENAME SAVE SELECT SHUTDOWN SLAVEOF SWAPDB WATCH UNWATCH ',bool_ok),string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None),string_keys_to_dict('SDIFF SINTER SMEMBERS SUNION',lambda r: r and set(r) or set()),string_keys_to_dict('ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE',zset_score_pairs),string_keys_to_dict('BZPOPMIN BZPOPMAX', \lambda r: r and (r[0], r[1], float(r[2])) or None),string_keys_to_dict('ZRANK ZREVRANK', int_or_none),string_keys_to_dict('XREVRANGE XRANGE', parse_stream_list),string_keys_to_dict('XREAD XREADGROUP', parse_xread),string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True),{'ACL CAT': lambda r: list(map(nativestr, r)),'ACL DELUSER': int,'ACL GENPASS': nativestr,'ACL GETUSER': parse_acl_getuser,'ACL LIST': lambda r: list(map(nativestr, r)),'ACL LOAD': 
bool_ok,'ACL SAVE': bool_ok,'ACL SETUSER': bool_ok,'ACL USERS': lambda r: list(map(nativestr, r)),'ACL WHOAMI': nativestr,'CLIENT GETNAME': lambda r: r and nativestr(r),'CLIENT ID': int,'CLIENT KILL': parse_client_kill,'CLIENT LIST': parse_client_list,'CLIENT SETNAME': bool_ok,'CLIENT UNBLOCK': lambda r: r and int(r) == 1 or False,'CLIENT PAUSE': bool_ok,'CLUSTER ADDSLOTS': bool_ok,'CLUSTER COUNT-FAILURE-REPORTS': lambda x: int(x),'CLUSTER COUNTKEYSINSLOT': lambda x: int(x),'CLUSTER DELSLOTS': bool_ok,'CLUSTER FAILOVER': bool_ok,'CLUSTER FORGET': bool_ok,'CLUSTER INFO': parse_cluster_info,'CLUSTER KEYSLOT': lambda x: int(x),'CLUSTER MEET': bool_ok,'CLUSTER NODES': parse_cluster_nodes,'CLUSTER REPLICATE': bool_ok,'CLUSTER RESET': bool_ok,'CLUSTER SAVECONFIG': bool_ok,'CLUSTER SET-CONFIG-EPOCH': bool_ok,'CLUSTER SETSLOT': bool_ok,'CLUSTER SLAVES': parse_cluster_nodes,'CONFIG GET': parse_config_get,'CONFIG RESETSTAT': bool_ok,'CONFIG SET': bool_ok,'DEBUG OBJECT': parse_debug_object,'GEOHASH': lambda r: list(map(nativestr_or_none, r)),'GEOPOS': lambda r: list(map(lambda ll: (float(ll[0]),float(ll[1]))if ll is not None else None, r)),'GEORADIUS': parse_georadius_generic,'GEORADIUSBYMEMBER': parse_georadius_generic,'HGETALL': lambda r: r and pairs_to_dict(r) or {},'HSCAN': parse_hscan,'INFO': parse_info,'LASTSAVE': timestamp_to_datetime,'MEMORY PURGE': bool_ok,'MEMORY STATS': parse_memory_stats,'MEMORY USAGE': int_or_none,'OBJECT': parse_object,'PING': lambda r: nativestr(r) == 'PONG','PUBSUB NUMSUB': parse_pubsub_numsub,'RANDOMKEY': lambda r: r and r or None,'SCAN': parse_scan,'SCRIPT EXISTS': lambda r: list(imap(bool, r)),'SCRIPT FLUSH': bool_ok,'SCRIPT KILL': bool_ok,'SCRIPT LOAD': nativestr,'SENTINEL GET-MASTER-ADDR-BY-NAME': parse_sentinel_get_master,'SENTINEL MASTER': parse_sentinel_master,'SENTINEL MASTERS': parse_sentinel_masters,'SENTINEL MONITOR': bool_ok,'SENTINEL REMOVE': bool_ok,'SENTINEL SENTINELS': parse_sentinel_slaves_and_sentinels,'SENTINEL SET': 
bool_ok,'SENTINEL SLAVES': parse_sentinel_slaves_and_sentinels,'SET': lambda r: r and nativestr(r) == 'OK','SLOWLOG GET': parse_slowlog_get,'SLOWLOG LEN': int,'SLOWLOG RESET': bool_ok,'SSCAN': parse_scan,'TIME': lambda x: (int(x[0]), int(x[1])),'XCLAIM': parse_xclaim,'XGROUP CREATE': bool_ok,'XGROUP DELCONSUMER': int,'XGROUP DESTROY': bool,'XGROUP SETID': bool_ok,'XINFO CONSUMERS': parse_list_of_dicts,'XINFO GROUPS': parse_list_of_dicts,'XINFO STREAM': parse_xinfo_stream,'XPENDING': parse_xpending,'ZADD': parse_zadd,'ZSCAN': parse_zscan,})@classmethoddef from_url(cls, url, db=None, **kwargs):"""Return a Redis client object configured from the given URLFor example::redis://[[username]:[password]]@localhost:6379/0rediss://[[username]:[password]]@localhost:6379/0unix://[[username]:[password]]@/path/to/socket.sock?db=0Three URL schemes are supported:- ```redis://``<http://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates anormal TCP socket connection- ```rediss://``<http://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates aSSL wrapped TCP socket connection- ``unix://`` creates a Unix Domain Socket connectionThere are several ways to specify a database number. The parse functionwill return the first specified option:1. A ``db`` querystring option, e.g. redis://localhost?db=02. If using the redis:// scheme, the path argument of the url, e.g.redis://localhost/03. The ``db`` argument to this function.If none of these options are specified, db=0 is used.Any additional querystring arguments and keyword arguments will bepassed along to the ConnectionPool class's initializer. 
In the caseof conflicting arguments, querystring arguments always win."""connection_pool = ConnectionPool.from_url(url, db=db, **kwargs)return cls(connection_pool=connection_pool)def __init__(self, host='localhost', port=6379,db=0, password=None, socket_timeout=None,socket_connect_timeout=None,socket_keepalive=None, socket_keepalive_options=None,connection_pool=None, unix_socket_path=None,encoding='utf-8', encoding_errors='strict',charset=None, errors=None,decode_responses=False, retry_on_timeout=False,ssl=False, ssl_keyfile=None, ssl_certfile=None,ssl_cert_reqs='required', ssl_ca_certs=None,ssl_check_hostname=False,max_connections=None, single_connection_client=False,health_check_interval=0, client_name=None, username=None):def pipeline(self, transaction=True, shard_hint=None):"""Return a new pipeline object that can queue multiple commands forlater execution. ``transaction`` indicates whether all commandsshould be executed atomically. Apart from making a group of operationsatomic, pipelines are useful for reducing the back-and-forth overheadbetween the client and server."""def transaction(self, func, *watches, **kwargs):"""Convenience method for executing the callable `func` as a transactionwhile watching all keys specified in `watches`. The 'func' callableshould expect a single argument which is a Pipeline object."""def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None,lock_class=None, thread_local=True):"""Return a new Lock object using key ``name`` that mimicsthe behavior of threading.Lock.If specified, ``timeout`` indicates a maximum life for the lock.By default, it will remain locked until release() is called.``sleep`` indicates the amount of time to sleep per loop iterationwhen the lock is in blocking mode and another client is currentlyholding the lock.``blocking_timeout`` indicates the maximum amount of time in seconds tospend trying to acquire the lock. A value of ``None`` indicatescontinue trying forever. 
``blocking_timeout`` can be specified as afloat or integer, both representing the number of seconds to wait.``lock_class`` forces the specified lock implementation.``thread_local`` indicates whether the lock token is placed inthread-local storage. By default, the token is placed in thread localstorage so that a thread only sees its token, not a token set byanother thread. Consider the following timeline:time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.thread-1 sets the token to "abc"time: 1, thread-2 blocks trying to acquire `my-lock` using theLock instance.time: 5, thread-1 has not yet completed. redis expires the lockkey.time: 5, thread-2 acquired `my-lock` now that it's available.thread-2 sets the token to "xyz"time: 6, thread-1 finishes its work and calls release(). if thetoken is *not* stored in thread local storage, thenthread-1 would see the token value as "xyz" and would beable to successfully release the thread-2's lock.In some use cases it's necessary to disable thread local storage. Forexample, if you have code where one thread acquires a lock and passesthat lock instance to a worker thread to release later. If threadlocal storage isn't disabled in this case, the worker thread won't seethe token set by the thread that acquired the lock. Our assumptionis that these cases aren't common and as such default to usingthread local storage. """def pubsub(self, **kwargs):"""Return a Publish/Subscribe object. 
With this object, you cansubscribe to channels and listen for messages that get published tothem."""def close(self):""""""def execute_command(self, *args, **options):"Execute a command and return a parsed response"def acl_list(self):"Return a list of all ACLs on the server"def acl_load(self):"""Load ACL rules from the configured ``aclfile``.Note that the server must be configured with the ``aclfile``directive to be able to load ACL rules from an aclfile."""def acl_save(self):"""Save ACL rules to the configured ``aclfile``.Note that the server must be configured with the ``aclfile``directive to be able to save ACL rules to an aclfile."""def acl_setuser(self, username, enabled=False, nopass=False,passwords=None, hashed_passwords=None, categories=None,commands=None, keys=None, reset=False, reset_keys=False,reset_passwords=False):"""Create or update an ACL user.Create or update the ACL for ``username``. If the user already exists,the existing ACL is completely overwritten and replaced with thespecified values.``enabled`` is a boolean indicating whether the user should be allowedto authenticate or not. Defaults to ``False``.``nopass`` is a boolean indicating whether the can authenticate withouta password. This cannot be True if ``passwords`` are also specified.``passwords`` if specified is a list of plain text passwordsto add to or remove from the user. Each password must be prefixed witha '+' to add or a '-' to remove. For convenience, the value of``add_passwords`` can be a simple prefixed string when adding orremoving a single password.``hashed_passwords`` if specified is a list of SHA-256 hashed passwordsto add to or remove from the user. Each hashed password must beprefixed with a '+' to add or a '-' to remove. For convenience,the value of ``hashed_passwords`` can be a simple prefixed string whenadding or removing a single password.``categories`` if specified is a list of strings representing categorypermissions. 
Each string must be prefixed with either a '+' to add thecategory permission or a '-' to remove the category permission.``commands`` if specified is a list of strings representing commandpermissions. Each string must be prefixed with either a '+' to add thecommand permission or a '-' to remove the command permission.``keys`` if specified is a list of key patterns to grant the useraccess to. Keys patterns allow '*' to support wildcard matching. Forexample, '*' grants access to all keys while 'cache:*' grants accessto all keys that are prefixed with 'cache:'. ``keys`` should not beprefixed with a '~'.``reset`` is a boolean indicating whether the user should be fullyreset prior to applying the new ACL. Setting this to True willremove all existing passwords, flags and privileges from the user andthen apply the specified rules. If this is False, the user's existingpasswords, flags and privileges will be kept and any new specifiedrules will be applied on top.``reset_keys`` is a boolean indicating whether the user's keypermissions should be reset prior to applying any new key permissionsspecified in ``keys``. If this is False, the user's existingkey permissions will be kept and any new specified key permissionswill be applied on top.``reset_passwords`` is a boolean indicating whether to remove allexisting passwords and the 'nopass' flag from the user prior toapplying any new passwords specified in 'passwords' or'hashed_passwords'. If this is False, the user's existing passwordsand 'nopass' status will be kept and any new specified passwordsor hashed_passwords will be applied on top."""def acl_users(self):"Returns a list of all registered users on the server."def acl_whoami(self):"Get the username for the current connection"def bgrewriteaof(self):"Tell the Redis server to rewrite the AOF file from data in memory."def bgsave(self):"""Tell the Redis server to save its data to disk. 
Unlike save(),this method is asynchronous and returns immediately."""def client_kill(self, address):"Disconnects the client at ``address`` (ip:port)"def client_kill_filter(self, _id=None, _type=None, addr=None, skipme=None):"""Disconnects client(s) using a variety of filter options:param id: Kills a client by its unique ID field:param type: Kills a client by type where type is one of 'normal','master', 'slave' or 'pubsub':param addr: Kills a client by its 'address:port':param skipme: If True, then the client calling the commandwill not get killed even if it is identified by one of the filteroptions. If skipme is not provided, the server defaults to skipme=True"""def client_list(self, _type=None):"""Returns a list of currently connected clients.If type of client specified, only that type will be returned.:param _type: optional. one of the client types (normal, master,replica, pubsub)"""def client_getname(self):"Returns the current connection name"def client_id(self):"Returns the current connection id"def client_setname(self, name):"Sets the current connection name"def client_unblock(self, client_id, error=False):"""Unblocks a connection by its client id.If ``error`` is True, unblocks the client with a special error message.If ``error`` is False (default), the client is unblocked using theregular timeout mechanism."""def client_pause(self, timeout):"""Suspend all the Redis clients for the specified amount of time:param timeout: milliseconds to pause clients"""def readwrite(self):"Disables read queries for a connection to a Redis Cluster slave node"def readonly(self):"Enables read queries for a connection to a Redis Cluster replica node"def config_get(self, pattern="*"):"Return a dictionary of configuration based on the ``pattern``"def config_set(self, name, value):"Set config item ``name`` with ``value``"def config_resetstat(self):"Reset runtime statistics"def config_rewrite(self):"Rewrite config file with the minimal change to reflect running config"def 
dbsize(self):"Returns the number of keys in the current database"def debug_object(self, key):"Returns version specific meta information about a given key"def echo(self, value):"Echo the string back from the server"def flushall(self, asynchronous=False):"""Delete all keys in all databases on the current host.``asynchronous`` indicates whether the operation isexecuted asynchronously by the server."""def flushdb(self, asynchronous=False):"""Delete all keys in the current database.``asynchronous`` indicates whether the operation isexecuted asynchronously by the server."""def swapdb(self, first, second):"Swap two databases"def info(self, section=None):"""Returns a dictionary containing information about the Redis serverThe ``section`` option can be used to select a specific sectionof informationThe section option is not supported by older versions of Redis Server,and will generate ResponseError"""def lastsave(self):"""Return a Python datetime object representing the last time theRedis database was saved to disk"""def migrate(self, host, port, keys, destination_db, timeout,copy=False, replace=False, auth=None):"""Migrate 1 or more keys from the current Redis server to a differentserver specified by the ``host``, ``port`` and ``destination_db``.The ``timeout``, specified in milliseconds, indicates the maximumtime the connection between the two servers can be idle before thecommand is interrupted.If ``copy`` is True, the specified ``keys`` are NOT deleted fromthe source server.If ``replace`` is True, this operation will overwrite the keyson the destination server if they exist.If ``auth`` is specified, authenticate to the destination server withthe password provided."""def object(self, infotype, key):"Return the encoding, idletime, or refcount about the key"def memory_stats(self):"Return a dictionary of memory stats"def memory_usage(self, key, samples=None):"""Return the total memory usage for key, its value and associatedadministrative overheads.For nested data 
structures, ``samples`` is the number of elements tosample. If left unspecified, the server's default is 5. Use 0 to sampleall elements."""def memory_purge(self):"Attempts to purge dirty pages for reclamation by allocator"def ping(self):"Ping the Redis server"def save(self):"""Tell the Redis server to save its data to disk,blocking until the save is complete"""def sentinel(self, *args):"Redis Sentinel's SENTINEL command."def sentinel_get_master_addr_by_name(self, service_name):"Returns a (host, port) pair for the given ``service_name``"def sentinel_master(self, service_name):"Returns a dictionary containing the specified masters state."def sentinel_masters(self):"Returns a list of dictionaries containing each master's state."def sentinel_monitor(self, name, ip, port, quorum):"Add a new master to Sentinel to be monitored"def sentinel_remove(self, name):"Remove a master from Sentinel's monitoring"def sentinel_sentinels(self, service_name):"Returns a list of sentinels for ``service_name``"def sentinel_set(self, name, option, value):"Set Sentinel monitoring parameters for a given master"def sentinel_slaves(self, service_name):"Returns a list of slaves for ``service_name``"def shutdown(self, save=False, nosave=False):"""Shutdown the Redis server. If Redis has persistence configured,data will be flushed before shutdown. If the "save" option is set,a data flush will be attempted even if there is no persistenceconfigured. If the "nosave" option is set, no data flush will beattempted. The "save" and "nosave" options cannot both be set."""def slaveof(self, host=None, port=None):"""Set the server to be a replicated slave of the instance identifiedby the ``host`` and ``port``. If called without arguments, theinstance is promoted to a master instead."""def slowlog_get(self, num=None):"""Get the entries from the slowlog. 
If ``num`` is specified, get themost recent ``num`` items."""def slowlog_len(self):"Get the number of items in the slowlog"def slowlog_reset(self):"Remove all items in the slowlog"def time(self):"""Returns the server time as a 2-item tuple of ints:(seconds since epoch, microseconds into this second)."""def wait(self, num_replicas, timeout):"""Redis synchronous replicationThat returns the number of replicas that processed the query whenwe finally have at least ``num_replicas``, or when the ``timeout`` wasreached."""def append(self, key, value):"""Appends the string ``value`` to the value at ``key``. If ``key``doesn't already exist, create it with a value of ``value``.Returns the new length of the value at ``key``."""def bitcount(self, key, start=None, end=None):"""Returns the count of set bits in the value of ``key``. Optional``start`` and ``end`` paramaters indicate which bytes to consider"""def bitfield(self, key, default_overflow=None):"""Return a BitFieldOperation instance to conveniently construct one ormore bitfield operations on ``key``."""def bitop(self, operation, dest, *keys):"""Perform a bitwise operation using ``operation`` between ``keys`` andstore the result in ``dest``."""def bitpos(self, key, bit, start=None, end=None):"""Return the position of the first bit set to 1 or 0 in a string.``start`` and ``end`` difines search range. The range is interpretedas a range of bytes and not a range of bits, so start=0 and end=2means to look at the first three bytes."""def decr(self, name, amount=1):"""Decrements the value of ``key`` by ``amount``. If no key exists,the value will be initialized as 0 - ``amount``"""# An alias for ``decr()``, because it is already implemented# as DECRBY redis command.def decrby(self, name, amount=1):"""Decrements the value of ``key`` by ``amount``. 
If no key exists,the value will be initialized as 0 - ``amount``"""def delete(self, *names):"Delete one or more keys specified by ``names``"def dump(self, name):"""Return a serialized version of the value stored at the specified key.If key does not exist a nil bulk reply is returned."""def exists(self, *names):"Returns the number of ``names`` that exist"def expire(self, name, time):"""Set an expire flag on key ``name`` for ``time`` seconds. ``time``can be represented by an integer or a Python timedelta object."""def expireat(self, name, when):"""Set an expire flag on key ``name``. ``when`` can be representedas an integer indicating unix time or a Python datetime object."""def get(self, name):"""Return the value at key ``name``, or None if the key doesn't exist"""def getbit(self, name, offset):"Returns a boolean indicating the value of ``offset`` in ``name``"def getrange(self, key, start, end):"""Returns the substring of the string value stored at ``key``,determined by the offsets ``start`` and ``end`` (both are inclusive)"""def getset(self, name, value):"""Sets the value at key ``name`` to ``value``and returns the old value at key ``name`` atomically."""def incr(self, name, amount=1):"""Increments the value of ``key`` by ``amount``. If no key exists,the value will be initialized as ``amount``"""def incrby(self, name, amount=1):"""Increments the value of ``key`` by ``amount``. If no key exists,the value will be initialized as ``amount``"""# An alias for ``incr()``, because it is already implemented# as INCRBY redis command.def incrbyfloat(self, name, amount=1.0):"""Increments the value at key ``name`` by floating ``amount``.If no key exists, the value will be initialized as ``amount``"""def keys(self, pattern='*'):"Returns a list of keys matching ``pattern``"def mget(self, keys, *args):"""Returns a list of values ordered identically to ``keys``"""def mset(self, mapping):"""Sets key/values based on a mapping. Mapping is a dictionary ofkey/value pairs. 
Both keys and values should be strings or types thatcan be cast to a string via str()."""def msetnx(self, mapping):"""Sets key/values based on a mapping if none of the keys are already set.Mapping is a dictionary of key/value pairs. Both keys and valuesshould be strings or types that can be cast to a string via str().Returns a boolean indicating if the operation was successful."""def move(self, name, db):"Moves the key ``name`` to a different Redis database ``db``"def persist(self, name):"Removes an expiration on ``name``"def pexpire(self, name, time):"""Set an expire flag on key ``name`` for ``time`` milliseconds.``time`` can be represented by an integer or a Python timedeltaobject."""def pexpireat(self, name, when):"""Set an expire flag on key ``name``. ``when`` can be representedas an integer representing unix time in milliseconds (unix time * 1000)or a Python datetime object."""def psetex(self, name, time_ms, value):"""Set the value of key ``name`` to ``value`` that expires in ``time_ms``milliseconds. 
``time_ms`` can be represented by an integer or a Pythontimedelta object"""def pttl(self, name):"Returns the number of milliseconds until the key ``name`` will expire"def randomkey(self):"Returns the name of a random key"def rename(self, src, dst):"""Rename key ``src`` to ``dst``"""def renamenx(self, src, dst):"Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist"def restore(self, name, ttl, value, replace=False):"""Create a key using the provided serialized value, previously obtainedusing DUMP."""def set(self, name, value,ex=None, px=None, nx=False, xx=False, keepttl=False):"""Set the value at key ``name`` to ``value````ex`` sets an expire flag on key ``name`` for ``ex`` seconds.``px`` sets an expire flag on key ``name`` for ``px`` milliseconds.``nx`` if set to True, set the value at key ``name`` to ``value`` onlyif it does not exist.``xx`` if set to True, set the value at key ``name`` to ``value`` onlyif it already exists.``keepttl`` if True, retain the time to live associated with the key.(Available since Redis 6.0)"""def setbit(self, name, offset, value):"""Flag the ``offset`` in ``name`` as ``value``. Returns a booleanindicating the previous value of ``offset``."""def setex(self, name, time, value):"""Set the value of key ``name`` to ``value`` that expires in ``time``seconds. ``time`` can be represented by an integer or a Pythontimedelta object."""def setnx(self, name, value):"Set the value of key ``name`` to ``value`` if key doesn't exist"def setrange(self, name, offset, value):"""Overwrite bytes in the value of ``name`` starting at ``offset`` with``value``. 
If ``offset`` plus the length of ``value`` exceeds thelength of the original value, the new value will be larger than before.If ``offset`` exceeds the length of the original value, null byteswill be used to pad between the end of the previous value and the startof what's being injected.Returns the length of the new string."""def strlen(self, name):"Return the number of bytes stored in the value of ``name``"def substr(self, name, start, end=-1):"""Return a substring of the string at key ``name``. ``start`` and ``end``are 0-based integers specifying the portion of the string to return."""def touch(self, *args):"""Alters the last access time of a key(s) ``*args``. A key is ignoredif it does not exist."""def ttl(self, name):"Returns the number of seconds until the key ``name`` will expire"def type(self, name):"Returns the type of key ``name``"def watch(self, *names):"""Watches the values at keys ``names``, or None if the key doesn't exist"""def unwatch(self):"""Unwatches the value at key ``name``, or None of the key doesn't exist"""def unlink(self, *names):"Unlink one or more keys specified by ``names``"# LIST COMMANDSdef blpop(self, keys, timeout=0):"""LPOP a value off of the first non-empty listnamed in the ``keys`` list.If none of the lists in ``keys`` has a value to LPOP, then blockfor ``timeout`` seconds, or until a value gets pushed on to oneof the lists.If timeout is 0, then block indefinitely."""def brpop(self, keys, timeout=0):"""RPOP a value off of the first non-empty listnamed in the ``keys`` list.If none of the lists in ``keys`` has a value to RPOP, then blockfor ``timeout`` seconds, or until a value gets pushed on to oneof the lists.If timeout is 0, then block indefinitely."""def brpoplpush(self, src, dst, timeout=0):"""Pop a value off the tail of ``src``, push it on the head of ``dst``and then return it.This command blocks until a value is in ``src`` or until ``timeout``seconds elapse, whichever is first. 
A ``timeout`` value of 0 blocksforever."""def lindex(self, name, index):"""Return the item from list ``name`` at position ``index``Negative indexes are supported and will return an item at theend of the list"""def linsert(self, name, where, refvalue, value):"""Insert ``value`` in list ``name`` either immediately before or after[``where``] ``refvalue``Returns the new length of the list on success or -1 if ``refvalue``is not in the list."""def llen(self, name):"Return the length of the list ``name``"def lpop(self, name):"Remove and return the first item of the list ``name``"def lpush(self, name, *values):"Push ``values`` onto the head of the list ``name``"def lpushx(self, name, value):"Push ``value`` onto the head of the list ``name`` if ``name`` exists"def lrange(self, name, start, end):"""Return a slice of the list ``name`` betweenposition ``start`` and ``end````start`` and ``end`` can be negative numbers just likePython slicing notation"""def lrem(self, name, count, value):"""Remove the first ``count`` occurrences of elements equal to ``value``from the list stored at ``name``.The count argument influences the operation in the following ways:count > 0: Remove elements equal to value moving from head to tail.count < 0: Remove elements equal to value moving from tail to head.count = 0: Remove all elements equal to value."""def lset(self, name, index, value):"Set ``position`` of list ``name`` to ``value``"def ltrim(self, name, start, end):"""Trim the list ``name``, removing all values not within the slicebetween ``start`` and ``end````start`` and ``end`` can be negative numbers just likePython slicing notation"""def rpop(self, name):"Remove and return the last item of the list ``name``"def rpoplpush(self, src, dst):"""RPOP a value off of the ``src`` list and atomically LPUSH iton to the ``dst`` list. 
Returns the value."""def rpush(self, name, *values):"Push ``values`` onto the tail of the list ``name``"def rpushx(self, name, value):"Push ``value`` onto the tail of the list ``name`` if ``name`` exists"def sort(self, name, start=None, num=None, by=None, get=None,desc=False, alpha=False, store=None, groups=False):"""Sort and return the list, set or sorted set at ``name``.``start`` and ``num`` allow for paging through the sorted data``by`` allows using an external key to weight and sort the items.Use an "*" to indicate where in the key the item value is located``get`` allows for returning items from external keys rather than thesorted data itself. Use an "*" to indicate where in the keythe item value is located``desc`` allows for reversing the sort``alpha`` allows for sorting lexicographically rather than numerically``store`` allows for storing the result of the sort intothe key ``store````groups`` if set to True and if ``get`` contains at least twoelements, sort will return a list of tuples, each containing thevalues fetched from the arguments to ``get``."""def scan(self, cursor=0, match=None, count=None, _type=None):"""Incrementally return lists of key names. 
Also return a cursorindicating the scan position.``match`` allows for filtering the keys by pattern``count`` provides a hint to Redis about the number of keys toreturn per batch.``_type`` filters the returned values by a particular Redis type.Stock Redis instances allow for the following types:HASH, LIST, SET, STREAM, STRING, ZSETAdditionally, Redis modules can expose other types as well."""def scan_iter(self, match=None, count=None, _type=None):"""Make an iterator using the SCAN command so that the client doesn'tneed to remember the cursor position.``match`` allows for filtering the keys by pattern``count`` provides a hint to Redis about the number of keys toreturn per batch.``_type`` filters the returned values by a particular Redis type.Stock Redis instances allow for the following types:HASH, LIST, SET, STREAM, STRING, ZSETAdditionally, Redis modules can expose other types as well."""def sscan(self, name, cursor=0, match=None, count=None):"""Incrementally return lists of elements in a set. Also return a cursorindicating the scan position.``match`` allows for filtering the keys by pattern``count`` allows for hint the minimum number of returns"""def sscan_iter(self, name, match=None, count=None):"""Make an iterator using the SSCAN command so that the client doesn'tneed to remember the cursor position.``match`` allows for filtering the keys by pattern``count`` allows for hint the minimum number of returns"""def hscan(self, name, cursor=0, match=None, count=None):"""Incrementally return key/value slices in a hash. 
Also return a cursorindicating the scan position.``match`` allows for filtering the keys by pattern``count`` allows for hint the minimum number of returns"""def hscan_iter(self, name, match=None, count=None):"""Make an iterator using the HSCAN command so that the client doesn'tneed to remember the cursor position.``match`` allows for filtering the keys by pattern``count`` allows for hint the minimum number of returns"""def zscan(self, name, cursor=0, match=None, count=None,score_cast_func=float):"""Incrementally return lists of elements in a sorted set. Also return acursor indicating the scan position.``match`` allows for filtering the keys by pattern``count`` allows for hint the minimum number of returns``score_cast_func`` a callable used to cast the score return value"""def zscan_iter(self, name, match=None, count=None,score_cast_func=float):"""Make an iterator using the ZSCAN command so that the client doesn'tneed to remember the cursor position.``match`` allows for filtering the keys by pattern``count`` allows for hint the minimum number of returns``score_cast_func`` a callable used to cast the score return value"""def sadd(self, name, *values):"Add ``value(s)`` to set ``name``"def scard(self, name):"Return the number of elements in set ``name``"def sdiff(self, keys, *args):"Return the difference of sets specified by ``keys``"def sdiffstore(self, dest, keys, *args):"""Store the difference of sets specified by ``keys`` into a newset named ``dest``. Returns the number of keys in the new set."""def sinter(self, keys, *args):"Return the intersection of sets specified by ``keys``"def sinterstore(self, dest, keys, *args):"""Store the intersection of sets specified by ``keys`` into a newset named ``dest``. 
Returns the number of keys in the new set."""def sismember(self, name, value):"Return a boolean indicating if ``value`` is a member of set ``name``"def smembers(self, name):"Return all members of the set ``name``"def smove(self, src, dst, value):"Move ``value`` from set ``src`` to set ``dst`` atomically"def spop(self, name, count=None):"Remove and return a random member of set ``name``"def srandmember(self, name, number=None):"""If ``number`` is None, returns a random member of set ``name``.If ``number`` is supplied, returns a list of ``number`` randommembers of set ``name``. Note this is only available when runningRedis 2.6+."""def srem(self, name, *values):"Remove ``values`` from set ``name``"def sunion(self, keys, *args):"Return the union of sets specified by ``keys``"def sunionstore(self, dest, keys, *args):"""Store the union of sets specified by ``keys`` into a newset named ``dest``. Returns the number of keys in the new set."""def xack(self, name, groupname, *ids):"""Acknowledges the successful processing of one or more messages.name: name of the stream.groupname: name of the consumer group.*ids: message ids to acknowledge."""def xadd(self, name, fields, id='*', maxlen=None, approximate=True):"""Add to a stream.name: name of the streamfields: dict of field/value pairs to insert into the streamid: Location to insert this record. By default it is appended.maxlen: truncate old stream members beyond this sizeapproximate: actual stream length may be slightly more than maxlen"""def xclaim(self, name, groupname, consumername, min_idle_time, message_ids,idle=None, time=None, retrycount=None, force=False,justid=False):"""Changes the ownership of a pending message.name: name of the stream.groupname: name of the consumer group.consumername: name of a consumer that claims the message.min_idle_time: filter messages that were idle less than this amount ofmillisecondsmessage_ids: non-empty list or tuple of message IDs to claimidle: optional. 
Set the idle time (last time it was delivered) of themessage in mstime: optional integer. This is the same as idle but instead of arelative amount of milliseconds, it sets the idle time to a specificUnix time (in milliseconds).retrycount: optional integer. set the retry counter to the specifiedvalue. This counter is incremented every time a message is deliveredagain.force: optional boolean, false by default. Creates the pending messageentry in the PEL even if certain specified IDs are not already in thePEL assigned to a different client.justid: optional boolean, false by default. Return just an array of IDsof messages successfully claimed, without returning the actual message"""def xdel(self, name, *ids):"""Deletes one or more messages from a stream.name: name of the stream.*ids: message ids to delete."""return self.execute_command('XDEL', name, *ids)def xgroup_create(self, name, groupname, id='$', mkstream=False):"""Create a new consumer group associated with a stream.name: name of the stream.groupname: name of the consumer group.id: ID of the last item in the stream to consider already delivered."""def xgroup_delconsumer(self, name, groupname, consumername):"""Remove a specific consumer from a consumer group.Returns the number of pending messages that the consumer had before itwas deleted.name: name of the stream.groupname: name of the consumer group.consumername: name of consumer to delete"""def xgroup_destroy(self, name, groupname):"""Destroy a consumer group.name: name of the stream.groupname: name of the consumer group."""def xgroup_setid(self, name, groupname, id):"""Set the consumer group last delivered ID to something else.name: name of the stream.groupname: name of the consumer group.id: ID of the last item in the stream to consider already delivered."""def xinfo_consumers(self, name, groupname):"""Returns general information about the consumers in the group.name: name of the stream.groupname: name of the consumer group."""def xinfo_groups(self, 
name):"""Returns general information about the consumer groups of the stream.name: name of the stream."""def xinfo_stream(self, name):"""Returns general information about the stream.name: name of the stream."""def xlen(self, name):"""Returns the number of elements in a given stream."""def xpending(self, name, groupname):"""Returns information about pending messages of a group.name: name of the stream.groupname: name of the consumer group."""def xpending_range(self, name, groupname, min, max, count,consumername=None):"""Returns information about pending messages, in a range.name: name of the stream.groupname: name of the consumer group.min: minimum stream ID.max: maximum stream ID.count: number of messages to returnconsumername: name of a consumer to filter by (optional)."""def xrange(self, name, min='-', max='+', count=None):"""Read stream values within an interval.name: name of the stream.start: first stream ID. defaults to '-',meaning the earliest available.finish: last stream ID. defaults to '+',meaning the latest available.count: if set, only return this many items, beginning with theearliest available."""def xread(self, streams, count=None, block=None):"""Block and monitor multiple streams for new data.streams: a dict of stream names to stream IDs, whereIDs indicate the last ID already seen.count: if set, only return this many items, beginning with theearliest available.block: number of milliseconds to wait, if nothing already present."""# HASH COMMANDSdef hdel(self, name, *keys):"Delete ``keys`` from hash ``name``"def hexists(self, name, key):"Returns a boolean indicating if ``key`` exists within hash ``name``"def hget(self, name, key):"Return the value of ``key`` within the hash ``name``"def hgetall(self, name):"Return a Python dict of the hash's name/value pairs"
RedisCluster入参及使用说明见rediscluster.RedisCluster,以下为部分
class RedisCluster(Redis):def __init__(self, host=None, port=None, startup_nodes=None, max_connections=None, max_connections_per_node=False, init_slot_cache=True,readonly_mode=False, reinitialize_steps=None, skip_full_coverage_check=False, nodemanager_follow_cluster=False,connection_class=None, read_from_replicas=False, cluster_down_retry_attempts=3, host_port_remap=None, **kwargs):""":startup_nodes:List of nodes that initial bootstrapping can be done from:host:Can be used to point to a startup node:port:Can be used to point to a startup node:max_connections:Maximum number of connections that should be kept open at one time:readonly_mode:enable READONLY mode. You can read possibly stale data from slave.:skip_full_coverage_check:Skips the check of cluster-require-full-coverage config, useful for clusterswithout the CONFIG command (like aws):nodemanager_follow_cluster:The node manager will during initialization try the last set of nodes thatit was operating on. This will allow the client to drift alongside the clusterif the cluster nodes move around a lot.:**kwargs:Extra arguments that will be sent into Redis instance when created(See Official redis-py doc for supported kwargs[https://github.com/andymccurdy/redis-py/blob/master/redis/client.py])Some kwargs are not supported and will raise RedisClusterException- db (Redis does not support database SELECT in cluster mode)"""def set(self, name, value,ex=None, px=None, nx=False, xx=False, keepttl=False):"""Set the value at key ``name`` to ``value````ex`` sets an expire flag on key ``name`` for ``ex`` seconds.``px`` sets an expire flag on key ``name`` for ``px`` milliseconds.``nx`` if set to True, set the value at key ``name`` to ``value`` onlyif it does not exist.``xx`` if set to True, set the value at key ``name`` to ``value`` onlyif it already exists.``keepttl`` if True, retain the time to live associated with the key.(Available since Redis 6.0)"""@classmethoddef from_url(cls, url, db=None, skip_full_coverage_check=False, 
readonly_mode=False, read_from_replicas=False, **kwargs):"""Return a Redis client object configured from the given URL, which mustuse either `the ``redis://`` scheme<http://www.iana.org/assignments/uri-schemes/prov/redis>`_ for RESPconnections or the ``unix://`` scheme for Unix domain sockets.For example::redis://[:password]@localhost:6379/0unix://[:password]@/path/to/socket.sock?db=0There are several ways to specify a database number. The parse functionwill return the first specified option:1. A ``db`` querystring option, e.g. redis://localhost?db=02. If using the redis:// scheme, the path argument of the url, e.g.redis://localhost/03. The ``db`` argument to this function.If none of these options are specified, db=0 is used.Any additional querystring arguments and keyword arguments will bepassed along to the ConnectionPool class's initializer. In the caseof conflicting arguments, querystring arguments always win."""def set_result_callback(self, command, callback):"Set a custom Result Callback"def pubsub(self, **kwargs):""""""def pipeline(self, transaction=None, shard_hint=None, read_from_replicas=False):"""Cluster impl:Pipelines do not work in cluster mode the same way they do in normal mode.Create a clone of this object so that simulating pipelines will work correctly.Each command will be called directly when used and when calling execute() will only return the result stack."""def transaction(self, *args, **kwargs):"""Transaction is not implemented in cluster mode yet."""def execute_command(self, *args, **kwargs):"""Wrapper for CLUSTERDOWN error handling.If the cluster reports it is down it is assumed that:- connection_pool was disconnected- connection_pool was reset- refresh_table_asap set to TrueIt will try the number of times specified by the config option "self.cluster_down_retry_attempts"which defaults to 3 unless manually configured.If it reaches the number of times, the command will raise a ClusterDownException."""def _execute_command(self, *args, 
**kwargs):"""Send a command to a node in the cluster"""def mget(self, keys, *args):"""Returns a list of values ordered identically to ``keys``Cluster impl:Iterate all keys and send GET for each key.This will go a lot slower than a normal mget call in Redis.Operation is no longer atomic."""def mset(self, *args, **kwargs):"""Sets key/values based on a mapping. Mapping can be supplied as a singledictionary argument or as kwargs.Cluster impl:Iterate over all items and do SET on each (k,v) pairOperation is no longer atomic."""def msetnx(self, *args, **kwargs):"""Sets key/values based on a mapping if none of the keys are already set.Mapping can be supplied as a single dictionary argument or as kwargs.Returns a boolean indicating if the operation was successful.Cluster impl:Iterate over all items and do GET to determine if all keys do not exist.If true then call mset() on all keys."""def rename(self, src, dst, replace=False):"""Rename key ``src`` to ``dst``Cluster impl:If the src and dst keys are in the same slot then send a plain RENAMEcommand to that node to do the rename inside the server.If the keys are in cross slots then use the client side implementationas fallback method. 
In this case this operation is no longer atomic asthe key is dumped and posted back to the server through the client."""def delete(self, *names):"""Delete one or more keys specified by ``names``Cluster impl:Iterate all keys and send DELETE for each key.This will go a lot slower than a normal delete call in Redis.Operation is no longer atomic."""def renamenx(self, src, dst):"""Rename key ``src`` to ``dst`` if ``dst`` doesn't already existCluster impl:Check if the dst key does not exist, then call rename().Operation is no longer atomic."""def pubsub_channels(self, pattern='*', aggregate=True):"""Return a list of channels that have at least one subscriber.Aggregate toggles merging of response."""def pubsub_numpat(self, aggregate=True):"""Returns the number of subscriptions to patterns.Aggregate toggles merging of response."""def pubsub_numsub(self, *args, **kwargs):"""Return a list of (channel, number of subscribers) tuplesfor each channel given in ``*args``.``aggregate`` keyword argument toggles merging of response."""def brpoplpush(self, src, dst, timeout=0):"""Pop a value off the tail of ``src``, push it on the head of ``dst``and then return it.This command blocks until a value is in ``src`` or until ``timeout``seconds elapse, whichever is first. A ``timeout`` value of 0 blocksforever.Cluster impl:Call brpop() then send the result into lpush()Operation is no longer atomic."""def rpoplpush(self, src, dst):"""RPOP a value off of the ``src`` list and atomically LPUSH iton to the ``dst`` list. Returns the value.Cluster impl:Call rpop() then send the result into lpush()Operation is no longer atomic."""def sdiff(self, keys, *args):"""Return the difference of sets specified by ``keys``Cluster impl:Query all keys and diff all sets and return result"""def sdiffstore(self, dest, keys, *args):"""Store the difference of sets specified by ``keys`` into a newset named ``dest``. 
Returns the number of keys in the new set.Overwrites dest key if it exists.Cluster impl:Use sdiff() --> Delete dest key --> store result in dest key"""def sinter(self, keys, *args):"""Return the intersection of sets specified by ``keys``Cluster impl:Query all keys, intersection and return result"""def sinterstore(self, dest, keys, *args):"""Store the intersection of sets specified by ``keys`` into a newset named ``dest``. Returns the number of keys in the new set.Cluster impl:Use sinter() --> Delete dest key --> store result in dest key"""def smove(self, src, dst, value):"""Move ``value`` from set ``src`` to set ``dst`` atomicallyCluster impl:SMEMBERS --> SREM --> SADD. Function is no longer atomic."""def sunion(self, keys, *args):"""Return the union of sets specified by ``keys``Cluster impl:Query all keys, union and return resultOperation is no longer atomic."""def sunionstore(self, dest, keys, *args):"""Store the union of sets specified by ``keys`` into a newset named ``dest``. Returns the number of keys in the new set.Cluster impl:Use sunion() --> Delete dest key --> store result in dest keyOperation is no longer atomic."""def pfcount(self, *sources):"""pfcount only works when all sources point to the same hash slot."""def pfmerge(self, dest, *sources):"""Merge N different HyperLogLogs into a single one.Cluster impl:Very special implementation is required to make pfmerge() workBut it works :]It works by first fetching all HLL objects that should be merged andmove them to one hashslot so that pfmerge operation can be performed withoutany 'CROSSSLOT' error.After the PFMERGE operation is done then it will be moved to the correct locationwithin the cluster and cleanup is done.This operation is no longer atomic because of all the operations that have to be done."""
