RecidivCI

Details for run #9463 (ok)

libc-malloc (ebfa76992)

Sun Oct 25 11:22:40 UTC 2020

@cd /home/ubuntu/ci/redis

Working dir is now '/home/ubuntu/ci/redis'

@git checkout unstable

Already on 'unstable'
Your branch is up to date with 'origin/unstable'.

@git pull origin unstable

From https://github.com/antirez/redis
 * branch                unstable   -> FETCH_HEAD
Already up to date.

@make distclean

cd src && make distclean
make[1]: Entering directory '/home/ubuntu/ci/redis/src'
/bin/sh: 1: pkg-config: not found
rm -rf redis-server redis-sentinel redis-cli redis-benchmark redis-check-rdb redis-check-aof *.o *.gcda *.gcno *.gcov redis.info lcov-html Makefile.dep dict-benchmark
rm -f adlist.d quicklist.d ae.d anet.d dict.d server.d sds.d zmalloc.d lzf_c.d lzf_d.d pqsort.d zipmap.d sha1.d ziplist.d release.d networking.d util.d object.d db.d replication.d rdb.d t_string.d t_list.d t_set.d t_zset.d t_hash.d config.d aof.d pubsub.d multi.d debug.d sort.d intset.d syncio.d cluster.d crc16.d endianconv.d slowlog.d scripting.d bio.d rio.d rand.d memtest.d crcspeed.d crc64.d bitops.d sentinel.d notify.d setproctitle.d blocked.d hyperloglog.d latency.d sparkline.d redis-check-rdb.d redis-check-aof.d geo.d lazyfree.d module.d evict.d expire.d geohash.d geohash_helper.d childinfo.d defrag.d siphash.d rax.d t_stream.d listpack.d localtime.d lolwut.d lolwut5.d lolwut6.d acl.d gopher.d tracking.d connection.d tls.d sha256.d timeout.d setcpuaffinity.d monotonic.d anet.d adlist.d dict.d redis-cli.d zmalloc.d release.d ae.d crcspeed.d crc64.d siphash.d crc16.d monotonic.d ae.d anet.d redis-benchmark.d adlist.d dict.d zmalloc.d siphash.d monotonic.d
(cd ../deps && make distclean)
make[2]: Entering directory '/home/ubuntu/ci/redis/deps'
(cd hiredis && make clean) > /dev/null || true
(cd linenoise && make clean) > /dev/null || true
(cd lua && make clean) > /dev/null || true
(cd jemalloc && [ -f Makefile ] && make distclean) > /dev/null || true
(cd hdr_histogram && make clean) > /dev/null || true
(rm -f .make-*)
make[2]: Leaving directory '/home/ubuntu/ci/redis/deps'
(rm -f .make-*)
make[1]: Leaving directory '/home/ubuntu/ci/redis/src'

@make MALLOC=libc -j 8

cd src && make all
make[1]: Entering directory '/home/ubuntu/ci/redis/src'
/bin/sh: 1: pkg-config: not found
    CC Makefile.dep
/bin/sh: 1: pkg-config: not found
rm -rf redis-server redis-sentinel redis-cli redis-benchmark redis-check-rdb redis-check-aof *.o *.gcda *.gcno *.gcov redis.info lcov-html Makefile.dep dict-benchmark
rm -f adlist.d quicklist.d ae.d anet.d dict.d server.d sds.d zmalloc.d lzf_c.d lzf_d.d pqsort.d zipmap.d sha1.d ziplist.d release.d networking.d util.d object.d db.d replication.d rdb.d t_string.d t_list.d t_set.d t_zset.d t_hash.d config.d aof.d pubsub.d multi.d debug.d sort.d intset.d syncio.d cluster.d crc16.d endianconv.d slowlog.d scripting.d bio.d rio.d rand.d memtest.d crcspeed.d crc64.d bitops.d sentinel.d notify.d setproctitle.d blocked.d hyperloglog.d latency.d sparkline.d redis-check-rdb.d redis-check-aof.d geo.d lazyfree.d module.d evict.d expire.d geohash.d geohash_helper.d childinfo.d defrag.d siphash.d rax.d t_stream.d listpack.d localtime.d lolwut.d lolwut5.d lolwut6.d acl.d gopher.d tracking.d connection.d tls.d sha256.d timeout.d setcpuaffinity.d monotonic.d anet.d adlist.d dict.d redis-cli.d zmalloc.d release.d ae.d crcspeed.d crc64.d siphash.d crc16.d monotonic.d ae.d anet.d redis-benchmark.d adlist.d dict.d zmalloc.d siphash.d monotonic.d
(cd ../deps && make distclean)
make[2]: Entering directory '/home/ubuntu/ci/redis/deps'
(cd hiredis && make clean) > /dev/null || true
(cd linenoise && make clean) > /dev/null || true
(cd lua && make clean) > /dev/null || true
(cd jemalloc && [ -f Makefile ] && make distclean) > /dev/null || true
(cd hdr_histogram && make clean) > /dev/null || true
(rm -f .make-*)
make[2]: Leaving directory '/home/ubuntu/ci/redis/deps'
(rm -f .make-*)
echo STD=-pedantic -DREDIS_STATIC='' -std=c11 >> .make-settings
echo WARN=-Wall -W -Wno-missing-field-initializers >> .make-settings
echo OPT=-O2 >> .make-settings
echo MALLOC=libc >> .make-settings
echo BUILD_TLS= >> .make-settings
echo USE_SYSTEMD= >> .make-settings
echo CFLAGS= >> .make-settings
echo LDFLAGS= >> .make-settings
echo REDIS_CFLAGS= >> .make-settings
echo REDIS_LDFLAGS= >> .make-settings
echo PREV_FINAL_CFLAGS=-pedantic -DREDIS_STATIC='' -std=c11 -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb   -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src -I../deps/hdr_histogram >> .make-settings
echo PREV_FINAL_LDFLAGS=  -g -ggdb -rdynamic >> .make-settings
(cd ../deps && make hiredis linenoise lua hdr_histogram)
make[2]: Entering directory '/home/ubuntu/ci/redis/deps'
(cd hiredis && make clean) > /dev/null || true
(cd linenoise && make clean) > /dev/null || true
(cd lua && make clean) > /dev/null || true
(cd jemalloc && [ -f Makefile ] && make distclean) > /dev/null || true
(cd hdr_histogram && make clean) > /dev/null || true
(rm -f .make-*)
(echo "" > .make-cflags)
(echo "" > .make-ldflags)
MAKE hiredis
cd hiredis && make static 
MAKE linenoise
MAKE lua
MAKE hdr_histogram
cd linenoise && make
cd lua/src && make all CFLAGS="-O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP " MYLDFLAGS="" AR="ar rc"
cd hdr_histogram && make
make[3]: Entering directory '/home/ubuntu/ci/redis/deps/hiredis'
make[3]: Entering directory '/home/ubuntu/ci/redis/deps/linenoise'
cc  -Wall -Os -g  -c linenoise.c
make[3]: Entering directory '/home/ubuntu/ci/redis/deps/lua/src'
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lapi.o lapi.c
make[3]: Entering directory '/home/ubuntu/ci/redis/deps/hdr_histogram'
cc  -Wall -Os -g  -c  hdr_histogram.c 
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lcode.o lcode.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o ldebug.o ldebug.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o ldo.o ldo.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o ldump.o ldump.c
cc -std=c99 -pedantic -c -O3 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb alloc.c
cc -std=c99 -pedantic -c -O3 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb net.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lfunc.o lfunc.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lgc.o lgc.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o llex.o llex.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lmem.o lmem.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lobject.o lobject.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lopcodes.o lopcodes.c
make[3]: Leaving directory '/home/ubuntu/ci/redis/deps/linenoise'
cc -std=c99 -pedantic -c -O3 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb hiredis.c
make[3]: Leaving directory '/home/ubuntu/ci/redis/deps/hdr_histogram'
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lparser.o lparser.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lstate.o lstate.c
cc -std=c99 -pedantic -c -O3 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb sds.c
cc -std=c99 -pedantic -c -O3 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb async.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lstring.o lstring.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o ltable.o ltable.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o ltm.o ltm.c
cc -std=c99 -pedantic -c -O3 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb read.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lundump.o lundump.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lvm.o lvm.c
cc -std=c99 -pedantic -c -O3 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb sockcompat.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lzio.o lzio.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o strbuf.o strbuf.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o fpconv.o fpconv.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lauxlib.o lauxlib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lbaselib.o lbaselib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o ldblib.o ldblib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o liolib.o liolib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lmathlib.o lmathlib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o loslib.o loslib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o ltablib.o ltablib.c
ar rcs libhiredis.a alloc.o net.o hiredis.o sds.o async.o read.o sockcompat.o
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lstrlib.o lstrlib.c
make[3]: Leaving directory '/home/ubuntu/ci/redis/deps/hiredis'
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o loadlib.o loadlib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o linit.o linit.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lua_cjson.o lua_cjson.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lua_struct.o lua_struct.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lua_cmsgpack.o lua_cmsgpack.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lua_bit.o lua_bit.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o lua.o lua.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o luac.o luac.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' -DLUA_USE_MKSTEMP    -c -o print.o print.c
ar rc liblua.a lapi.o lcode.o ldebug.o ldo.o ldump.o lfunc.o lgc.o llex.o lmem.o lobject.o lopcodes.o lparser.o lstate.o lstring.o ltable.o ltm.o lundump.o lvm.o lzio.o strbuf.o fpconv.o lauxlib.o lbaselib.o ldblib.o liolib.o lmathlib.o loslib.o ltablib.o lstrlib.o loadlib.o linit.o lua_cjson.o lua_struct.o lua_cmsgpack.o lua_bit.o	# DLL needs all object files
ranlib liblua.a
cc -o lua  lua.o liblua.a -lm 
cc -o luac  luac.o print.o liblua.a -lm 
make[3]: Leaving directory '/home/ubuntu/ci/redis/deps/lua/src'
make[2]: Leaving directory '/home/ubuntu/ci/redis/deps'
    CC adlist.o
    CC quicklist.o
    CC ae.o
    CC anet.o
    CC dict.o
    CC sds.o
    CC server.o
    CC zmalloc.o
    CC lzf_c.o
    CC lzf_d.o
    CC pqsort.o
    CC zipmap.o
    CC sha1.o
    CC ziplist.o
    CC release.o
    CC networking.o
    CC util.o
    CC object.o
    CC db.o
    CC replication.o
    CC rdb.o
    CC t_string.o
    CC t_list.o
    CC t_set.o
    CC t_zset.o
    CC t_hash.o
    CC config.o
    CC aof.o
    CC pubsub.o
    CC multi.o
    CC debug.o
    CC sort.o
    CC intset.o
    CC syncio.o
    CC cluster.o
    CC crc16.o
    CC endianconv.o
    CC slowlog.o
    CC scripting.o
    CC bio.o
    CC rio.o
    CC rand.o
    CC memtest.o
    CC crcspeed.o
    CC crc64.o
    CC bitops.o
    CC sentinel.o
    CC notify.o
    CC setproctitle.o
    CC blocked.o
    CC hyperloglog.o
    CC latency.o
    CC sparkline.o
    CC redis-check-rdb.o
    CC redis-check-aof.o
    CC geo.o
    CC lazyfree.o
    CC module.o
    CC evict.o
    CC expire.o
    CC geohash.o
    CC geohash_helper.o
    CC childinfo.o
    CC defrag.o
    CC siphash.o
    CC rax.o
    CC t_stream.o
    CC listpack.o
    CC localtime.o
    CC lolwut.o
    CC lolwut5.o
    CC lolwut6.o
    CC acl.o
    CC gopher.o
    CC tracking.o
    CC connection.o
    CC tls.o
    CC sha256.o
    CC timeout.o
    CC setcpuaffinity.o
    CC monotonic.o
    CC redis-cli.o
    CC redis-benchmark.o
    LINK redis-server
    LINK redis-benchmark
    INSTALL redis-sentinel
    INSTALL redis-check-rdb
    INSTALL redis-check-aof
    LINK redis-cli

Hint: It's a good idea to run 'make test' ;)

make[1]: Leaving directory '/home/ubuntu/ci/redis/src'
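
For reference, the steps this run performs up to this point can be reproduced by hand with a short shell sketch like the one below (it assumes an existing checkout at /home/ubuntu/ci/redis, a working C toolchain, and Tcl installed for the test suite; the "pkg-config: not found" messages above did not prevent the build from completing):

  #!/bin/sh
  set -e
  cd /home/ubuntu/ci/redis      # working directory used by this run
  git checkout unstable         # branch tracked by the libc-malloc job
  git pull origin unstable
  make distclean                # wipe objects and the bundled deps
  make MALLOC=libc -j 8         # link against the libc allocator instead of jemalloc
  ./runtest --verbose           # run the Tcl test suite

Once the build finishes, ./src/redis-server --version prints a malloc= field, which is a quick way to confirm that the libc allocator was actually linked in.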

@./runtest --verbose

Cleanup: may take some time... OK
Starting test server at port 11112
[ready]: 646924
Testing unit/printver
[ready]: 646925
Testing unit/dump
[ready]: 646926
Testing unit/auth
[ready]: 646927
Testing unit/protocol
[ready]: 646928
Testing unit/keyspace
[ready]: 646929
Testing unit/scan
[ready]: 646930
Testing unit/type/string
[ready]: 646931
Testing unit/type/incr
[ready]: 646932
Testing unit/type/list
[ready]: 646933
Testing unit/type/list-2
[ready]: 646934
Testing unit/type/list-3
[ready]: 646935
Testing unit/type/set
[ready]: 646936
Testing unit/type/zset
[ready]: 646937
Testing unit/type/hash
[ready]: 646938
Testing unit/type/stream
[ready]: 646939
Testing unit/type/stream-cgroups
=== (keyspace) Starting server 127.0.0.1:23111 ok
=== (scan) Starting server 127.0.0.1:23611 ok
=== (list) Starting server 127.0.0.1:25611 ok
=== (incr) Starting server 127.0.0.1:24611 ok
=== (list ziplist) Starting server 127.0.0.1:26111 ok
=== (stream) Starting server 127.0.0.1:28111 ok
=== (zset) Starting server 127.0.0.1:27111 ok
=== (hash) Starting server 127.0.0.1:27611 ok
=== (auth) Starting server 127.0.0.1:22111 ok
=== (set) Starting server 127.0.0.1:26611 ok
=== (stream) Starting server 127.0.0.1:28611 ok
=== (string) Starting server 127.0.0.1:24111 ok
=== (dump) Starting server 127.0.0.1:21611 ok
[ok]: Explicit regression for a list bug
=== (protocol) Starting server 127.0.0.1:22611 ok
[ok]: INCR against non existing key
[ok]: INCR against key created by incr itself
[ok]: INCR against key originally set with SET
[ok]: INCR over 32bit value
[ok]: INCRBY over 32bit value with over 32bit increment
[ok]: INCR fails against key with spaces (left)
[ok]: DEL against a single item
[ok]: INCR fails against key with spaces (right)
[ok]: INCR fails against key with spaces (both)
[ok]: Vararg DEL
[ok]: KEYS with pattern
[ok]: KEYS to get all keys
[ok]: DBSIZE
=== (list) Starting server 127.0.0.1:25111 ok
[ok]: INCR fails against a key holding a list
[ok]: XADD can add entries into a stream that XRANGE can fetch
[ok]: XADD IDs are incremental
[ok]: AUTH fails if there is no password configured server side
[ok]: DECRBY over 32bit value with over 32bit increment, negative res
[ok]: XADD IDs are incremental when ms is the same as well
[ok]: XADD IDs correctly report an error when overflowing
[ok]: SCAN basic
[ok]: DEL all keys
[ok]: INCR uses shared objects in the 0-9999 range
[ok]: INCR can modify objects in-place
[ok]: INCRBYFLOAT against non existing key
[ok]: Check encoding - ziplist
[ok]: ZSET basic ZADD and score update - ziplist
[ok]: ZSET element can't be set to NaN with ZADD - ziplist
[ok]: ZSET element can't be set to NaN with ZINCRBY
[ok]: ZADD with options syntax error with incomplete pair
[ok]: ZADD XX option without key - ziplist
[ok]: ZADD XX existing key - ziplist
[ok]: INCRBYFLOAT against key originally set with SET
[ok]: INCRBYFLOAT over 32bit value
[ok]: ZADD XX returns the number of elements actually added
[ok]: INCRBYFLOAT over 32bit value with over 32bit increment
[ok]: INCRBYFLOAT fails against key with spaces (left)
[ok]: ZADD XX updates existing elements score
[ok]: INCRBYFLOAT fails against key with spaces (right)
[ok]: DUMP / RESTORE are able to serialize / unserialize a simple key
[ok]: INCRBYFLOAT fails against key with spaces (both)
[ok]: RESTORE can set an arbitrary expire to the materialized key
[ok]: SET and GET an item
[ok]: ZADD GT updates existing elements when new scores are greater
[ok]: SET and GET an empty item
[ok]: RESTORE can set an expire that overflows a 32 bit integer
[ok]: RESTORE can set an absolute expire
[ok]: INCRBYFLOAT fails against a key holding a list
[ok]: RESTORE with ABSTTL in the past
[ok]: RESTORE can set LRU
[ok]: INCRBYFLOAT does not allow NaN or Infinity
[ok]: RESTORE can set LFU
[ok]: ZADD LT updates existing elements when new scores are lower
[ok]: RESTORE returns an error if the key already exists
[ok]: RESTORE can overwrite an existing key with REPLACE
[ok]: INCRBYFLOAT decrement
[ok]: RESTORE can detect a syntax error for unrecognized options
[ok]: DUMP of non existing key returns nil
[ok]: string to double with null terminator
[ok]: ZADD GT XX updates existing elements when new scores are greater and skips new elements
[ok]: No negative zero
[ok]: ZADD LT XX updates existing elements when new scores are lower and skips new elements
[ok]: ZADD XX and NX are not compatible
[ok]: ZADD NX with non existing key
[ok]: ZADD NX only adds new elements without updating old ones
[ok]: ZADD GT and NX are not compatible
[ok]: ZADD LT and NX are not compatible
[ok]: ZADD LT and GT are not compatible
[ok]: ZADD INCR works like ZINCRBY
[ok]: SADD, SCARD, SISMEMBER, SMISMEMBER, SMEMBERS basics - regular set
[ok]: ZADD INCR works with a single score-element pair
[ok]: SADD, SCARD, SISMEMBER, SMISMEMBER, SMEMBERS basics - intset
[ok]: ZADD CH option changes return value to all changed elements
[ok]: SMISMEMBER against non set
[ok]: Handle an empty query
[ok]: ZINCRBY calls leading to NaN result in error
[ok]: SMISMEMBER non existing key
[ok]: Negative multibulk length
[ok]: SMISMEMBER requires one or more members
[ok]: Out of range multibulk length
[ok]: ZADD - Variadic version base case
[ok]: SADD against non set
[ok]: SADD a non-integer against an intset
[ok]: ZADD - Return value is the number of actually added items
[ok]: Wrong multibulk payload header
[ok]: SADD an integer larger than 64 bits
[ok]: XGROUP CREATE: creation and duplicate group name detection
[ok]: ZADD - Variadic version does not add anything on single parsing error
[ok]: Negative multibulk payload length
[ok]: XGROUP CREATE: automatic stream creation fails without MKSTREAM
[ok]: ZADD - Variadic version will raise error on missing arg
[ok]: XGROUP CREATE: automatic stream creation works with MKSTREAM
[ok]: ZINCRBY does not work variadic even if it shares the ZADD implementation
[ok]: Out of range multibulk payload length
[ok]: XREADGROUP will return only new elements
[ok]: Non-number multibulk payload length
[ok]: ZCARD basics - ziplist
[ok]: Multi bulk request not followed by bulk arguments
[ok]: ZREM removes key after last element is removed
[ok]: XREADGROUP can read the history of the elements we own
[ok]: Generic wrong number of args
[ok]: XPENDING is able to return pending items
[ok]: XPENDING can return single consumer items
[ok]: ZREM variadic version
[ok]: Unbalanced number of quotes
[ok]: ZREM variadic version -- remove elements after key deletion
[ok]: XACK is able to remove items from the client/group PEL
[ok]: XACK can't remove the same item multiple times
[ok]: XACK is able to accept multiple arguments
[ok]: ZRANGE basics - ziplist
[ok]: XACK should fail if got at least one invalid ID
[ok]: PEL NACK reassignment after XGROUP SETID event
[ok]: ZREVRANGE basics - ziplist
[ok]: ZRANK/ZREVRANK basics - ziplist
[ok]: ZRANK - after deletion - ziplist
[ok]: XREADGROUP will not report data on empty history. Bug #5577
[ok]: ZINCRBY - can create a new sorted set - ziplist
[ok]: HSET/HLEN - Small hash creation
[ok]: Is the small hash encoded with a ziplist?
[ok]: ZINCRBY - increment and decrement - ziplist
[ok]: ZINCRBY return value
[ok]: XREADGROUP history reporting of deleted entries. Bug #5570
[ok]: ZRANGEBYSCORE/ZREVRANGEBYSCORE/ZCOUNT basics
[ok]: ZRANGEBYSCORE with WITHSCORES
[ok]: ZRANGEBYSCORE with LIMIT
[ok]: ZRANGEBYSCORE with LIMIT and WITHSCORES
[ok]: ZRANGEBYSCORE with non-value min or max
[ok]: ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics
[ok]: ZLEXCOUNT advanced
[ok]: ZRANGEBYSLEX with LIMIT
[ok]: SCAN COUNT
[ok]: ZRANGEBYLEX with invalid lex range specifiers
[ok]: LPOS basic usage
[ok]: SADD overflows the maximum allowed integers in an intset
[ok]: LPOS RANK (positive and negative rank) option
[ok]: LPOS COUNT option
[ok]: LPOS COUNT + RANK option
[ok]: Variadic SADD
[ok]: LPOS non existing key
[ok]: LPOS no match
[ok]: LPOS MAXLEN
[ok]: LPOS when RANK is greater than matches
[ok]: ZREMRANGEBYSCORE basics
[ok]: Blocking XREADGROUP will not reply with an empty array
=== (repl) Starting server 127.0.0.1:21612 ok
[ok]: ZREMRANGEBYSCORE with non-value min or max
[ok]: XGROUP DESTROY should unblock XREADGROUP with -NOGROUP
[ok]: RENAME can unblock XREADGROUP with data
[ok]: RENAME can unblock XREADGROUP with -NOGROUP
[ok]: LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - ziplist
[ok]: LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - regular list
[ok]: R/LPOP against empty list
[ok]: Variadic RPUSH/LPUSH
[ok]: DEL a list
[ok]: BLPOP, BRPOP: single existing list - linkedlist
[ok]: ZREMRANGEBYRANK basics
[ok]: SCAN MATCH
[ok]: BLPOP, BRPOP: multiple existing lists - linkedlist
[ok]: ZUNIONSTORE against non-existing key doesn't set destination - ziplist
[ok]: BLPOP, BRPOP: second list has an entry - linkedlist
[ok]: ZUNION/ZINTER against non-existing key - ziplist
[ok]: ZUNIONSTORE with empty set - ziplist
[ok]: BRPOPLPUSH - linkedlist
[ok]: BLMOVE left left - linkedlist
[ok]: ZUNION/ZINTER with empty set - ziplist
[ok]: BLMOVE left right - linkedlist
[ok]: ZUNIONSTORE basics - ziplist
[ok]: BLMOVE right left - linkedlist
[ok]: BLMOVE right right - linkedlist
[ok]: ZUNION/ZINTER with integer members - ziplist
[ok]: ZUNIONSTORE with weights - ziplist
[ok]: BLPOP, BRPOP: single existing list - ziplist
[ok]: ZUNION with weights - ziplist
[ok]: ZUNIONSTORE with a regular set and weights - ziplist
[ok]: BLPOP, BRPOP: multiple existing lists - ziplist
[ok]: ZUNIONSTORE with AGGREGATE MIN - ziplist
[ok]: ZUNION/ZINTER with AGGREGATE MIN - ziplist
[ok]: BLPOP, BRPOP: second list has an entry - ziplist
[ok]: ZUNIONSTORE with AGGREGATE MAX - ziplist
[ok]: ZUNION/ZINTER with AGGREGATE MAX - ziplist
[ok]: ZINTERSTORE basics - ziplist
[ok]: BRPOPLPUSH - ziplist
[ok]: ZINTER basics - ziplist
[ok]: ZINTERSTORE with weights - ziplist
[ok]: BLMOVE left left - ziplist
[ok]: ZINTER with weights - ziplist
[ok]: ZINTERSTORE with a regular set and weights - ziplist
[ok]: ZINTERSTORE with AGGREGATE MIN - ziplist
[ok]: BLMOVE left right - ziplist
[ok]: ZINTERSTORE with AGGREGATE MAX - ziplist
[ok]: BLMOVE right left - ziplist
[ok]: BLMOVE right right - ziplist
[ok]: ZUNIONSTORE with +inf/-inf scores - ziplist
[ok]: BLPOP, LPUSH + DEL should not awake blocked client
[ok]: ZUNIONSTORE with NaN weights ziplist
[ok]: ZINTERSTORE with +inf/-inf scores - ziplist
[ok]: ZINTERSTORE with NaN weights ziplist
[ok]: Basic ZPOP with a single key - ziplist
[ok]: ZPOP with count - ziplist
[ok]: BZPOP with a single existing sorted set - ziplist
[ok]: BZPOP with multiple existing sorted sets - ziplist
[ok]: BZPOP second sorted set has members - ziplist
[ok]: Check encoding - skiplist
[ok]: ZSET basic ZADD and score update - skiplist
[ok]: ZSET element can't be set to NaN with ZADD - skiplist
[ok]: ZSET element can't be set to NaN with ZINCRBY
[ok]: ZADD with options syntax error with incomplete pair
[ok]: ZADD XX option without key - skiplist
[ok]: ZADD XX existing key - skiplist
[ok]: ZADD XX returns the number of elements actually added
[ok]: Regression for quicklist #3343 bug
=== () Starting server 127.0.0.1:21111 ok
[ok]: ZADD XX updates existing elements score
[ok]: ZADD GT updates existing elements when new scores are greater
[ok]: ZADD LT updates existing elements when new scores are lower
[ok]: ZADD GT XX updates existing elements when new scores are greater and skips new elements
[ok]: ZADD LT XX updates existing elements when new scores are lower and skips new elements
[ok]: ZADD XX and NX are not compatible
[ok]: ZADD NX with non existing key
[ok]: ZADD NX only adds new elements without updating old ones
[ok]: ZADD GT and NX are not compatible
[ok]: ZADD LT and NX are not compatible
[ok]: ZADD LT and GT are not compatible
[ok]: ZADD INCR works like ZINCRBY
[ok]: ZADD INCR works with a single score-element pair
[ok]: XADD with MAXLEN option
[ok]: ZADD CH option changes return value to all changed elements
[ok]: Very big payload in GET/SET
[ok]: ZINCRBY calls leading to NaN result in error
[ok]: XADD with NOMKSTREAM option
[ok]: ZADD - Variadic version base case
[ok]: ZADD - Return value is the number of actually added items
[ok]: ZADD - Variadic version does not add anything on single parsing error
[ok]: ZADD - Variadic version will raise error on missing arg
[ok]: ZINCRBY does not work variadic even if it shares the ZADD implementation
[ok]: ZCARD basics - skiplist
Testing Redis version 255.255.255 (ebfa7699)
[ok]: ZREM removes key after last element is removed
[ok]: ZREM variadic version
[ok]: ZREM variadic version -- remove elements after key deletion
[ok]: ZRANGE basics - skiplist
[ok]: SCAN TYPE
[1/58 done]: unit/type/incr (1 seconds)
Testing unit/sort
[ok]: SSCAN with encoding intset
[ok]: ZREVRANGE basics - skiplist
[ok]: SSCAN with encoding hashtable
[ok]: ZRANK/ZREVRANK basics - skiplist
[ok]: HSCAN with encoding ziplist
[ok]: ZRANK - after deletion - skiplist
[ok]: ZINCRBY - can create a new sorted set - skiplist
[ok]: ZINCRBY - increment and decrement - skiplist
[ok]: ZINCRBY return value
[ok]: MIGRATE is caching connections
[ok]: ZRANGEBYSCORE/ZREVRANGEBYSCORE/ZCOUNT basics
=== (auth) Starting server 127.0.0.1:22112 ok
[ok]: ZRANGEBYSCORE with WITHSCORES
[ok]: HSET/HLEN - Big hash creation
[ok]: Is the big hash encoded with a hash table?
[ok]: HGET against the small hash
[ok]: ZRANGEBYSCORE with LIMIT
[ok]: ZRANGEBYSCORE with LIMIT and WITHSCORES
[ok]: ZRANGEBYSCORE with non-value min or max
=== (sort) Starting server 127.0.0.1:24612 ok
[ok]: ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics
[ok]: ZLEXCOUNT advanced
[ok]: ZRANGEBYSLEX with LIMIT
[ok]: BLPOP, LPUSH + DEL + SET should not awake blocked client
[ok]: ZRANGEBYLEX with invalid lex range specifiers
[ok]: BLPOP with same key multiple times should work (issue #801)
[ok]: MULTI/EXEC is isolated from the point of view of BLPOP
[ok]: BLPOP with variadic LPUSH
[ok]: BRPOPLPUSH with zero timeout should block indefinitely
[ok]: BLMOVE left left with zero timeout should block indefinitely
[ok]: BLMOVE left right with zero timeout should block indefinitely
[ok]: BLMOVE right left with zero timeout should block indefinitely
[ok]: BLMOVE right right with zero timeout should block indefinitely
[ok]: BLMOVE (left, left) with a client BLPOPing the target list
[ok]: BLMOVE (left, right) with a client BLPOPing the target list
[ok]: BLMOVE (right, left) with a client BLPOPing the target list
[ok]: BLMOVE (right, right) with a client BLPOPing the target list
[ok]: BRPOPLPUSH with wrong source type
[ok]: BRPOPLPUSH with wrong destination type
[ok]: BRPOPLPUSH maintains order of elements after failure
[ok]: BRPOPLPUSH with multiple blocked clients
[ok]: AUTH fails when a wrong password is given
[ok]: Protocol desync regression test #1
[ok]: Arbitrary command gives an error when AUTH is required
[ok]: Linked LMOVEs
[ok]: Circular BRPOPLPUSH
[ok]: AUTH succeeds when the right password is given
[ok]: Once AUTH succeeded we can actually send commands to the server
[ok]: Self-referential BRPOPLPUSH
[ok]: BRPOPLPUSH inside a transaction
[ok]: ZREMRANGEBYSCORE basics
[ok]: PUSH resulting from BRPOPLPUSH affect WATCH
[ok]: ZREMRANGEBYSCORE with non-value min or max
[ok]: BRPOPLPUSH does not affect WATCH while still blocked
[ok]: ZREMRANGEBYRANK basics
[ok]: ZUNIONSTORE against non-existing key doesn't set destination - skiplist
[ok]: ZUNION/ZINTER against non-existing key - skiplist
[ok]: ZUNIONSTORE with empty set - skiplist
[ok]: ZUNION/ZINTER with empty set - skiplist
[ok]: ZUNIONSTORE basics - skiplist
[ok]: ZUNION/ZINTER with integer members - skiplist
[ok]: ZUNIONSTORE with weights - skiplist
[ok]: ZUNION with weights - skiplist
[ok]: ZUNIONSTORE with a regular set and weights - skiplist
[ok]: Old Ziplist: SORT BY key
[ok]: ZUNIONSTORE with AGGREGATE MIN - skiplist
[ok]: Old Ziplist: SORT BY key with limit
[ok]: Old Ziplist: SORT BY hash field
[ok]: ZUNION/ZINTER with AGGREGATE MIN - skiplist
[ok]: ZUNIONSTORE with AGGREGATE MAX - skiplist
[ok]: ZUNION/ZINTER with AGGREGATE MAX - skiplist
[ok]: ZINTERSTORE basics - skiplist
[2/58 done]: unit/printver (1 seconds)
Testing unit/expire
[ok]: ZINTER basics - skiplist
[ok]: ZINTERSTORE with weights - skiplist
[ok]: ZINTER with weights - skiplist
[ok]: HSCAN with encoding hashtable
[ok]: ZINTERSTORE with a regular set and weights - skiplist
[ok]: ZINTERSTORE with AGGREGATE MIN - skiplist
[ok]: ZINTERSTORE with AGGREGATE MAX - skiplist
[ok]: ZSCAN with encoding ziplist
[ok]: ZUNIONSTORE with +inf/-inf scores - skiplist
[ok]: ZUNIONSTORE with NaN weights skiplist
[ok]: HGET against the big hash
[ok]: HGET against non existing key
[ok]: HSET in update and insert mode
[ok]: HSETNX target key missing - small hash
[ok]: HSETNX target key exists - small hash
[ok]: HSETNX target key missing - big hash
[ok]: HSETNX target key exists - big hash
[ok]: HMSET wrong number of args
[ok]: HMSET - small hash
[ok]: ZINTERSTORE with +inf/-inf scores - skiplist
[ok]: ZINTERSTORE with NaN weights skiplist
[ok]: Basic ZPOP with a single key - skiplist
[ok]: ZPOP with count - skiplist
[ok]: BZPOP with a single existing sorted set - skiplist
[ok]: BZPOP with multiple existing sorted sets - skiplist
[ok]: BZPOP second sorted set has members - skiplist
[ok]: ZINTERSTORE regression with two sets, intset+hashtable
[ok]: ZUNIONSTORE regression, should not create NaN in scores
[ok]: ZINTERSTORE #516 regression, mixed sets and ziplist zsets
[ok]: Protocol desync regression test #2
[ok]: HMSET - big hash
[ok]: HMGET against non existing key and fields
[ok]: HMGET against wrong type
[ok]: HMGET - small hash
[ok]: Set encoding after DEBUG RELOAD
[ok]: SREM basics - regular set
[ok]: SREM basics - intset
[ok]: SREM with multiple arguments
[ok]: SREM variadic version with more args needed to destroy the key
[ok]: ZSCAN with encoding skiplist
[3/58 done]: unit/auth (1 seconds)
Testing unit/other
[ok]: SCAN guarantees check under write load
[ok]: SSCAN with integer encoded object (issue #1345)
[ok]: SSCAN with PATTERN
[ok]: HMGET - big hash
[ok]: HKEYS - small hash
[ok]: HSCAN with PATTERN
[ok]: ZSCAN with PATTERN
[ok]: ZUNIONSTORE result is sorted
[ok]: HKEYS - big hash
[ok]: ZMSCORE retrieve
[ok]: ZMSCORE retrieve from empty set
[ok]: ZMSCORE retrieve with missing member
[ok]: Protocol desync regression test #3
[ok]: ZMSCORE retrieve single member
[ok]: ZMSCORE retrieve requires one or more members
[ok]: ZSET commands don't accept empty strings as valid scores
[ok]: HVALS - small hash
=== (expire) Starting server 127.0.0.1:21112 ok
[ok]: HVALS - big hash
[ok]: HGETALL - small hash
[ok]: Generated sets must be encoded as hashtable
[ok]: SINTER with two sets - hashtable
[ok]: SINTERSTORE with two sets - hashtable
[ok]: SINTERSTORE with two sets, after a DEBUG RELOAD - hashtable
=== (other) Starting server 127.0.0.1:22113 ok
[ok]: SUNION with two sets - hashtable
[ok]: Old Linked list: SORT BY key
[ok]: Old Linked list: SORT BY key with limit
[ok]: ZSCAN scores: regression test for issue #2175
[ok]: HGETALL - big hash
[ok]: HDEL and return value
[ok]: HDEL - more than a single value
[ok]: HDEL - hash becomes empty before deleting all specified fields
[ok]: HEXISTS
[ok]: Old Linked list: SORT BY hash field
[ok]: Is a ziplist encoded Hash promoted on big payload?
[ok]: HINCRBY against non existing database key
[ok]: HINCRBY against non existing hash key
[ok]: HINCRBY against hash key created by hincrby itself
[ok]: HINCRBY against hash key originally set with HSET
[ok]: ZSCORE - ziplist
[ok]: HINCRBY over 32bit value
[ok]: SUNIONSTORE with two sets - hashtable
[ok]: HINCRBY over 32bit value with over 32bit increment
[ok]: HINCRBY fails against hash value with spaces (left)
[ok]: HINCRBY fails against hash value with spaces (right)
[ok]: HINCRBY can detect overflows
[ok]: HINCRBYFLOAT against non existing database key
[ok]: SINTER against three sets - hashtable
[ok]: HINCRBYFLOAT against non existing hash key
[ok]: SINTERSTORE with three sets - hashtable
[ok]: HINCRBYFLOAT against hash key created by hincrby itself
[ok]: HINCRBYFLOAT against hash key originally set with HSET
[ok]: EXPIRE - set timeouts multiple times
[ok]: EXPIRE - It should be still possible to read 'x'
[ok]: HINCRBYFLOAT over 32bit value
[ok]: HINCRBYFLOAT over 32bit value with over 32bit increment
[ok]: HINCRBYFLOAT fails against hash value with spaces (left)
[ok]: HINCRBYFLOAT fails against hash value with spaces (right)
[ok]: HINCRBYFLOAT fails against hash value that contains a null-terminator in the middle
[ok]: SAVE - make sure there are all the types as values
[ok]: HSTRLEN against the small hash
[ok]: SUNION with non existing keys - hashtable
[ok]: ZMSCORE - ziplist
[ok]: SDIFF with two sets - hashtable
[ok]: SDIFF with three sets - hashtable
[ok]: SDIFFSTORE with three sets - hashtable
[ok]: ZSCORE after a DEBUG RELOAD - ziplist
=== (regression) Starting server 127.0.0.1:22612 ok
[ok]: Regression for a crash with blocking ops and pipelining
[ok]: Generated sets must be encoded as intset
[ok]: SINTER with two sets - intset
[ok]: SINTERSTORE with two sets - intset
[ok]: SINTERSTORE with two sets, after a DEBUG RELOAD - intset
[ok]: SUNION with two sets - intset
[ok]: SUNIONSTORE with two sets - intset
[ok]: SINTER against three sets - intset
[ok]: SINTERSTORE with three sets - intset
[ok]: SUNION with non existing keys - intset
[ok]: SDIFF with two sets - intset
[ok]: SDIFF with three sets - intset
[ok]: SDIFFSTORE with three sets - intset
[ok]: SDIFF with first set empty
[ok]: SDIFF with same set two times
[ok]: ZSET sorting stresser - ziplist
[ok]: HSTRLEN against the big hash
[ok]: HSTRLEN against non existing field
[ok]: HSTRLEN corner cases
[ok]: Hash ziplist regression test for large keys
[ok]: XCLAIM can claim PEL items from another consumer
[4/58 done]: unit/protocol (1 seconds)
Testing unit/multi
[ok]: Hash fuzzing #1 - 10 fields
[ok]: Hash fuzzing #2 - 10 fields
=== (multi) Starting server 127.0.0.1:22613 ok
[ok]: MULTI / EXEC basics
[ok]: DISCARD
[ok]: Nested MULTI are not allowed
[ok]: MULTI where commands alter argc/argv
[ok]: WATCH inside MULTI is not allowed
[ok]: EXEC fails if there are errors while queueing commands #1
[ok]: EXEC fails if there are errors while queueing commands #2
[ok]: If EXEC aborts, the client MULTI state is cleared
[ok]: EXEC works on WATCHed key not modified
[ok]: EXEC fail on WATCHed key modified (1 key of 1 watched)
[ok]: EXEC fail on WATCHed key modified (1 key of 5 watched)
[ok]: EXEC fail on WATCHed key modified by SORT with STORE even if the result is empty
[ok]: After successful EXEC key is no longer watched
[ok]: After failed EXEC key is no longer watched
[ok]: It is possible to UNWATCH
[ok]: UNWATCH when there is nothing watched works as expected
[ok]: FLUSHALL is able to touch the watched keys
[ok]: FLUSHALL does not touch non affected keys
[ok]: FLUSHDB is able to touch the watched keys
[ok]: FLUSHDB does not touch non affected keys
[ok]: WATCH is able to remember the DB a key belongs to
[ok]: WATCH will consider touched keys target of EXPIRE
[ok]: XCLAIM without JUSTID increments delivery count
[ok]: XINFO FULL output
[ok]: XGROUP CREATECONSUMER: create consumer if it does not exist
[ok]: XGROUP CREATECONSUMER: group must exist
[ok]: DEL against expired key
[ok]: EXISTS
[ok]: Zero length value in key. SET/GET/EXISTS
[ok]: Commands pipelining
[ok]: Non existing command
[ok]: RENAME basic usage
[ok]: RENAME source key should no longer exist
[ok]: RENAME against already existing key
[ok]: RENAMENX basic usage
[ok]: RENAMENX against already existing key
[ok]: RENAMENX against already existing key (2)
[ok]: RENAME against non existing source key
[ok]: RENAME where source and dest key are the same (existing)
[ok]: RENAMENX where source and dest key are the same (existing)
[ok]: RENAME where source and dest key are the same (non existing)
[ok]: RENAME with volatile key, should move the TTL as well
[ok]: RENAME with volatile key, should not inherit TTL of target key
[ok]: DEL all keys again (DB 0)
[ok]: DEL all keys again (DB 1)
[ok]: MOVE basic usage
[ok]: MOVE against key existing in the target DB
[ok]: MOVE against non-integer DB (#1428)
[ok]: MOVE can move key expire metadata as well
[ok]: MOVE does not create an expire if it does not exist
[ok]: SET/GET keys in different DBs
[ok]: RANDOMKEY
[ok]: RANDOMKEY against empty DB
[ok]: RANDOMKEY regression 1
[ok]: KEYS * two times with long key, Github issue #1208
[ok]: BRPOPLPUSH timeout
[ok]: BLPOP when new key is moved into place
[ok]: BLPOP when result key is created by SORT..STORE
[ok]: BLPOP: with single empty list argument
[ok]: BLPOP: with negative timeout
=== (stream) Starting server 127.0.0.1:28612 ok
[ok]: FUZZ stresser with data model binary
[ok]: BLPOP: with non-integer timeout
[ok]: XREADGROUP with NOACK creates consumer
[5/58 done]: unit/keyspace (2 seconds)
Testing unit/quit

Waiting for background AOF rewrite to finish... === (quit) Starting server 127.0.0.1:23112 ok
[ok]: QUIT returns OK
[ok]: Pipelined commands after QUIT must not be executed
[ok]: Pipelined commands after QUIT that exceed read buffer size
[6/58 done]: unit/quit (0 seconds)
Testing unit/aofrw
=== (aofrw) Starting server 127.0.0.1:23113 ok

Waiting for background AOF rewrite to finish... [ok]: WATCH will consider touched expired keys
[ok]: DISCARD should clear the WATCH dirty flag on the client
[ok]: DISCARD should UNWATCH all the keys
[ok]: MULTI / EXEC is propagated correctly (single write command)
[ok]: FUZZ stresser with data model alpha
[ok]: MULTI / EXEC is propagated correctly (empty transaction)
[ok]: MULTI / EXEC is propagated correctly (read-only commands)
[ok]: MULTI / EXEC is propagated correctly (write command, no effect)
[ok]: DISCARD should not fail during OOM
[ok]: BLPOP: with zero timeout should block indefinitely
[ok]: BLPOP: second argument is not a list
[ok]: Consumer without PEL is present in AOF after AOFRW
[ok]: EXPIRE - After 2.1 seconds the key should no longer be here
[ok]: EXPIRE - write on expire should work
[ok]: EXPIREAT - Check for EXPIRE alike behavior
[ok]: SETEX - Set + Expire combo operation. Check for TTL
[ok]: SETEX - Check value
[ok]: SETEX - Overwrite old key
=== () Starting server 127.0.0.1:28613 ok
[ok]: MULTI and script timeout
[ok]: Consumer group last ID propagation to slave (NOACK=0)
[ok]: Consumer group last ID propagation to slave (NOACK=1)
[ok]: EXEC and script timeout
=== (stream) Starting server 127.0.0.1:28614 ok

Waiting for background AOF rewrite to finish... [ok]: XADD mass insertion and XLEN
[ok]: XADD with ID 0-0
[ok]: XRANGE COUNT works as expected
[ok]: XREVRANGE COUNT works as expected
[ok]: BLPOP: timeout
[ok]: BLPOP: arguments are empty
[ok]: BRPOP: with single empty list argument
[ok]: BRPOP: with negative timeout
[ok]: BRPOP: with non-integer timeout
[ok]: FUZZ stresser with data model compr

Waiting for background save to finish... [ok]: MULTI-EXEC body and script timeout
[ok]: SETEX - Wait for the key to expire
[ok]: SETEX - Wrong time parameter
[ok]: PERSIST can undo an EXPIRE
[ok]: PERSIST returns 0 against non existing or non volatile keys
[ok]: Hash fuzzing #1 - 512 fields
[ok]: Very big payload random access
[ok]: just EXEC and script timeout
[ok]: exec with write commands and state change
[ok]: exec with read commands and stale replica state change
[ok]: EXEC with only read commands should not be rejected when OOM
[ok]: EXEC with at least one use-memory command should fail
[7/58 done]: unit/multi (3 seconds)
Testing unit/acl
[ok]: Empty stream with no lastid can be rewritten into AOF correctly
=== (acl) Starting server 127.0.0.1:22614 ok
[ok]: BRPOP: with zero timeout should block indefinitely
[ok]: BRPOP: second argument is not a list
[ok]: XRANGE can be used to iterate the whole stream
[ok]: Connections start with the default user
[ok]: It is possible to create new users
[ok]: New users start disabled
[ok]: Enabling the user allows the login
[ok]: Only the set of correct passwords work
[ok]: It is possible to remove passwords from the set of valid ones
[ok]: Test password hashes can be added
[ok]: Test password hashes validate input
[ok]: ACL GETUSER returns the password hash instead of the actual password
[ok]: Test hashed passwords removal
[ok]: By default users are not able to access any command
[ok]: By default users are not able to access any key
[ok]: It's possible to allow the access of a subset of keys
[ok]: Users can be configured to authenticate with any password
[ok]: ACLs can exclude single commands
[ok]: ACLs can include or exclude whole classes of commands
[ok]: ACLs can include single subcommands
[ok]: ACL GETUSER is able to translate back command permissions
[ok]: ACL GETUSER provides reasonable results
[ok]: ACL #5998 regression: memory leaks adding / removing subcommands
[ok]: ACL LOG shows failed command executions at toplevel
[ok]: ACL LOG is able to test similar events
[ok]: ACL LOG is able to log keys access violations and key name
[ok]: ACL LOG RESET is able to flush the entries in the log
[ok]: ACL LOG can distinguish the transaction context (1)
[ok]: ACL LOG can distinguish the transaction context (2)
[ok]: ACL can log errors in the context of Lua scripting
[ok]: ACL LOG can accept a numerical argument to show less entries
[ok]: ACL LOG can log failed auth attempts
[ok]: ACL LOG entries are limited to a maximum amount
[ok]: When default user is off, new connections are not authenticated
[ok]: ACL HELP should not have unexpected options
[ok]: Delete a user that the client doesn't use
[ok]: Delete a user that the client is using
[ok]: BGSAVE
[ok]: SELECT an out of range DB
[ok]: Old Big Linked list: SORT BY key
[ok]: Old Big Linked list: SORT BY key with limit
[8/58 done]: unit/type/stream-cgroups (5 seconds)
Testing unit/latency-monitor
=== () Starting server 127.0.0.1:22615 ok
=== (latency-monitor) Starting server 127.0.0.1:28615 ok
[ok]: Alice: can execute all commands
[ok]: Bob: can execute only @set and acl commands
[ok]: ACL load and save
[9/58 done]: unit/acl (1 seconds)
Testing integration/block-repl
=== (repl) Starting server 127.0.0.1:22616 ok
[ok]: ZRANGEBYSCORE fuzzy test, 100 ranges in 128 element sorted set - ziplist
=== () Starting server 127.0.0.1:22617 ok
[ok]: BRPOP: timeout
[ok]: BRPOP: arguments are empty
[ok]: BLPOP inside a transaction
[ok]: LPUSHX, RPUSHX - generic
[ok]: LPUSHX, RPUSHX - linkedlist
[ok]: LINSERT - linkedlist
[ok]: LPUSHX, RPUSHX - ziplist
[ok]: LINSERT - ziplist
[ok]: LINSERT raise error on bad syntax
[ok]: LINDEX consistency test - quicklist
[ok]: ZRANGEBYLEX fuzzy test, 100 ranges in 128 element sorted set - ziplist
[ok]: Old Big Linked list: SORT BY hash field
[ok]: LINDEX random access - quicklist
[ok]: Intset: SORT BY key
[ok]: EXPIRE precision is now the millisecond
[ok]: Intset: SORT BY key with limit
[ok]: Intset: SORT BY hash field
[ok]: Hash fuzzing #2 - 512 fields

Waiting for background AOF rewrite to finish... [ok]: Check if list is still ok after a DEBUG RELOAD - quicklist
[ok]: SET 10000 numeric keys and access all them in reverse order
[ok]: DBSIZE should be 10000 now
[ok]: SETNX target key missing
[ok]: SETNX target key exists
[ok]: SETNX against not-expired volatile key
[ok]: LINDEX consistency test - quicklist
[ok]: First server should have role slave after SLAVEOF
[ok]: Hash table: SORT BY key
[ok]: Hash table: SORT BY key with limit
[ok]: Hash table: SORT BY hash field
sub-second expire test attempts: 0
[ok]: PEXPIRE/PSETEX/PEXPIREAT can set sub-second expires
[ok]: TTL returns time to live in seconds
[ok]: PTTL returns time to live in milliseconds
[ok]: TTL / PTTL return -1 if key has no expire
[ok]: TTL / PTTL return -2 if key does not exist
[ok]: LINDEX random access - quicklist
[ok]: Check consistency of different data types after a reload

Waiting for background AOF rewrite to finish... [ok]: ZREMRANGEBYLEX fuzzy test, 100 ranges in 128 element sorted set - ziplist
[ok]: ZSETs skiplist implementation backlink consistency test - ziplist
[ok]: Check if list is still ok after a DEBUG RELOAD - quicklist
[ok]: LLEN against non-list value error
[ok]: LLEN against non existing key
[ok]: LINDEX against non-list value error
[ok]: LINDEX against non existing key
[ok]: LPUSH against non-list value error
[ok]: RPUSH against non-list value error
[ok]: RPOPLPUSH base case - linkedlist
[ok]: LMOVE left left base case - linkedlist
[ok]: LMOVE left right base case - linkedlist
[ok]: LMOVE right left base case - linkedlist
[ok]: LMOVE right right base case - linkedlist
[ok]: RPOPLPUSH with the same list as src and dst - linkedlist
[ok]: LMOVE left left with the same list as src and dst - linkedlist
[ok]: LMOVE left right with the same list as src and dst - linkedlist
[ok]: LMOVE right left with the same list as src and dst - linkedlist
[ok]: LMOVE right right with the same list as src and dst - linkedlist
[ok]: RPOPLPUSH with linkedlist source and existing target linkedlist
[ok]: LMOVE left left with linkedlist source and existing target linkedlist
[ok]: LMOVE left right with linkedlist source and existing target linkedlist
[ok]: LMOVE right left with linkedlist source and existing target linkedlist
[ok]: LMOVE right right with linkedlist source and existing target linkedlist
[ok]: RPOPLPUSH with linkedlist source and existing target ziplist
[ok]: LMOVE left left with linkedlist source and existing target ziplist
[ok]: LMOVE left right with linkedlist source and existing target ziplist
[ok]: LMOVE right left with linkedlist source and existing target ziplist
[ok]: LMOVE right right with linkedlist source and existing target ziplist
[ok]: RPOPLPUSH base case - ziplist
[ok]: LMOVE left left base case - ziplist
[ok]: LMOVE left right base case - ziplist
[ok]: LMOVE right left base case - ziplist
[ok]: LMOVE right right base case - ziplist
[ok]: RPOPLPUSH with the same list as src and dst - ziplist
[ok]: LMOVE left left with the same list as src and dst - ziplist
[ok]: LMOVE left right with the same list as src and dst - ziplist
[ok]: LMOVE right left with the same list as src and dst - ziplist
[ok]: LMOVE right right with the same list as src and dst - ziplist
[ok]: RPOPLPUSH with ziplist source and existing target linkedlist
[ok]: LMOVE left left with ziplist source and existing target linkedlist
[ok]: LMOVE left right with ziplist source and existing target linkedlist
[ok]: LMOVE right left with ziplist source and existing target linkedlist
[ok]: LMOVE right right with ziplist source and existing target linkedlist
[ok]: RPOPLPUSH with ziplist source and existing target ziplist
[ok]: LMOVE left left with ziplist source and existing target ziplist
[ok]: LMOVE left right with ziplist source and existing target ziplist
[ok]: LMOVE right left with ziplist source and existing target ziplist
[ok]: LMOVE right right with ziplist source and existing target ziplist
[ok]: RPOPLPUSH against non existing key
[ok]: RPOPLPUSH against non list src key
[ok]: RPOPLPUSH against non list dst key
[ok]: RPOPLPUSH against non existing src key
[ok]: Basic LPOP/RPOP - linkedlist
[ok]: Basic LPOP/RPOP - ziplist
[ok]: LPOP/RPOP against non list value
[ok]: Mass RPOP/LPOP - quicklist
[ok]: Mass RPOP/LPOP - quicklist
[ok]: LRANGE basics - linkedlist
[ok]: LRANGE inverted indexes - linkedlist
[ok]: LRANGE out of range indexes including the full list - linkedlist
[ok]: LRANGE out of range negative end index - linkedlist
[ok]: LRANGE basics - ziplist
[ok]: LRANGE inverted indexes - ziplist
[ok]: LRANGE out of range indexes including the full list - ziplist
[ok]: LRANGE out of range negative end index - ziplist
[ok]: LRANGE against non existing key
[ok]: LTRIM basics - linkedlist
[ok]: LTRIM out of range negative end index - linkedlist
[ok]: LTRIM basics - ziplist
[ok]: LTRIM out of range negative end index - ziplist
[ok]: LSET - linkedlist
[ok]: LSET out of range index - linkedlist
[ok]: LSET - ziplist
[ok]: LSET out of range index - ziplist
[ok]: LSET against non existing key
[ok]: LSET against non list value
[ok]: LREM remove all the occurrences - linkedlist
[ok]: LREM remove the first occurrence - linkedlist
[ok]: LREM remove non existing element - linkedlist
[ok]: LREM starting from tail with negative count - linkedlist
[ok]: LREM starting from tail with negative count (2) - linkedlist
[ok]: LREM deleting objects that may be int encoded - linkedlist
[ok]: LREM remove all the occurrences - ziplist
[ok]: LREM remove the first occurrence - ziplist
[ok]: LREM remove non existing element - ziplist
[ok]: LREM starting from tail with negative count - ziplist
[ok]: LREM starting from tail with negative count (2) - ziplist
[ok]: LREM deleting objects that may be int encoded - ziplist
[ok]: Redis should actively expire keys incrementally
[ok]: Same dataset digest if saving/reloading as AOF?

Waiting for background AOF rewrite to finish... [ok]: ZSETs ZRANK augmented skip list stress testing - ziplist
[ok]: BZPOPMIN, ZADD + DEL should not awake blocked client
[ok]: BZPOPMIN, ZADD + DEL + SET should not awake blocked client
[ok]: BZPOPMIN with same key multiple times should work
[ok]: MULTI/EXEC is isolated from the point of view of BZPOPMIN
[ok]: BZPOPMIN with variadic ZADD
[ok]: Test latency events logging
[ok]: LATENCY HISTORY output is ok
[ok]: LATENCY LATEST output is ok
[ok]: LATENCY HISTORY / RESET with wrong event name is fine
[ok]: LATENCY DOCTOR produces some output
[ok]: LATENCY RESET is able to reset events
[ok]: Regression for bug 593 - chaining BRPOPLPUSH with other blocking cmds
[ok]: client unblock tests
[ok]: Redis should lazy expire keys
[10/58 done]: unit/type/list (9 seconds)
Testing integration/replication
[ok]: EXPIRES after a reload (snapshot + append only file rewrite)

Waiting for background AOF rewrite to finish... === (repl) Starting server 127.0.0.1:25112 ok
[ok]: Stress test the hash ziplist -> hashtable encoding conversion
[ok]: Test HINCRBYFLOAT for correct float representation (issue #2846)
=== () Starting server 127.0.0.1:25113 ok
[ok]: XREVRANGE returns the reverse of XRANGE
[ok]: XREAD with non empty stream
[ok]: Non blocking XREAD with empty streams
[ok]: XREAD with non empty second stream
[ok]: Slave enters handshake
[ok]: Blocking XREAD waiting new data
[ok]: Blocking XREAD waiting old data
[ok]: Blocking XREAD will not reply with an empty array
[ok]: XREAD: XADD + DEL should not awake client
[ok]: XREAD: XADD + DEL + LPUSH should not awake client
[ok]: XREAD with same stream name multiple times should work
[ok]: XREAD + multiple XADD inside transaction
[ok]: XDEL basic test
[ok]: BZPOPMIN with zero timeout should block indefinitely
[ok]: ZSCORE - skiplist
[ok]: ZMSCORE - skiplist
[ok]: ZSCORE after a DEBUG RELOAD - skiplist
[11/58 done]: unit/type/hash (9 seconds)
Testing integration/replication-2
[ok]: ZSET sorting stresser - skiplist
=== (repl) Starting server 127.0.0.1:27612 ok
=== () Starting server 127.0.0.1:27613 ok
[ok]: SDIFF fuzzing
[ok]: SINTER against non-set should throw error
[ok]: SUNION against non-set should throw error
[ok]: SINTER should handle non existing key as empty
[ok]: SINTER with same integer elements but different encoding
[ok]: SINTERSTORE against non existing keys should delete dstkey
[ok]: SUNIONSTORE against non existing keys should delete dstkey
[ok]: SPOP basics - hashtable
[ok]: SPOP with <count>=1 - hashtable
[ok]: SRANDMEMBER - hashtable
[ok]: SPOP basics - intset
[ok]: SPOP with <count>=1 - intset
[ok]: SRANDMEMBER - intset
[ok]: SPOP with 
[ok]: SPOP with 
[ok]: SPOP using integers, testing Knuth's and Floyd's algorithm
[ok]: SPOP using integers with Knuth's algorithm
[ok]: SPOP new implementation: code path #1
[ok]: SPOP new implementation: code path #2
[ok]: SPOP new implementation: code path #3
[ok]: SRANDMEMBER with <count> against non existing key
[ok]: SRANDMEMBER with <count> - hashtable
[ok]: SRANDMEMBER with <count> - intset
[ok]: SMOVE basics - from regular set to intset
[ok]: SMOVE basics - from intset to regular set
[ok]: SMOVE non existing key
[ok]: SMOVE non existing src set
[ok]: SMOVE from regular set to non existing destination set
[ok]: SMOVE from intset to non existing destination set
[ok]: SMOVE wrong src key type
[ok]: SMOVE wrong dst key type
[ok]: SMOVE with identical source and destination
[ok]: EXPIRE should not resurrect keys (issue #1026)
[ok]: 5 keys in, 5 keys out
[ok]: EXPIRE with empty string as TTL should report an error
[ok]: First server should have role slave after SLAVEOF
[ok]: If min-slaves-to-write is honored, write is accepted
[ok]: No write if min-slaves-to-write is < attached slaves
[ok]: If min-slaves-to-write is honored, write is accepted (again)
[ok]: Big Hash table: SORT BY key
[ok]: Big Hash table: SORT BY key with limit
[ok]: SETNX against expired volatile key
[ok]: MGET
[ok]: MGET against non existing key
[ok]: MGET against non-string key
[ok]: GETSET (set new value)
[ok]: GETSET (replace old value)
[ok]: MSET base case
[ok]: MSET wrong number of args
[ok]: MSETNX with already existent key
[ok]: MSETNX with not existing keys
[ok]: STRLEN against non-existing key
[ok]: STRLEN against integer-encoded value
[ok]: STRLEN against plain string
[ok]: SETBIT against non-existing key
[ok]: SETBIT against string-encoded key
[ok]: SETBIT against integer-encoded key
[ok]: SETBIT against key with wrong type
[ok]: SETBIT with out of range bit offset
[ok]: SETBIT with non-bit argument
[ok]: SCAN regression test for issue #4906
[12/58 done]: unit/scan (10 seconds)
Testing integration/replication-3
=== (repl) Starting server 127.0.0.1:23612 ok
[ok]: Big Hash table: SORT BY hash field
[ok]: SORT GET #
[ok]: SORT GET 
[ok]: SORT GET (key and hash) with sanity check
[ok]: SORT BY key STORE
[ok]: SORT BY hash field STORE
[ok]: SORT extracts STORE correctly
[ok]: SORT extracts multiple STORE correctly
[ok]: SORT DESC
[ok]: SORT ALPHA against integer encoded strings
[ok]: SORT sorted set
[ok]: SORT sorted set BY nosort should retain ordering
[ok]: SORT sorted set BY nosort + LIMIT
[ok]: SORT sorted set BY nosort works as expected from scripts
[ok]: SORT sorted set: +inf and -inf handling
[ok]: SORT regression for issue #19, sorting floats
[ok]: SORT with STORE returns zero if result is empty (github issue 224)
[ok]: SORT with STORE does not create empty lists (github issue 224)
[ok]: SORT with STORE removes key if result is empty (github issue 227)
[ok]: SORT with BY  and STORE should still order output
[ok]: SORT will complain with numerical sorting and bad doubles (1)
[ok]: SORT will complain with numerical sorting and bad doubles (2)
[ok]: SORT BY sub-sorts lexicographically if score is the same
[ok]: SORT GET with pattern ending with just -> does not get hash field
[ok]: SORT by nosort retains native order for lists
[ok]: SORT by nosort plus store retains native order for lists
[ok]: SORT by nosort with limit returns based on original list order
=== () Starting server 127.0.0.1:23613 ok

  Average time to sort: 0.23000000000000001 milliseconds [ok]: SORT speed, 100 element list BY key, 100 times

  Average time to sort: 0.27000000000000002 milliseconds [ok]: SORT speed, 100 element list BY hash field, 100 times

  Average time to sort: 0.19 milliseconds [ok]: SORT speed, 100 element list directly, 100 times
[ok]: SETBIT fuzzing
[ok]: GETBIT against non-existing key
[ok]: GETBIT against string-encoded key
[ok]: GETBIT against integer-encoded key
[ok]: SETRANGE against non-existing key

  Average time to sort: 0.19 milliseconds [ok]: SORT speed, 100 element list BY <nothing>, 100 times
[ok]: SETRANGE against string-encoded key
[ok]: SETRANGE against integer-encoded key
[ok]: SETRANGE against key with wrong type
[ok]: SETRANGE with out of range offset
[ok]: GETRANGE against non-existing key
[ok]: GETRANGE against string value
[ok]: GETRANGE against integer-encoded value
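As a hypothetical illustration of the SETRANGE/GETRANGE behaviour being checked above (key and offsets illustrative):

    test {SETRANGE/GETRANGE sketch (illustrative only)} {
        r set key1 "Hello World"
        # SETRANGE overwrites part of the string and returns the new length.
        assert_equal 11 [r setrange key1 6 "Redis"]
        assert_equal "Hello Redis" [r get key1]
        # GETRANGE uses inclusive offsets; negative offsets count from the end.
        assert_equal "Hello" [r getrange key1 0 4]
        assert_equal "Redis" [r getrange key1 -5 -1]
    }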
[ok]: First server should have role slave after SLAVEOF
[13/58 done]: unit/sort (10 seconds)
Testing integration/replication-4
=== (repl) Starting server 127.0.0.1:24613 ok
=== () Starting server 127.0.0.1:24614 ok
[ok]: XDEL fuzz test
[ok]: EXPIRES after AOF reload (without rewrite)
[ok]: ZRANGEBYSCORE fuzzy test, 100 ranges in 100 element sorted set - skiplist
[ok]: First server should have role slave after SLAVEOF
[ok]: Stress tester for #3343-alike bugs
[ok]: ZRANGEBYLEX fuzzy test, 100 ranges in 100 element sorted set - skiplist
[ok]: intsets implementation stress testing
[14/58 done]: unit/type/set (13 seconds)
Testing integration/replication-psync
=== (repl) Starting server 127.0.0.1:26612 ok
[ok]: LTRIM stress testing - linkedlist
=== () Starting server 127.0.0.1:26613 ok
[ok]: Slave should be able to synchronize with the master
[ok]: ZREMRANGEBYLEX fuzzy test, 100 ranges in 100 element sorted set - skiplist
[ok]: ZSETs skiplist implementation backlink consistency test - skiplist
[ok]: GETRANGE fuzzing
[ok]: Extended SET can detect syntax errors
[ok]: Extended SET NX option
[ok]: Extended SET XX option
[ok]: Extended SET GET option
[ok]: Extended SET GET option with no previous value
[ok]: Extended SET GET with NX option should result in syntax err
[ok]: Extended SET EX option
[ok]: Extended SET PX option
[ok]: Extended SET using multiple options at once
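The extended SET options exercised above (NX/XX/EX/PX/GET) combine creation rules, TTLs and old-value retrieval. A small hypothetical sketch, assuming the suite's `assert_range` helper (key and TTL values illustrative):

    test {Extended SET options sketch (illustrative only)} {
        r del foo
        # NX only sets the key if it does not exist; EX attaches a TTL in seconds.
        assert_equal {OK} [r set foo bar EX 100 NX]
        assert_range [r ttl foo] 90 100
        # A second NX attempt fails and replies nil (an empty string in Tcl).
        assert_equal {} [r set foo other NX]
        # GET returns the previous value while the new one is written.
        assert_equal {bar} [r set foo baz GET]
        assert_equal {baz} [r get foo]
    }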
[ok]: GETRANGE with huge ranges, Github issue #1844
[ok]: STRALGO LCS string output with STRINGS option
[ok]: STRALGO LCS len
[ok]: LCS with KEYS option
[ok]: LCS indexes
[ok]: LCS indexes with match len
[ok]: LCS indexes with match len and minimum match len
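STRALGO LCS, which those tests cover, computes the longest common subsequence of two keys or literal strings. A hypothetical sketch using the example strings from the Redis documentation:

    test {STRALGO LCS sketch (illustrative only)} {
        r mset key1 ohmytext key2 mynewtext
        # By default the common subsequence itself is returned.
        assert_equal "mytext" [r stralgo lcs keys key1 key2]
        # LEN returns only its length.
        assert_equal 6 [r stralgo lcs keys key1 key2 len]
    }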
[15/58 done]: unit/type/string (13 seconds)
Testing integration/aof
=== () Starting server 127.0.0.1:24112 
[ok]: Unfinished MULTI: Server should start if load-truncated is yes
=== () Starting server 127.0.0.1:24113 
[ok]: SET - use EX/PX option, TTL should not be reset after loadaof
[ok]: SET command will remove expire
[ok]: SET - use KEEPTTL option, TTL should not be removed
[ok]: Short read: Server should start if load-truncated is yes
[ok]: Truncated AOF loaded: we expect foo to be equal to 5
[ok]: Append a new command after loading an incomplete AOF
[ok]: Detect write load to master
=== () Starting server 127.0.0.1:24114 
[ok]: Short read + command: Server should start
[ok]: Truncated AOF loaded: we expect foo to be equal to 6 now
[ok]: Test replication partial resync: no reconnection, just sync (diskless: no, disabled, reconnect: 0)
=== () Starting server 127.0.0.1:24115 
[ok]: Bad format: Server should have logged an error
=== () Starting server 127.0.0.1:24116 
[ok]: Unfinished MULTI: Server should have logged an error
=== () Starting server 127.0.0.1:24117 
[ok]: Short read: Server should have logged an error
=== (repl) Starting server 127.0.0.1:26614 ok
[ok]: Short read: Utility should confirm the AOF is not valid
[ok]: Short read: Utility should be able to fix the AOF
[ok]: ZSETs ZRANK augmented skip list stress testing - skiplist
[ok]: BZPOPMIN, ZADD + DEL should not awake blocked client
[ok]: BZPOPMIN, ZADD + DEL + SET should not awake blocked client
[ok]: BZPOPMIN with same key multiple times should work
[ok]: MULTI/EXEC is isolated from the point of view of BZPOPMIN
[ok]: BZPOPMIN with variadic ZADD
=== () Starting server 127.0.0.1:24118 
[ok]: Fixed AOF: Server should have been started
[ok]: Fixed AOF: Keyspace should contain values that were parseable
=== () Starting server 127.0.0.1:26615 ok
=== () Starting server 127.0.0.1:24119 
[ok]: AOF+SPOP: Server should have been started
[ok]: AOF+SPOP: Set should have 1 member
[ok]: Slave should be able to synchronize with the master
[ok]: PIPELINING stresser (also a regression for the old epoll bug)
[ok]: APPEND basics
[ok]: APPEND basics, integer encoded values
=== () Starting server 127.0.0.1:24120 
[ok]: AOF+SPOP: Server should have been started
[ok]: AOF+SPOP: Set should have 1 member
=== () Starting server 127.0.0.1:24121 
[ok]: AOF+EXPIRE: Server should have been started
[ok]: AOF+EXPIRE: List should be empty
[ok]: ziplist implementation: value encoding and backlink
[ok]: MIGRATE cached connections are released after some time
=== (repl) Starting server 127.0.0.1:21613 ok
[ok]: SET - use KEEPTTL option, TTL should not be removed after loadaof
[ok]: No write if the slave lag is > min-slaves-max-lag
[ok]: min-slaves-to-write is ignored by slaves
[ok]: BZPOPMIN with zero timeout should block indefinitely
[ok]: APPEND fuzzing
=== () Starting server 127.0.0.1:24122 ok
[ok]: Redis should not try to convert DEL into EXPIREAT for EXPIRE -1
[16/58 done]: unit/expire (15 seconds)
Testing integration/rdb
[ok]: MIGRATE is able to migrate a key between two instances
=== (repl) Starting server 127.0.0.1:21614 ok
[ok]: Detect write load to master
[ok]: FLUSHDB
[ok]: Perform a final SAVE to leave a clean DB on disk
[ok]: MIGRATE is able to copy a key between two instances
=== () Starting server 127.0.0.1:21113 ok
[ok]: RDB encoding loading test
=== (repl) Starting server 127.0.0.1:21615 ok
[17/58 done]: unit/other (15 seconds)
Testing integration/convert-zipmap-hash-on-load
=== () Starting server 127.0.0.1:24123 ok
[ok]: MIGRATE will not overwrite existing keys, unless REPLACE is used
=== () Starting server 127.0.0.1:22114 ok
=== () Starting server 127.0.0.1:21114 ok
=== (repl) Starting server 127.0.0.1:21616 ok
[ok]: RDB load zipmap hash: converts to ziplist
[ok]: Server started empty with non-existing RDB file
[ok]: MIGRATE propagates TTL correctly
=== () Starting server 127.0.0.1:22115 ok
[ok]: RDB load zipmap hash: converts to hash table when hash-max-ziplist-entries is exceeded
=== () Starting server 127.0.0.1:21115 ok
[ok]: Server started empty with empty RDB file
=== () Starting server 127.0.0.1:22116 ok
[ok]: RDB load zipmap hash: converts to hash table when hash-max-ziplist-value is exceeded
=== () Starting server 127.0.0.1:21116 ok
[18/58 done]: integration/convert-zipmap-hash-on-load (1 seconds)
Testing integration/logging
[ok]: Test RDB stream encoding
[ok]: ZSET skiplist order consistency when elements are moved
=== () Starting server 127.0.0.1:22117 ok
[19/58 done]: unit/type/zset (18 seconds)
Testing integration/psync2
=== () Starting server 127.0.0.1:21117 
[ok]: Server should not start if RDB file can't be open
=== () Starting server 127.0.0.1:21118 
[ok]: Server is able to generate a stack trace on selected systems
[ok]: Server should not start if RDB is corrupted
=== (psync2) Starting server 127.0.0.1:27112 ok
=== () Starting server 127.0.0.1:21119 ok
=== () Starting server 127.0.0.1:27113 ok
[20/58 done]: integration/logging (1 seconds)
Testing integration/psync2-reg
=== (psync2) Starting server 127.0.0.1:22118 ok
[ok]: Test FLUSHALL aborts bgsave
=== () Starting server 127.0.0.1:27114 ok
[ok]: bgsave resets the change counter
=== () Starting server 127.0.0.1:27115 ok
=== () Starting server 127.0.0.1:22119 ok
=== () Starting server 127.0.0.1:27116 ok
[ok]: PSYNC2: --- CYCLE 1 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #1 as master
[ok]: PSYNC2: Set #4 to replicate from #1
[ok]: PSYNC2: Set #0 to replicate from #1
[ok]: PSYNC2: Set #3 to replicate from #1
[ok]: PSYNC2: Set #2 to replicate from #4
[ok]: Test replication with parallel clients writing in different DBs
=== () Starting server 127.0.0.1:22120 ok
=== () Starting server 127.0.0.1:21120 ok
=== (repl) Starting server 127.0.0.1:24615 ok
=== () Starting server 127.0.0.1:24616 ok
[ok]: Slave is able to detect timeout during handshake
[ok]: First server should have role slave after SLAVEOF
[ok]: With min-slaves-to-write (1,3): master should be writable
[ok]: With min-slaves-to-write (2,3): master should not be writable
[ok]: PSYNC2: cluster is consistent after failover
[ok]: PSYNC2 #3899 regression: setup
[ok]: PSYNC2 #3899 regression: kill first replica
=== (repl) Starting server 127.0.0.1:25114 ok
=== () Starting server 127.0.0.1:25115 ok
[ok]: Set instance A as slave of B
[ok]: LATENCY of expire events is correctly collected
[ok]: LATENCY HELP should not have unexpected options
[ok]: PSYNC2 #3899 regression: kill chained replica
[ok]: PSYNC2 #3899 regression: kill chained replica
[21/58 done]: unit/latency-monitor (16 seconds)
Testing integration/psync2-pingoff
=== (psync2) Starting server 127.0.0.1:28616 ok
=== () Starting server 127.0.0.1:28617 ok
[ok]: INCRBYFLOAT replication, should not remove expire
[ok]: BRPOPLPUSH replication, when blocking against empty list
[ok]: PSYNC2 pingoff: setup
[ok]: PSYNC2 pingoff: write and wait replication
[ok]: BRPOPLPUSH replication, list exists
[ok]: BLMOVE (left, left) replication, when blocking against empty list
[ok]: AOF fsync always barrier issue
[22/58 done]: integration/aof (9 seconds)
Testing integration/redis-cli
=== (cli) Starting server 127.0.0.1:24124 ok
[ok]: Interactive CLI: INFO response should be printed raw
[ok]: Interactive CLI: Status reply
[ok]: Interactive CLI: Integer reply
[ok]: Interactive CLI: Bulk reply
[ok]: Interactive CLI: Multi-bulk reply
[ok]: Interactive CLI: Parsing quotes
[ok]: Non-interactive TTY CLI: Status reply
[ok]: Non-interactive TTY CLI: Integer reply
[ok]: Non-interactive TTY CLI: Bulk reply
[ok]: Non-interactive TTY CLI: Multi-bulk reply
[ok]: Non-interactive TTY CLI: Read last argument from pipe
[ok]: Non-interactive TTY CLI: Read last argument from file
[ok]: Non-interactive non-TTY CLI: Status reply
[ok]: Non-interactive non-TTY CLI: Integer reply
[ok]: Non-interactive non-TTY CLI: Bulk reply
[ok]: Non-interactive non-TTY CLI: Multi-bulk reply
[ok]: Non-interactive non-TTY CLI: Read last argument from pipe
[ok]: Non-interactive non-TTY CLI: Read last argument from file
[ok]: BLMOVE (left, left) replication, list exists
[ok]: BLMOVE (left, right) replication, when blocking against empty list
[ok]: With min-slaves-to-write: master not writable with lagged slave
[ok]: PSYNC2 #3899 regression: kill chained replica
[ok]: client freed during loading
[23/58 done]: integration/rdb (7 seconds)
Testing unit/pubsub
[ok]: MASTER and SLAVE consistency with expire
=== (pubsub) Starting server 127.0.0.1:21121 ok
[ok]: Test replication partial resync: ok psync (diskless: no, disabled, reconnect: 1)
[ok]: Pub/Sub PING
[ok]: PUBLISH/SUBSCRIBE basics
[ok]: PUBLISH/SUBSCRIBE with two clients
[ok]: PUBLISH/SUBSCRIBE after UNSUBSCRIBE without arguments
[ok]: SUBSCRIBE to one channel more than once
[ok]: UNSUBSCRIBE from non-subscribed channels
[ok]: PUBLISH/PSUBSCRIBE basics
[ok]: PUBLISH/PSUBSCRIBE with two clients
[ok]: PUBLISH/PSUBSCRIBE after PUNSUBSCRIBE without arguments
[ok]: PUNSUBSCRIBE from non-subscribed channels
[ok]: NUMSUB returns numbers, not strings (#1561)
[ok]: Mix SUBSCRIBE and PSUBSCRIBE
[ok]: PUNSUBSCRIBE and UNSUBSCRIBE should always reply
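The Pub/Sub block above relies on a second, deferring connection to observe deliveries. A minimal hypothetical sketch of that pattern, assuming the suite's `redis_deferring_client` helper (channel name illustrative):

    test {PUBLISH/SUBSCRIBE sketch (illustrative only)} {
        set rd [redis_deferring_client]
        $rd subscribe chan
        # The first read is the subscribe confirmation: channel plus subscription count.
        assert_equal {subscribe chan 1} [$rd read]
        # PUBLISH replies with the number of clients that received the message.
        assert_equal 1 [r publish chan hello]
        assert_equal {message chan hello} [$rd read]
        $rd close
    }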
[ok]: Keyspace notifications: we receive keyspace notifications
[ok]: Keyspace notifications: we receive keyevent notifications
[ok]: Keyspace notifications: we can receive both kind of events
[ok]: Keyspace notifications: we are able to mask events
[ok]: Keyspace notifications: general events test
[ok]: Keyspace notifications: list events test
[ok]: Keyspace notifications: set events test
[ok]: Keyspace notifications: zset events test
[ok]: Keyspace notifications: hash events test
=== (repl) Starting server 127.0.0.1:24617 ok
[ok]: PSYNC2 #3899 regression: kill first replica
=== () Starting server 127.0.0.1:24618 ok
[ok]: Keyspace notifications: expired events (triggered expire)
[ok]: First server should have role slave after SLAVEOF
[ok]: BLMOVE (left, right) replication, list exists
[ok]: BLMOVE (right, left) replication, when blocking against empty list
[ok]: Keyspace notifications: expired events (background expire)
[ok]: Keyspace notifications: evicted events
[ok]: Keyspace notifications: test CONFIG GET/SET of event flags
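Keyspace notifications, as tested above, are delivered over Pub/Sub on the __keyspace@<db>__ and __keyevent@<db>__ channels once enabled via notify-keyspace-events. A hypothetical sketch (the suite runs against DB 9, hence the channel name; key and flags illustrative):

    test {Keyspace notification sketch (illustrative only)} {
        r config set notify-keyspace-events KEA
        set rd [redis_deferring_client]
        $rd psubscribe __keyevent@9__:*
        $rd read ;# psubscribe confirmation
        r set foo bar
        # The event channel names the command, the payload is the affected key.
        assert_equal {pmessage __keyevent@9__:* __keyevent@9__:set foo} [$rd read]
        $rd close
        r config set notify-keyspace-events {}
    }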
[ok]: AOF rewrite during write load: RDB preamble=yes
=== (repl) Starting server 127.0.0.1:26616 ok
[ok]: PSYNC2 #3899 regression: kill chained replica
=== () Starting server 127.0.0.1:26617 ok
[24/58 done]: unit/pubsub (1 seconds)
Testing unit/slowlog
[ok]: Slave should be able to synchronize with the master
=== (slowlog) Starting server 127.0.0.1:21122 ok
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 21962)
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: PSYNC2: --- CYCLE 2 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #3 as master
[ok]: PSYNC2: Set #0 to replicate from #3
[ok]: PSYNC2: Set #1 to replicate from #0
[ok]: PSYNC2: Set #2 to replicate from #0
[ok]: PSYNC2: Set #4 to replicate from #2
[ok]: PSYNC2: cluster is consistent after failover
[ok]: SLOWLOG - check that it starts with an empty log
[ok]: SLOWLOG - only logs commands taking more time than specified
[ok]: SLOWLOG - max entries is correctly handled
[ok]: SLOWLOG - GET optional argument to limit output len works
[ok]: SLOWLOG - RESET subcommand works
[ok]: SLOWLOG - logged entry sanity check
[ok]: SLOWLOG - commands with too many arguments are trimmed
[ok]: SLOWLOG - too long arguments are trimmed
[ok]: BLMOVE (right, left) replication, list exists
[ok]: BLMOVE (right, right) replication, when blocking against empty list
[ok]: SLOWLOG - EXEC is not logged, just executed commands
[ok]: SLOWLOG - can clean older entries
[ok]: PSYNC2 #3899 regression: kill first replica
[ok]: Detect write load to master
[ok]: SLOWLOG - can be disabled
[25/58 done]: unit/slowlog (2 seconds)
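As a hypothetical illustration of the checks that slowlog unit just finished (threshold values illustrative): each SLOWLOG GET entry carries an id, a timestamp, a duration in microseconds and the command's argument vector.

    test {SLOWLOG sketch (illustrative only)} {
        # Threshold 0 logs every command.
        r config set slowlog-log-slower-than 0
        r slowlog reset
        r set foo bar
        set entry [lindex [r slowlog get] 0]
        # Field 3 of an entry is the argument vector of the logged command.
        assert_equal {set foo bar} [lindex $entry 3]
        r config set slowlog-log-slower-than 10000
    }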
Testing unit/scripting
=== (scripting) Starting server 127.0.0.1:21123 ok
[ok]: EVAL - Does the Lua interpreter reply to our requests?
[ok]: EVAL - Lua integer -> Redis protocol type conversion
[ok]: EVAL - Lua string -> Redis protocol type conversion
[ok]: EVAL - Lua true boolean -> Redis protocol type conversion
[ok]: EVAL - Lua false boolean -> Redis protocol type conversion
[ok]: EVAL - Lua status code reply -> Redis protocol type conversion
[ok]: EVAL - Lua error reply -> Redis protocol type conversion
[ok]: EVAL - Lua table -> Redis protocol type conversion
[ok]: EVAL - Are the KEYS and ARGV arrays populated correctly?
[ok]: EVAL - is Lua able to call Redis API?
[ok]: EVALSHA - Can we call a SHA1 if already defined?
[ok]: EVALSHA - Can we call a SHA1 in uppercase?
[ok]: EVALSHA - Do we get an error on invalid SHA1?
[ok]: EVALSHA - Do we get an error on non defined SHA1?
[ok]: EVAL - Redis integer -> Lua type conversion
[ok]: EVAL - Redis bulk -> Lua type conversion
[ok]: EVAL - Redis multi bulk -> Lua type conversion
[ok]: EVAL - Redis status reply -> Lua type conversion
[ok]: EVAL - Redis error reply -> Lua type conversion
[ok]: EVAL - Redis nil bulk reply -> Lua type conversion
[ok]: EVAL - Is the Lua client using the currently selected DB?
[ok]: EVAL - SELECT inside Lua should not affect the caller
[ok]: EVAL - Scripts can't run certain commands
[ok]: EVAL - Scripts can't run XREAD and XREADGROUP with BLOCK option
[ok]: EVAL - Scripts can't run certain commands
[ok]: EVAL - No arguments to redis.call/pcall is considered an error
[ok]: EVAL - redis.call variant raises a Lua error on Redis cmd error (1)
[ok]: EVAL - redis.call variant raises a Lua error on Redis cmd error (2)
[ok]: EVAL - redis.call variant raises a Lua error on Redis cmd error (3)
[ok]: EVAL - JSON numeric decoding
[ok]: EVAL - JSON string decoding
[ok]: EVAL - cmsgpack can pack double?
[ok]: EVAL - cmsgpack can pack negative int64?
[ok]: EVAL - cmsgpack can pack and unpack circular references?
[ok]: EVAL - Numerical sanity check from bitop
[ok]: EVAL - Verify minimal bitop functionality
[ok]: EVAL - Able to parse trailing comments
[ok]: SCRIPTING FLUSH - is able to clear the scripts cache?
[ok]: SCRIPT EXISTS - can detect already defined scripts?
[ok]: SCRIPT LOAD - is able to register scripts in the scripting cache
[ok]: In the context of Lua the output of random commands gets ordered
[ok]: SORT is normally not alpha re-ordered for the scripting engine
[ok]: SORT BY <constant> output gets ordered for scripting
[ok]: SORT BY <constant> with GET gets ordered for scripting
[ok]: redis.sha1hex() implementation
[ok]: Globals protection reading an undeclared global variable
[ok]: Globals protection setting an undeclared global*
[ok]: Test an example script DECR_IF_GT
[ok]: Scripting engine resets PRNG at every script execution
[ok]: Scripting engine PRNG can be seeded correctly
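A brief hypothetical sketch of the Lua <-> Redis type conversions the EVAL tests above walk through (scripts and keys illustrative):

    test {EVAL conversion sketch (illustrative only)} {
        # Lua numbers become RESP integers, Lua strings become bulk strings.
        assert_equal 1 [r eval {return 1} 0]
        assert_equal {hello} [r eval {return "hello"} 0]
        # KEYS and ARGV are populated from the trailing arguments.
        assert_equal {k1 v1} [r eval {return {KEYS[1], ARGV[1]}} 1 k1 v1]
        # redis.call results come back through the same conversion.
        r set foo bar
        assert_equal {bar} [r eval {return redis.call("GET", KEYS[1])} 1 foo]
    }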
[ok]: BLMOVE (right, right) replication, list exists
[ok]: BLPOP followed by role change, issue #2473
=== (repl) Starting server 127.0.0.1:25116 ok
[ok]: PSYNC2 #3899 regression: kill chained replica
[ok]: PSYNC2 pingoff: pause replica and promote it
=== () Starting server 127.0.0.1:25117 ok
[ok]: Second server should have role master at first
[ok]: SLAVEOF should start with link status "down"
[ok]: The role should immediately be changed to "replica"
[ok]: Sync should have transferred keys from master
[ok]: The link status should be up
[ok]: SET on the master should immediately propagate
[ok]: FLUSHALL should replicate
[ok]: ROLE in master reports master with a slave
[ok]: ROLE in slave reports slave in connected state

Waiting for background AOF rewrite to finish... [ok]: PSYNC2 #3899 regression: kill first replica
[ok]: EVAL does not leak in the Lua stack
=== (repl) Starting server 127.0.0.1:25118 ok
[ok]: EVAL processes writes from AOF in read-only slaves
[ok]: Make the old master a replica of the new one and check conditions
=== (repl) Starting server 127.0.0.1:21617 ok
=== () Starting server 127.0.0.1:25119 ok
=== () Starting server 127.0.0.1:25120 ok
[ok]: MIGRATE can correctly transfer large values
=== () Starting server 127.0.0.1:25121 ok

Waiting for background AOF rewrite to finish... === (repl) Starting server 127.0.0.1:21618 ok
=== (psync2) Starting server 127.0.0.1:28618 ok
[ok]: MIGRATE can correctly transfer hashes
=== (repl) Starting server 127.0.0.1:21619 ok
=== () Starting server 127.0.0.1:28619 ok
=== () Starting server 127.0.0.1:28620 ok
=== () Starting server 127.0.0.1:28621 ok
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 45938)
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: PSYNC2: --- CYCLE 3 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #4 as master
[ok]: PSYNC2: Set #2 to replicate from #4
[ok]: PSYNC2: Set #3 to replicate from #2
[ok]: PSYNC2: Set #0 to replicate from #3
[ok]: PSYNC2: Set #1 to replicate from #0
[ok]: PSYNC2: cluster is consistent after failover
=== () Starting server 127.0.0.1:28622 ok
[ok]: Slave is able to evict keys created in writable slaves
[ok]: MIGRATE timeout actually works
=== (repl) Starting server 127.0.0.1:21620 ok
[ok]: MIGRATE can migrate multiple keys at once
[ok]: MIGRATE with multiple keys must have empty key arg
=== (repl) Starting server 127.0.0.1:21621 ok
=== (repl) Starting server 127.0.0.1:23614 ok
[ok]: MIGRATE with multiple keys migrate just existing ones
=== () Starting server 127.0.0.1:23615 ok
=== (repl) Starting server 127.0.0.1:21622 ok
[ok]: First server should have role slave after SLAVEOF
[ok]: LTRIM stress testing - ziplist
[ok]: MIGRATE with multiple keys: stress command rewriting
=== (repl) Starting server 127.0.0.1:21623 ok
[26/58 done]: unit/type/list-2 (31 seconds)
Testing unit/maxmemory
=== (maxmemory) Starting server 127.0.0.1:25612 ok
[ok]: Without maxmemory small integers are shared
[ok]: With maxmemory and non-LRU policy integers are still shared
[ok]: With maxmemory and LRU policy integers are not shared
[ok]: MIGRATE with multiple keys: delete just ack keys
=== (repl) Starting server 127.0.0.1:21624 ok
[ok]: MIGRATE AUTH: correct and wrong password cases
[ok]: PSYNC2 #3899 regression: kill chained replica
[27/58 done]: unit/dump (32 seconds)
Testing unit/introspection
[ok]: Test replication with blocking lists and sorted sets operations
=== (introspection) Starting server 127.0.0.1:21625 ok
[ok]: CLIENT LIST
[ok]: MONITOR can log executed commands
[ok]: MONITOR can log commands issued by the scripting engine
[ok]: CLIENT GETNAME should return NIL if name is not assigned
[ok]: CLIENT LIST shows empty fields for unassigned names
[ok]: CLIENT SETNAME does not accept spaces
[ok]: CLIENT SETNAME can assign a name to this connection
[ok]: CLIENT SETNAME can change the name of an existing connection
[ok]: maxmemory - is the memory limit honoured? (policy allkeys-random)
[ok]: PSYNC2 #3899 regression: kill chained replica
[ok]: After CLIENT SETNAME, connection can still be closed
[28/58 done]: integration/block-repl (27 seconds)
Testing unit/introspection-2
[ok]: Dumping an RDB
=== () Starting server 127.0.0.1:21626 ok
=== (introspection) Starting server 127.0.0.1:22618 ok
[ok]: PSYNC2 #3899 regression: kill first replica
=== () Starting server 127.0.0.1:21627 ok
[ok]: maxmemory - is the memory limit honoured? (policy allkeys-lru)
[ok]: CONFIG save params special case handled properly
[ok]: CONFIG sanity
[ok]: CONFIG REWRITE sanity
[ok]: test various edge cases of repl topology changes with missing pings at the end
[ok]: PSYNC2 #3899 regression: kill first replica
[29/58 done]: unit/introspection (1 seconds)
Testing unit/limits
=== (limits) Starting server 127.0.0.1:21628 ok
[ok]: PSYNC2 #3899 regression: kill first replica
[ok]: PSYNC2 #3899 regression: kill chained replica
[ok]: maxmemory - is the memory limit honoured? (policy allkeys-lfu)
[ok]: PSYNC2 #3899 regression: kill chained replica
[ok]: maxmemory - is the memory limit honoured? (policy volatile-lru)
[ok]: Connecting as a replica
=== (psync2) Starting server 127.0.0.1:28623 ok
script took 6342 milliseconds
[ok]: Check if maxclients works refusing connections
[ok]: Test replication partial resync: no backlog (diskless: no, disabled, reconnect: 1)
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 64554)
=== () Starting server 127.0.0.1:28624 ok
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: PSYNC2: --- CYCLE 4 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #2 as master
[ok]: PSYNC2: Set #1 to replicate from #2
[ok]: PSYNC2: Set #4 to replicate from #1
[ok]: PSYNC2: Set #0 to replicate from #2
[ok]: PSYNC2: Set #3 to replicate from #1
[ok]: PSYNC2: cluster is consistent after failover
[30/58 done]: unit/limits (1 seconds)
Testing unit/obuf-limits
=== () Starting server 127.0.0.1:28625 ok
[ok]: maxmemory - is the memory limit honoured? (policy volatile-lfu)
=== (obuf-limits) Starting server 127.0.0.1:21629 ok
=== (repl) Starting server 127.0.0.1:26618 ok
=== () Starting server 127.0.0.1:26619 ok
[ok]: TTL and TYPE do not alter the last access time of a key
[ok]: Slave should be able to synchronize with the master
[ok]: MASTER and SLAVE dataset should be identical after complex ops
[ok]: maxmemory - is the memory limit honoured? (policy volatile-random)
[ok]: PSYNC2 #3899 regression: kill chained replica
[31/58 done]: integration/replication-2 (27 seconds)
Testing unit/bitops
=== (bitops) Starting server 127.0.0.1:27614 ok
[ok]: BITCOUNT returns 0 against non existing key
[ok]: BITCOUNT returns 0 with out of range indexes
[ok]: BITCOUNT returns 0 with negative indexes where start > end
[ok]: BITCOUNT against test vector #1
[ok]: BITCOUNT against test vector #2
[ok]: BITCOUNT against test vector #3
[ok]: BITCOUNT against test vector #4
[ok]: BITCOUNT against test vector #5
[ok]: Detect write load to master
[ok]: maxmemory - is the memory limit honoured? (policy volatile-ttl)
[ok]: BITCOUNT fuzzing without start/end
[ok]: maxmemory - only allkeys-* should remove non-volatile keys (allkeys-random)
[ok]: Chained replicas disconnect when replica re-connects with the same master
[ok]: PSYNC2 #3899 regression: kill first replica
[ok]: TOUCH alters the last access time of a key
[ok]: TOUCH returns the number of existing keys specified
[ok]: command stats for GEOADD
[ok]: command stats for EXPIRE
[ok]: command stats for BRPOP
[ok]: command stats for MULTI
[ok]: command stats for scripts
[32/58 done]: integration/psync2-pingoff (17 seconds)
Testing unit/bitfield
[ok]: BITCOUNT fuzzing with start/end
[ok]: BITCOUNT with start, end
[ok]: BITCOUNT syntax error #1
[ok]: BITCOUNT regression test for github issue #582
[ok]: BITCOUNT misaligned prefix
[ok]: BITCOUNT misaligned prefix + full words + remainder
[ok]: BITOP NOT (empty string)
[ok]: BITOP NOT (known string)
[ok]: BITOP where dest and target are the same key
[ok]: BITOP AND|OR|XOR don't change the string with single input key
[ok]: BITOP missing key is considered a stream of zero
[ok]: BITOP shorter keys are zero-padded to the key with max length
=== (bitops) Starting server 127.0.0.1:28626 ok
[33/58 done]: unit/introspection-2 (6 seconds)
Testing unit/geo
[ok]: BITFIELD signed SET and GET basics
[ok]: BITFIELD unsigned SET and GET basics
[ok]: BITFIELD # form
[ok]: BITFIELD basic INCRBY form
[ok]: BITFIELD chaining of multiple commands
[ok]: BITFIELD unsigned overflow wrap
[ok]: BITFIELD unsigned overflow sat
[ok]: BITFIELD signed overflow wrap
[ok]: BITFIELD signed overflow sat
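BITFIELD packs several sub-operations into a single reply array; a hypothetical sketch of the basics exercised above (key and offsets illustrative):

    test {BITFIELD sketch (illustrative only)} {
        r del bf
        # INCRBY on a missing key starts from 0 and returns the new value.
        assert_equal {1} [r bitfield bf INCRBY u8 100 1]
        # SET returns the previous value, GET the current one, in reply order.
        assert_equal {0 5} [r bitfield bf SET u8 0 5 GET u8 0]
    }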
=== (geo) Starting server 127.0.0.1:22619 ok
[ok]: maxmemory - only allkeys-* should remove non-volatile keys (allkeys-lru)
[ok]: GEOADD create
[ok]: GEOADD update
[ok]: GEOADD invalid coordinates
[ok]: GEOADD multi add
[ok]: Check geoset values
[ok]: GEORADIUS simple (sorted)
[ok]: GEORADIUS withdist (sorted)
[ok]: GEORADIUS with COUNT
[ok]: GEORADIUS with COUNT but missing integer argument
[ok]: GEORADIUS with COUNT DESC
[ok]: GEORADIUS HUGE, issue #2767
[ok]: GEORADIUSBYMEMBER simple (sorted)
[ok]: GEORADIUSBYMEMBER withdist (sorted)
[ok]: GEOHASH is able to return geohash strings
[ok]: GEOPOS simple
[ok]: GEOPOS missing element
[ok]: GEODIST simple & unit
[ok]: GEODIST missing elements
[ok]: GEORADIUS STORE option: syntax error
[ok]: GEORADIUS STORE option: incompatible options
[ok]: GEORADIUS STORE option: plain usage
[ok]: GEORADIUS STOREDIST option: plain usage
[ok]: GEORADIUS STOREDIST option: COUNT ASC and DESC
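A hypothetical sketch of the GEO primitives checked above, using the two Sicilian cities from the Redis documentation (key name illustrative):

    test {GEO sketch (illustrative only)} {
        r del Sicily
        r geoadd Sicily 13.361389 38.115556 Palermo
        r geoadd Sicily 15.087269 37.502669 Catania
        # Catania is closer to (15, 37), so it comes first with ASC.
        assert_equal {Catania Palermo} [r georadius Sicily 15 37 200 km ASC]
        # The Palermo-Catania distance is roughly 166 km.
        assert_range [r geodist Sicily Palermo Catania km] 160 170
    }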
[ok]: BITFIELD overflow detection fuzzing
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 84492)
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: PSYNC2: --- CYCLE 5 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #0 as master
[ok]: PSYNC2: Set #3 to replicate from #0
[ok]: PSYNC2: Set #1 to replicate from #3
[ok]: PSYNC2: Set #4 to replicate from #0
[ok]: PSYNC2: Set #2 to replicate from #1
[ok]: Piping raw protocol
[ok]: BITOP and fuzzing
[ok]: PSYNC2 #3899 regression: verify consistency
[ok]: maxmemory - only allkeys-* should remove non-volatile keys (volatile-lru)
[34/58 done]: integration/psync2-reg (22 seconds)
Testing unit/memefficiency
[ok]: BITFIELD overflow wrap fuzzing
[ok]: BITFIELD regression for #3221
[ok]: BITFIELD regression for #3564
=== (memefficiency) Starting server 127.0.0.1:22121 ok
[ok]: PSYNC2: cluster is consistent after failover
=== (repl) Starting server 127.0.0.1:28627 ok
=== () Starting server 127.0.0.1:28628 ok
[ok]: BITFIELD: setup slave
[ok]: BITFIELD: write on master, read on slave
[ok]: BITFIELD_RO fails when write option is used
[ok]: BITOP or fuzzing
loading took 6495 milliseconds
[ok]: EVAL timeout from AOF
[ok]: We can call scripts rewriting client->argv from Lua
[ok]: Call Redis command with many args from Lua (issue #1764)
[ok]: Number conversion precision test (issue #1118)
[ok]: String containing number precision test (regression of issue #1118)
[ok]: Verify negative arg count is error instead of crash (issue #1842)
[ok]: Correct handling of reused argv (issue #1939)
[ok]: Functions in the Redis namespace are able to report errors
[35/58 done]: integration/redis-cli (19 seconds)
Testing unit/hyperloglog
=== (hll) Starting server 127.0.0.1:24125 ok
=== (scripting) Starting server 127.0.0.1:21124 ok
[36/58 done]: unit/bitfield (3 seconds)
Testing unit/lazyfree
=== (lazyfree) Starting server 127.0.0.1:28629 ok
[ok]: maxmemory - only allkeys-* should remove non-volatile keys (volatile-random)
[ok]: Timedout read-only scripts can be killed by SCRIPT KILL
[ok]: Memory efficiency with values in range 32
[ok]: BITOP xor fuzzing
[ok]: Timedout script link is still usable after Lua returns
[ok]: BITOP NOT fuzzing
[ok]: BITOP with integer encoded source objects
[ok]: BITOP with non string source key
[ok]: BITOP with empty string after non empty string (issue #529)
[ok]: BITPOS bit=0 with empty key returns 0
[ok]: BITPOS bit=1 with empty key returns -1
[ok]: BITPOS bit=0 with string less than 1 word works
[ok]: BITPOS bit=1 with string less than 1 word works
[ok]: BITPOS bit=0 starting at unaligned address
[ok]: BITPOS bit=1 starting at unaligned address
[ok]: BITPOS bit=0 unaligned+full word+remainder
[ok]: BITPOS bit=1 unaligned+full word+remainder
[ok]: BITPOS bit=1 returns -1 if string is all 0 bits
[ok]: BITPOS bit=0 works with intervals
[ok]: BITPOS bit=1 works with intervals
[ok]: BITPOS bit=0 changes behavior if end is given
[ok]: Timedout scripts that modified data can't be killed by SCRIPT KILL
[ok]: SHUTDOWN NOSAVE can kill a timedout script anyway
[ok]: UNLINK can reclaim memory in background
[ok]: ziplist implementation: encoding stress testing
=== (scripting repl) Starting server 127.0.0.1:21125 ok
[ok]: BITPOS bit=1 fuzzy testing using SETBIT
=== () Starting server 127.0.0.1:21126 ok
[37/58 done]: unit/type/list-3 (43 seconds)
Testing unit/wait
[ok]: Before the replica connects we issue two EVAL commands (scripts replication)
=== (wait) Starting server 127.0.0.1:26112 ok
[ok]: maxmemory - only allkeys-* should remove non-volatile keys (volatile-ttl)
=== () Starting server 127.0.0.1:26113 ok
[ok]: Connect a replica to the master instance (scripts replication)
[ok]: Now use EVALSHA against the master, with both SHAs (scripts replication)
[ok]: If EVALSHA was replicated as EVAL, 'x' should be '4' (scripts replication)
[ok]: Replication of script multiple pushes to list with BLPOP (scripts replication)
[ok]: EVALSHA replication when first call is readonly (scripts replication)
[ok]: Lua scripts using SELECT are replicated correctly (scripts replication)
[ok]: BITPOS bit=0 fuzzy testing using SETBIT
[ok]: Setup slave
[ok]: WAIT should acknowledge 1 additional copy of the data
[38/58 done]: unit/bitops (7 seconds)
Testing unit/pendingquerybuf
=== (wait) Starting server 127.0.0.1:27615 ok
=== () Starting server 127.0.0.1:27616 ok
[ok]: FLUSHDB ASYNC can reclaim memory in background
=== (scripting repl) Starting server 127.0.0.1:21127 ok
[39/58 done]: unit/lazyfree (3 seconds)
Testing unit/tls
[ok]: Memory efficiency with values in range 64
=== (tls) Starting server 127.0.0.1:28630 ok
[ok]: maxmemory - policy volatile-lru should only remove volatile keys.
=== () Starting server 127.0.0.1:21128 ok
[ok]: Before the replica connects we issue two EVAL commands (commands replication)
[ok]: Connect a replica to the master instance (commands replication)
[ok]: Now use EVALSHA against the master, with both SHAs (commands replication)
[ok]: If EVALSHA was replicated as EVAL, 'x' should be '4' (commands replication)
[ok]: Replication of script multiple pushes to list with BLPOP (commands replication)
[ok]: EVALSHA replication when first call is readonly (commands replication)
[ok]: Lua scripts using SELECT are replicated correctly (commands replication)
[40/58 done]: unit/tls (0 seconds)
Testing unit/tracking
=== (tracking) Starting server 127.0.0.1:28631 ok
[ok]: Clients are able to enable tracking and redirect it
[ok]: The other connection is able to get invalidations
[ok]: The client is now able to disable tracking
[ok]: Clients can enable the BCAST mode with the empty prefix
[ok]: The connection gets invalidation messages about all the keys
[ok]: Clients can enable the BCAST mode with prefixes
[ok]: Adding prefixes to BCAST mode works
[ok]: Tracking NOLOOP mode in standard mode works
[ok]: Tracking NOLOOP mode in BCAST mode works
[ok]: WAIT should not acknowledge 2 additional copies of the data
[ok]: maxmemory - policy volatile-lfu should only remove volatile keys.
=== (scripting repl) Starting server 127.0.0.1:21129 ok
=== () Starting server 127.0.0.1:21130 ok
[ok]: Connect a replica to the master instance
[ok]: Redis.replicate_commands() must be issued before any write
[ok]: Redis.replicate_commands() must be issued before any write (2)
[ok]: Redis.set_repl() must be issued after replicate_commands()
[ok]: Redis.set_repl() doesn't accept invalid values
[ok]: Test selective replication of certain Redis commands from Lua
[ok]: PRNG is seeded randomly for command replication
[ok]: Using side effects is not a problem with command replication
[ok]: Tracking gets notification of expired keys
[ok]: Tracking gets notification on tracking table key eviction
[ok]: Memory efficiency with values in range 128
[41/58 done]: unit/tracking (1 seconds)
Testing unit/oom-score-adj
[ok]: maxmemory - policy volatile-random should only remove volatile keys.
=== (oom-score-adj) Starting server 127.0.0.1:28632 ok
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 105855)
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: PSYNC2: --- CYCLE 6 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #2 as master
[ok]: PSYNC2: Set #3 to replicate from #2
[ok]: PSYNC2: Set #4 to replicate from #3
[ok]: PSYNC2: Set #0 to replicate from #2
[ok]: PSYNC2: Set #1 to replicate from #4
[ok]: CONFIG SET oom-score-adj works as expected
[ok]: CONFIG SET oom-score-adj handles configuration failures
=== (scripting) Starting server 127.0.0.1:21131 ok
[42/58 done]: unit/scripting (20 seconds)
Testing unit/shutdown
=== (shutdown) Starting server 127.0.0.1:21132 ok
[ok]: Temp rdb will be deleted if we use bg_unlink on shutdown
=== (shutdown) Starting server 127.0.0.1:21133 ok
[ok]: maxmemory - policy volatile-ttl should only remove volatile keys.
[ok]: HyperLogLog self test passes
[ok]: PFADD without arguments creates an HLL value
[ok]: Approximated cardinality after creation is zero
[ok]: PFADD returns 1 when at least 1 reg was modified
[ok]: PFADD returns 0 when no reg was modified
[ok]: PFADD works with empty string (regression)
[ok]: PFCOUNT returns approximated cardinality of set
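For small sets the sparse HyperLogLog representation is exact, which is what the PFADD/PFCOUNT checks above lean on. A hypothetical sketch (key name illustrative):

    test {HyperLogLog sketch (illustrative only)} {
        r del hll
        # PFADD replies 1 when at least one internal register changed.
        assert_equal 1 [r pfadd hll a b c]
        assert_equal 3 [r pfcount hll]
        # Re-adding already-seen elements modifies nothing.
        assert_equal 0 [r pfadd hll a b]
        assert_equal 3 [r pfcount hll]
    }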
[ok]: Temp rdb will be deleted in the signal handler
[43/58 done]: unit/shutdown (0 seconds)
=== (maxmemory) Starting server 127.0.0.1:25613 ok
[ok]: PSYNC2: cluster is consistent after failover
=== () Starting server 127.0.0.1:25614 ok
[44/58 done]: unit/oom-score-adj (2 seconds)
[ok]: Memory efficiency with values in range 1024
[ok]: Test replication partial resync: ok after delay (diskless: no, disabled, reconnect: 1)
[ok]: HyperLogLogs are promoted from sparse to dense
=== (repl) Starting server 127.0.0.1:26620 ok
=== () Starting server 127.0.0.1:26621 ok
[ok]: Slave should be able to synchronize with the master
[ok]: WAIT should not acknowledge 1 additional copy if slave is blocked
[45/58 done]: unit/wait (6 seconds)
[ok]: Detect write load to master
[ok]: XRANGE fuzzing
[ok]: XREVRANGE regression test for issue #5006
[ok]: XREAD streamID edge (no-blocking)
[ok]: XREAD streamID edge (blocking)
[ok]: XADD streamID edge
[ok]: HyperLogLog sparse encoding stress test
[ok]: Corrupted sparse HyperLogLogs are detected: Additional at tail
[ok]: Corrupted sparse HyperLogLogs are detected: Broken magic
[ok]: Corrupted sparse HyperLogLogs are detected: Invalid encoding
[ok]: Corrupted dense HyperLogLogs are detected: Wrong length
=== (stream) Starting server 127.0.0.1:28112 ok
[ok]: XADD with MAXLEN > xlen can propagate correctly
=== (stream) Starting server 127.0.0.1:28113 ok
[ok]: XADD with ~ MAXLEN can propagate correctly
=== (stream) Starting server 127.0.0.1:28114 ok
[ok]: XTRIM with ~ MAXLEN can propagate correctly
=== (stream xsetid) Starting server 127.0.0.1:28115 ok
[ok]: Memory efficiency with values in range 16384
[ok]: XADD can CREATE an empty stream
[ok]: XSETID can set a specific ID
[ok]: XSETID cannot SETID with smaller ID
[ok]: XSETID cannot SETID on non-existent key
=== (stream) Starting server 127.0.0.1:28116 ok

Waiting for background AOF rewrite to finish... [46/58 done]: unit/memefficiency (11 seconds)
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 134732)
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: PSYNC2: --- CYCLE 7 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #4 as master
[ok]: PSYNC2: Set #1 to replicate from #4
[ok]: PSYNC2: Set #2 to replicate from #1
[ok]: PSYNC2: Set #3 to replicate from #2
[ok]: PSYNC2: Set #0 to replicate from #4
[ok]: Empty stream can be rewritten into AOF correctly

Waiting for background AOF rewrite to finish... [ok]: Client output buffer hard limit is enforced
[ok]: PSYNC2: cluster is consistent after failover
[ok]: Stream can be rewritten into AOF correctly after XDEL lastid
=== (stream) Starting server 127.0.0.1:28117 ok
[ok]: XGROUP HELP should not have unexpected options
[47/58 done]: unit/type/stream (54 seconds)
[ok]: Connect multiple replicas at the same time (issue #141), master diskless=no, replica diskless=disabled
=== (repl) Starting server 127.0.0.1:25122 ok
=== () Starting server 127.0.0.1:25123 ok
=== () Starting server 127.0.0.1:25124 ok
=== () Starting server 127.0.0.1:25125 ok
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 158792)
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: PSYNC2: --- CYCLE 8 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #0 as master
[ok]: PSYNC2: Set #1 to replicate from #0
[ok]: PSYNC2: Set #3 to replicate from #0
[ok]: PSYNC2: Set #4 to replicate from #3
[ok]: PSYNC2: Set #2 to replicate from #3
[ok]: PSYNC2: cluster is consistent after failover
[ok]: Test replication partial resync: backlog expired (diskless: no, disabled, reconnect: 1)
[ok]: pending querybuf: check size of pending_querybuf after setting a big value
=== (repl) Starting server 127.0.0.1:26622 ok
=== () Starting server 127.0.0.1:26623 ok
[ok]: Slave should be able to synchronize with the master
[48/58 done]: unit/pendingquerybuf (19 seconds)
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 183829)
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: Detect write load to master
[ok]: Test replication partial resync: no reconnection, just sync (diskless: no, swapdb, reconnect: 0)
=== (repl) Starting server 127.0.0.1:26624 ok
=== () Starting server 127.0.0.1:26625 ok
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: PSYNC2: Bring the master back again for next test
[ok]: PSYNC2: Partial resync after restart using RDB aux fields
[ok]: MASTER and SLAVE consistency with EVALSHA replication
[ok]: PSYNC2: Replica RDB restart with EVALSHA in backlog issue #4483
[49/58 done]: integration/psync2 (48 seconds)
[ok]: SLAVE can reload "lua" AUX RDB fields of duplicated scripts
[ok]: AOF rewrite during write load: RDB preamble=no
[50/58 done]: integration/replication-3 (59 seconds)
=== (aofrw) Starting server 127.0.0.1:23114 ok

Waiting for background AOF rewrite to finish... [ok]: Test replication partial resync: ok psync (diskless: no, swapdb, reconnect: 1)
[ok]: Turning off AOF kills the background writing child if any

Waiting for background AOF rewrite to finish... === (repl) Starting server 127.0.0.1:26626 ok
=== () Starting server 127.0.0.1:26627 ok
[ok]: Slave should be able to synchronize with the master
[ok]: Client output buffer soft limit is not enforced if time is not overreached
[ok]: AOF rewrite of list with quicklist encoding, string data

Waiting for background AOF rewrite to finish... [ok]: Replication: commands with many arguments (issue #1221)
[ok]: Detect write load to master
[ok]: Fuzzing dense/sparse encoding: Redis should always detect errors
[ok]: PFADD, PFCOUNT, PFMERGE type checking works
[ok]: PFMERGE results on the cardinality of union of sets
[ok]: AOF rewrite of list with quicklist encoding, int data

Waiting for background AOF rewrite to finish... [ok]: AOF rewrite of set with intset encoding, string data

Waiting for background AOF rewrite to finish... [ok]: Replication of SPOP command -- alsoPropagate() API
[ok]: AOF rewrite of set with hashtable encoding, string data

Waiting for background AOF rewrite to finish... [51/58 done]: integration/replication-4 (65 seconds)
[ok]: AOF rewrite of set with intset encoding, int data

Waiting for background AOF rewrite to finish... [ok]: AOF rewrite of set with hashtable encoding, int data

Waiting for background AOF rewrite to finish... [ok]: Connect multiple replicas at the same time (issue #141), master diskless=no, replica diskless=swapdb
[ok]: AOF rewrite of hash with ziplist encoding, string data

Waiting for background AOF rewrite to finish... [ok]: AOF rewrite of hash with hashtable encoding, string data

Waiting for background AOF rewrite to finish... [ok]: AOF rewrite of hash with ziplist encoding, int data

Waiting for background AOF rewrite to finish... === (repl) Starting server 127.0.0.1:25126 ok
=== () Starting server 127.0.0.1:25127 ok
=== () Starting server 127.0.0.1:25128 ok
=== () Starting server 127.0.0.1:25129 ok
[ok]: Test replication partial resync: no backlog (diskless: no, swapdb, reconnect: 1)
[ok]: AOF rewrite of hash with hashtable encoding, int data

Waiting for background AOF rewrite to finish... === (repl) Starting server 127.0.0.1:26628 ok
=== () Starting server 127.0.0.1:26629 ok
[ok]: PFCOUNT multiple-keys merge returns cardinality of union #1
[ok]: Slave should be able to synchronize with the master
[ok]: AOF rewrite of zset with ziplist encoding, string data

Waiting for background AOF rewrite to finish... [ok]: Detect write load to master
[ok]: AOF rewrite of zset with skiplist encoding, string data
[ok]: Client output buffer soft limit is enforced if time is overreached

Waiting for background AOF rewrite to finish... [ok]: No response for single command if client output buffer hard limit is enforced
[ok]: PFCOUNT multiple-keys merge returns cardinality of union #2
[ok]: AOF rewrite of zset with ziplist encoding, int data

Waiting for background AOF rewrite to finish... [ok]: No response for multi commands in pipeline if client output buffer limit is enforced
[ok]: Execute transactions completely even if client output buffer limit is enforced
[52/58 done]: unit/obuf-limits (51 seconds)
[ok]: AOF rewrite of zset with skiplist encoding, int data
[ok]: PFDEBUG GETREG returns the HyperLogLog raw registers
[ok]: PFADD / PFCOUNT cache invalidation works
[ok]: BGREWRITEAOF is delayed if BGSAVE is in progress
[ok]: BGREWRITEAOF is refused if already in progress
[53/58 done]: unit/aofrw (84 seconds)
[54/58 done]: unit/hyperloglog (45 seconds)
[ok]: slave buffers are counted correctly
=== (maxmemory) Starting server 127.0.0.1:25615 ok
=== () Starting server 127.0.0.1:25616 ok
[ok]: Test replication partial resync: ok after delay (diskless: no, swapdb, reconnect: 1)
=== (repl) Starting server 127.0.0.1:26630 ok
=== () Starting server 127.0.0.1:26631 ok
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: GEOADD + GEORANGE randomized test
[55/58 done]: unit/geo (56 seconds)
[ok]: replica buffers don't induce eviction
[56/58 done]: unit/maxmemory (67 seconds)
[ok]: Connect multiple replicas at the same time (issue #141), master diskless=yes, replica diskless=disabled
=== (repl) Starting server 127.0.0.1:25130 ok
=== () Starting server 127.0.0.1:25131 ok
=== () Starting server 127.0.0.1:25132 ok
=== () Starting server 127.0.0.1:25133 ok
[ok]: Test replication partial resync: backlog expired (diskless: no, swapdb, reconnect: 1)
=== (repl) Starting server 127.0.0.1:26632 ok
=== () Starting server 127.0.0.1:26633 ok
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: no reconnection, just sync (diskless: yes, disabled, reconnect: 0)
=== (repl) Starting server 127.0.0.1:26634 ok
=== () Starting server 127.0.0.1:26635 ok
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: ok psync (diskless: yes, disabled, reconnect: 1)
=== (repl) Starting server 127.0.0.1:26636 ok
=== () Starting server 127.0.0.1:26637 ok
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: no backlog (diskless: yes, disabled, reconnect: 1)
=== (repl) Starting server 127.0.0.1:26638 ok
=== () Starting server 127.0.0.1:26639 ok
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Connect multiple replicas at the same time (issue #141), master diskless=yes, replica diskless=swapdb
[ok]: Test replication partial resync: ok after delay (diskless: yes, disabled, reconnect: 1)
=== (repl) Starting server 127.0.0.1:26640 ok
=== () Starting server 127.0.0.1:26641 ok
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
=== (repl) Starting server 127.0.0.1:25134 ok
=== () Starting server 127.0.0.1:25135 ok
[ok]: Test replication partial resync: backlog expired (diskless: yes, disabled, reconnect: 1)
=== (repl) Starting server 127.0.0.1:26642 ok
=== () Starting server 127.0.0.1:26643 ok
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: no reconnection, just sync (diskless: yes, swapdb, reconnect: 0)
=== (repl) Starting server 127.0.0.1:26644 ok
=== () Starting server 127.0.0.1:26645 ok
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: ok psync (diskless: yes, swapdb, reconnect: 1)
=== (repl) Starting server 127.0.0.1:26646 ok
=== () Starting server 127.0.0.1:26647 ok
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Master stream is correctly processed while the replica has a script in -BUSY state
=== (repl) Starting server 127.0.0.1:25136 ok
=== () Starting server 127.0.0.1:25137 ok
[ok]: Test replication partial resync: no backlog (diskless: yes, swapdb, reconnect: 1)
[ok]: slave fails full sync and diskless load swapdb recovers it
=== (repl) Starting server 127.0.0.1:26648 ok
=== (repl) Starting server 127.0.0.1:25138 ok
=== () Starting server 127.0.0.1:26649 ok
=== () Starting server 127.0.0.1:25139 ok
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
filling took 2778 ms (TODO: use pipeline)
{651990:S 25 Oct 2020 11:22:09.616 # Internal error in RDB reading offset 0, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 48
{651990:S 25 Oct 2020 11:22:09.648 # Internal error in RDB reading offset 12047840, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 63
{651990:S 25 Oct 2020 11:22:09.670 # Internal error in RDB reading offset 6025808, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 78
{651990:S 25 Oct 2020 11:22:09.733 # Internal error in RDB reading offset 26711518, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 93
{651990:S 25 Oct 2020 11:22:09.763 # Internal error in RDB reading offset 9771741, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 108
{651990:S 25 Oct 2020 11:22:09.925 * MASTER <-> REPLICA sync: Finished with success} 122
{651990:S 25 Oct 2020 11:22:10.009 # Internal error in RDB reading offset 24353699, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 150
{651990:S 25 Oct 2020 11:22:10.040 # Internal error in RDB reading offset 14159213, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 165
{651990:S 25 Oct 2020 11:22:10.095 # Internal error in RDB reading offset 22883249, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 180
{651990:S 25 Oct 2020 11:22:10.145 # Internal error in RDB reading offset 20826470, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 195
{651990:S 25 Oct 2020 11:22:10.169 # Internal error in RDB reading offset 9771741, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 210
{651990:S 25 Oct 2020 11:22:10.206 # Internal error in RDB reading offset 14159213, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 225
{651990:S 25 Oct 2020 11:22:10.222 # Internal error in RDB reading offset 1737660, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 240
{651990:S 25 Oct 2020 11:22:10.242 # Internal error in RDB reading offset 3804677, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 255
{651990:S 25 Oct 2020 11:22:10.297 # Internal error in RDB reading offset 29225389, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 270
{651990:S 25 Oct 2020 11:22:10.324 # Internal error in RDB reading offset 9771741, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 285
{651990:S 25 Oct 2020 11:22:10.345 # Internal error in RDB reading offset 7596726, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 300
{651990:S 25 Oct 2020 11:22:10.503 * MASTER <-> REPLICA sync: Finished with success} 314
{651990:S 25 Oct 2020 11:22:10.572 # Internal error in RDB reading offset 3804678, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 342
{651990:S 25 Oct 2020 11:22:10.579 # Internal error in RDB reading offset 0, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 357
{651990:S 25 Oct 2020 11:22:10.630 # Internal error in RDB reading offset 24353700, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 372
{651990:S 25 Oct 2020 11:22:10.803 * MASTER <-> REPLICA sync: Finished with success} 386
{651990:S 25 Oct 2020 11:22:10.849 # Internal error in RDB reading offset 14159214, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 414
{651990:S 25 Oct 2020 11:22:10.909 # Internal error in RDB reading offset 26711519, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 429
{651990:S 25 Oct 2020 11:22:10.963 # Internal error in RDB reading offset 22883250, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 444
{651990:S 25 Oct 2020 11:22:11.009 # Internal error in RDB reading offset 18448611, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 459
{651990:S 25 Oct 2020 11:22:11.034 # Internal error in RDB reading offset 6025809, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 474
{651990:S 25 Oct 2020 11:22:11.070 # Internal error in RDB reading offset 12047841, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 489
{651990:S 25 Oct 2020 11:22:11.086 # Internal error in RDB reading offset 3804678, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 504
{651990:S 25 Oct 2020 11:22:11.099 # Internal error in RDB reading offset 1737661, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 519
{651990:S 25 Oct 2020 11:22:11.132 # Internal error in RDB reading offset 12047841, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 534
{651990:S 25 Oct 2020 11:22:11.178 # Internal error in RDB reading offset 18448611, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 549
{651990:S 25 Oct 2020 11:22:11.198 # Internal error in RDB reading offset 3804678, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 564
{651990:S 25 Oct 2020 11:22:11.211 # Internal error in RDB reading offset 1737661, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 579
{651990:S 25 Oct 2020 11:22:11.242 # Internal error in RDB reading offset 12047841, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 594
{651990:S 25 Oct 2020 11:22:11.387 * MASTER <-> REPLICA sync: Finished with success} 608
{651990:S 25 Oct 2020 11:22:11.464 # Internal error in RDB reading offset 20826471, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 636
{651990:S 25 Oct 2020 11:22:11.504 # Internal error in RDB reading offset 16776819, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 651
{651990:S 25 Oct 2020 11:22:11.519 # Internal error in RDB reading offset 1737661, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 666
{651990:S 25 Oct 2020 11:22:11.563 # Internal error in RDB reading offset 18448611, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 681
{651990:S 25 Oct 2020 11:22:11.607 # Internal error in RDB reading offset 20826471, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 696
{651990:S 25 Oct 2020 11:22:11.660 # Internal error in RDB reading offset 20826471, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 711
{651990:S 25 Oct 2020 11:22:11.670 # Internal error in RDB reading offset 0, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 726
{651990:S 25 Oct 2020 11:22:11.727 # Internal error in RDB reading offset 22883250, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 741
{651990:S 25 Oct 2020 11:22:11.772 # Internal error in RDB reading offset 16776819, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 756
{651990:S 25 Oct 2020 11:22:11.923 * MASTER <-> REPLICA sync: Finished with success} 770
{651990:S 25 Oct 2020 11:22:11.972 # Internal error in RDB reading offset 16776819, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 798
{651990:S 25 Oct 2020 11:22:12.123 * MASTER <-> REPLICA sync: Finished with success} 812
{651990:S 25 Oct 2020 11:22:12.166 # Internal error in RDB reading offset 7596727, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 840
{651990:S 25 Oct 2020 11:22:12.221 # Internal error in RDB reading offset 20826471, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 855
{651990:S 25 Oct 2020 11:22:12.270 # Internal error in RDB reading offset 16776819, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 870
{651990:S 25 Oct 2020 11:22:12.307 # Internal error in RDB reading offset 14159214, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 885
{651990:S 25 Oct 2020 11:22:12.337 # Internal error in RDB reading offset 7596727, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 900
{651990:S 25 Oct 2020 11:22:12.387 # Internal error in RDB reading offset 22883250, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 915
{651990:S 25 Oct 2020 11:22:12.539 * MASTER <-> REPLICA sync: Finished with success} 929
{651990:S 25 Oct 2020 11:22:12.617 # Internal error in RDB reading offset 20826471, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 957
{651990:S 25 Oct 2020 11:22:12.658 # Internal error in RDB reading offset 14159214, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 972
{651990:S 25 Oct 2020 11:22:12.684 # Internal error in RDB reading offset 7596727, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 987
{651990:S 25 Oct 2020 11:22:12.736 # Internal error in RDB reading offset 24353700, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1002
{651990:S 25 Oct 2020 11:22:12.756 # Internal error in RDB reading offset 1737661, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1017
{651990:S 25 Oct 2020 11:22:12.912 * MASTER <-> REPLICA sync: Finished with success} 1031
{651990:S 25 Oct 2020 11:22:13.014 # Internal error in RDB reading offset 24353700, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1059
{651990:S 25 Oct 2020 11:22:13.050 # Internal error in RDB reading offset 12047841, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1074
{651990:S 25 Oct 2020 11:22:13.079 # Internal error in RDB reading offset 9771742, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1089
{651990:S 25 Oct 2020 11:22:13.105 # Internal error in RDB reading offset 7596727, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1104
{651990:S 25 Oct 2020 11:22:13.124 # Internal error in RDB reading offset 7596727, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1119
{651990:S 25 Oct 2020 11:22:13.268 * MASTER <-> REPLICA sync: Finished with success} 1133
{651990:S 25 Oct 2020 11:22:13.325 # Internal error in RDB reading offset 16776819, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1161
{651990:S 25 Oct 2020 11:22:13.349 # Internal error in RDB reading offset 6025809, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1176
{651990:S 25 Oct 2020 11:22:13.398 # Internal error in RDB reading offset 18448611, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1191
{651990:S 25 Oct 2020 11:22:13.421 # Internal error in RDB reading offset 6025809, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1206
{651990:S 25 Oct 2020 11:22:13.461 # Internal error in RDB reading offset 14159214, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1221
{651990:S 25 Oct 2020 11:22:13.503 # Internal error in RDB reading offset 18448611, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1236
{651990:S 25 Oct 2020 11:22:13.512 # Internal error in RDB reading offset 1737661, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1251
{651990:S 25 Oct 2020 11:22:13.657 * MASTER <-> REPLICA sync: Finished with success} 1265
{651990:S 25 Oct 2020 11:22:13.716 # Internal error in RDB reading offset 0, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1293
{651990:S 25 Oct 2020 11:22:13.758 # Internal error in RDB reading offset 14159214, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1308
{651990:S 25 Oct 2020 11:22:13.909 * MASTER <-> REPLICA sync: Finished with success} 1322
{651990:S 25 Oct 2020 11:22:14.015 # Internal error in RDB reading offset 20826471, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1350
{651990:S 25 Oct 2020 11:22:14.055 # Internal error in RDB reading offset 14159214, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1365
{651990:S 25 Oct 2020 11:22:14.110 # Internal error in RDB reading offset 24353700, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1380
{651990:S 25 Oct 2020 11:22:14.150 # Internal error in RDB reading offset 12047841, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1395
{651990:S 25 Oct 2020 11:22:14.188 # Internal error in RDB reading offset 14159214, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1410
{651990:S 25 Oct 2020 11:22:14.205 # Internal error in RDB reading offset 1737661, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1425
{651990:S 25 Oct 2020 11:22:14.225 # Internal error in RDB reading offset 6025809, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1440
{651990:S 25 Oct 2020 11:22:14.268 # Internal error in RDB reading offset 12047841, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1455
{651990:S 25 Oct 2020 11:22:14.305 # Internal error in RDB reading offset 14159214, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1470
{651990:S 25 Oct 2020 11:22:14.320 # Internal error in RDB reading offset 1737661, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1485
{651990:S 25 Oct 2020 11:22:14.363 # Internal error in RDB reading offset 12047841, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1500
{651990:S 25 Oct 2020 11:22:14.388 # Internal error in RDB reading offset 3804678, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1515
{651990:S 25 Oct 2020 11:22:14.437 # Internal error in RDB reading offset 22883250, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1530
{651990:S 25 Oct 2020 11:22:14.470 # Internal error in RDB reading offset 7596727, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1545
{651990:S 25 Oct 2020 11:22:14.496 # Internal error in RDB reading offset 6025809, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1560
{651990:S 25 Oct 2020 11:22:14.553 # Internal error in RDB reading offset 24353700, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1575
{651990:S 25 Oct 2020 11:22:14.568 # Internal error in RDB reading offset 1737661, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1590
{651990:S 25 Oct 2020 11:22:14.713 * MASTER <-> REPLICA sync: Finished with success} 1604
{651990:S 25 Oct 2020 11:22:14.772 # Internal error in RDB reading offset 20826471, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1632
{651990:S 25 Oct 2020 11:22:14.824 # Internal error in RDB reading offset 24353700, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1647
{651990:S 25 Oct 2020 11:22:14.873 # Internal error in RDB reading offset 20826471, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1662
{651990:S 25 Oct 2020 11:22:14.885 # Internal error in RDB reading offset 3804678, function at rdb.c:2401 -> Unexpected EOF reading RDB file. Failure loading rdb format from socket, assuming connection error, resuming operation.} 1677
test took 5287 ms
[ok]: diskless loading short read
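(Editor's note: the long run of "Internal error in RDB reading ... Unexpected EOF" lines above is the expected output of this test, not a fault in the run. "diskless loading short read" deliberately cuts the diskless RDB transfer at varying offsets and checks that the replica logs the short read, resumes operation, and eventually completes a full sync, which is why the failures are interleaved with "MASTER <-> REPLICA sync: Finished with success" lines. A minimal sketch for re-running just this unit locally, assuming a built tree and that the test lives in tests/integration/replication.tcl as the neighbouring diskless tests suggest; --single selects one test file for the stock runtest script:

    ./runtest --single integration/replication
)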
=== (repl) Starting server 127.0.0.1:25140 ok
=== () Starting server 127.0.0.1:25141 ok
=== () Starting server 127.0.0.1:25142 ok
[ok]: Test replication partial resync: ok after delay (diskless: yes, swapdb, reconnect: 1)
=== (repl) Starting server 127.0.0.1:26650 ok
=== () Starting server 127.0.0.1:26651 ok
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: diskless no replicas drop during rdb pipe
=== () Starting server 127.0.0.1:25143 ok
=== () Starting server 127.0.0.1:25144 ok
[ok]: diskless slow replicas drop during rdb pipe
=== () Starting server 127.0.0.1:25145 ok
=== () Starting server 127.0.0.1:25146 ok
[ok]: Test replication partial resync: backlog expired (diskless: yes, swapdb, reconnect: 1)
[57/58 done]: integration/replication-psync (177 seconds)
[ok]: diskless fast replicas drop during rdb pipe
=== () Starting server 127.0.0.1:25147 ok
=== () Starting server 127.0.0.1:25148 ok
[ok]: diskless all replicas drop during rdb pipe
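(Editor's note: the "diskless ... replicas drop during rdb pipe" group exercises diskless replication, where the master's fork child writes the RDB straight to the replica sockets instead of to a temporary file on disk. A minimal configuration sketch for that mode, using standard redis.conf directives; the swapdb load mode, which these partial-resync tests also reference, keeps the old dataset in memory until the incoming RDB parses successfully:

    # master: stream the RDB to replicas over the socket, no temp file on disk
    repl-diskless-sync yes
    repl-diskless-sync-delay 0

    # replica: parse the incoming RDB directly from the socket
    repl-diskless-load swapdb
)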
=== (repl) Starting server 127.0.0.1:25149 ok
=== () Starting server 127.0.0.1:25150 ok
fork child is 653279
[ok]: diskless replication child being killed is collected
=== (repl) Starting server 127.0.0.1:25151 ok
=== () Starting server 127.0.0.1:25152 ok
=== () Starting server 127.0.0.1:25153 ok
[ok]: replicaof right after disconnection
=== (repl) Starting server 127.0.0.1:25154 ok
=== () Starting server 127.0.0.1:25155 ok
=== () Starting server 127.0.0.1:25156 ok
[ok]: Kill rdb child process if its dumping RDB is not useful
[58/58 done]: integration/replication (192 seconds)
Testing solo test
=== (defrag) Starting server 127.0.0.1:25157 ok
[58/58 done]: defrag (0 seconds)

                   The End

Execution time of different units:
  1 seconds - unit/type/incr
  1 seconds - unit/printver
  1 seconds - unit/auth
  1 seconds - unit/protocol
  2 seconds - unit/keyspace
  0 seconds - unit/quit
  3 seconds - unit/multi
  5 seconds - unit/type/stream-cgroups
  1 seconds - unit/acl
  9 seconds - unit/type/list
  9 seconds - unit/type/hash
  10 seconds - unit/scan
  10 seconds - unit/sort
  13 seconds - unit/type/set
  13 seconds - unit/type/string
  15 seconds - unit/expire
  15 seconds - unit/other
  1 seconds - integration/convert-zipmap-hash-on-load
  18 seconds - unit/type/zset
  1 seconds - integration/logging
  16 seconds - unit/latency-monitor
  9 seconds - integration/aof
  7 seconds - integration/rdb
  1 seconds - unit/pubsub
  2 seconds - unit/slowlog
  31 seconds - unit/type/list-2
  32 seconds - unit/dump
  27 seconds - integration/block-repl
  1 seconds - unit/introspection
  1 seconds - unit/limits
  27 seconds - integration/replication-2
  17 seconds - integration/psync2-pingoff
  6 seconds - unit/introspection-2
  22 seconds - integration/psync2-reg
  19 seconds - integration/redis-cli
  3 seconds - unit/bitfield
  43 seconds - unit/type/list-3
  7 seconds - unit/bitops
  3 seconds - unit/lazyfree
  0 seconds - unit/tls
  1 seconds - unit/tracking
  20 seconds - unit/scripting
  0 seconds - unit/shutdown
  2 seconds - unit/oom-score-adj
  6 seconds - unit/wait
  11 seconds - unit/memefficiency
  54 seconds - unit/type/stream
  19 seconds - unit/pendingquerybuf
  48 seconds - integration/psync2
  59 seconds - integration/replication-3
  65 seconds - integration/replication-4
  51 seconds - unit/obuf-limits
  84 seconds - unit/aofrw
  45 seconds - unit/hyperloglog
  56 seconds - unit/geo
  67 seconds - unit/maxmemory
  177 seconds - integration/replication-psync
  192 seconds - integration/replication
  0 seconds - defrag

\o/ All tests passed without errors!

Cleanup: may take some time... OK
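(Editor's note: to see which units dominate the wall-clock time of a run like this, the "Execution time of different units" block above can be sorted numerically. A small sketch, assuming the run output has been saved to a file named run.log, a hypothetical name:

    # print the five slowest test units from the saved log
    grep 'seconds - ' run.log | sort -rn | head -5

For this run it would surface integration/replication (192 seconds) and integration/replication-psync (177 seconds) at the top, followed by unit/aofrw, unit/maxmemory and integration/replication-4.)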