RecidivCI

Details for run #1166 (ok)

libc-malloc (ad0a9df77)

Wed Jul 08 14:41:11 UTC 2020

@cd /home/ubuntu/ci/redis

Working dir is now '/home/ubuntu/ci/redis'

@git checkout unstable

Already on 'unstable'
Your branch is up to date with 'origin/unstable'.

@git pull origin unstable

From https://github.com/antirez/redis
 * branch                unstable   -> FETCH_HEAD
Already up to date.

@make distclean

cd src && make distclean
make[1]: Entering directory '/home/ubuntu/ci/redis/src'
/bin/sh: 1: pkg-config: not found
rm -rf redis-server redis-sentinel redis-cli redis-benchmark redis-check-rdb redis-check-aof *.o *.gcda *.gcno *.gcov redis.info lcov-html Makefile.dep dict-benchmark
rm -f adlist.d quicklist.d ae.d anet.d dict.d server.d sds.d zmalloc.d lzf_c.d lzf_d.d pqsort.d zipmap.d sha1.d ziplist.d release.d networking.d util.d object.d db.d replication.d rdb.d t_string.d t_list.d t_set.d t_zset.d t_hash.d config.d aof.d pubsub.d multi.d debug.d sort.d intset.d syncio.d cluster.d crc16.d endianconv.d slowlog.d scripting.d bio.d rio.d rand.d memtest.d crcspeed.d crc64.d bitops.d sentinel.d notify.d setproctitle.d blocked.d hyperloglog.d latency.d sparkline.d redis-check-rdb.d redis-check-aof.d geo.d lazyfree.d module.d evict.d expire.d geohash.d geohash_helper.d childinfo.d defrag.d siphash.d rax.d t_stream.d listpack.d localtime.d lolwut.d lolwut5.d lolwut6.d acl.d gopher.d tracking.d connection.d tls.d sha256.d timeout.d setcpuaffinity.d anet.d adlist.d dict.d redis-cli.d zmalloc.d release.d ae.d crcspeed.d crc64.d siphash.d crc16.d ae.d anet.d redis-benchmark.d adlist.d dict.d zmalloc.d siphash.d
(cd ../deps && make distclean)
make[2]: Entering directory '/home/ubuntu/ci/redis/deps'
(cd hiredis && make clean) > /dev/null || true
(cd linenoise && make clean) > /dev/null || true
(cd lua && make clean) > /dev/null || true
(cd jemalloc && [ -f Makefile ] && make distclean) > /dev/null || true
(rm -f .make-*)
make[2]: Leaving directory '/home/ubuntu/ci/redis/deps'
(rm -f .make-*)
make[1]: Leaving directory '/home/ubuntu/ci/redis/src'

@make MALLOC=libc -j 8

cd src && make all
make[1]: Entering directory '/home/ubuntu/ci/redis/src'
/bin/sh: 1: pkg-config: not found
    CC Makefile.dep
/bin/sh: 1: pkg-config: not found
rm -rf redis-server redis-sentinel redis-cli redis-benchmark redis-check-rdb redis-check-aof *.o *.gcda *.gcno *.gcov redis.info lcov-html Makefile.dep dict-benchmark
rm -f adlist.d quicklist.d ae.d anet.d dict.d server.d sds.d zmalloc.d lzf_c.d lzf_d.d pqsort.d zipmap.d sha1.d ziplist.d release.d networking.d util.d object.d db.d replication.d rdb.d t_string.d t_list.d t_set.d t_zset.d t_hash.d config.d aof.d pubsub.d multi.d debug.d sort.d intset.d syncio.d cluster.d crc16.d endianconv.d slowlog.d scripting.d bio.d rio.d rand.d memtest.d crcspeed.d crc64.d bitops.d sentinel.d notify.d setproctitle.d blocked.d hyperloglog.d latency.d sparkline.d redis-check-rdb.d redis-check-aof.d geo.d lazyfree.d module.d evict.d expire.d geohash.d geohash_helper.d childinfo.d defrag.d siphash.d rax.d t_stream.d listpack.d localtime.d lolwut.d lolwut5.d lolwut6.d acl.d gopher.d tracking.d connection.d tls.d sha256.d timeout.d setcpuaffinity.d anet.d adlist.d dict.d redis-cli.d zmalloc.d release.d ae.d crcspeed.d crc64.d siphash.d crc16.d ae.d anet.d redis-benchmark.d adlist.d dict.d zmalloc.d siphash.d
(cd ../deps && make distclean)
make[2]: Entering directory '/home/ubuntu/ci/redis/deps'
(cd hiredis && make clean) > /dev/null || true
(cd linenoise && make clean) > /dev/null || true
(cd lua && make clean) > /dev/null || true
(cd jemalloc && [ -f Makefile ] && make distclean) > /dev/null || true
(rm -f .make-*)
make[2]: Leaving directory '/home/ubuntu/ci/redis/deps'
(rm -f .make-*)
echo STD=-std=c11 -pedantic -DREDIS_STATIC='' >> .make-settings
echo WARN=-Wall -W -Wno-missing-field-initializers >> .make-settings
echo OPT=-O2 >> .make-settings
echo MALLOC=libc >> .make-settings
echo CFLAGS= >> .make-settings
echo LDFLAGS= >> .make-settings
echo REDIS_CFLAGS= >> .make-settings
echo REDIS_LDFLAGS= >> .make-settings
echo PREV_FINAL_CFLAGS=-std=c11 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O2 -g -ggdb   -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src >> .make-settings
echo PREV_FINAL_LDFLAGS=  -g -ggdb -rdynamic >> .make-settings
(cd ../deps && make hiredis linenoise lua)
make[2]: Entering directory '/home/ubuntu/ci/redis/deps'
(cd hiredis && make clean) > /dev/null || true
(cd linenoise && make clean) > /dev/null || true
(cd lua && make clean) > /dev/null || true
(cd jemalloc && [ -f Makefile ] && make distclean) > /dev/null || true
(rm -f .make-*)
(echo "" > .make-cflags)
(echo "" > .make-ldflags)
MAKE hiredis
cd hiredis && make static 
MAKE linenoise
cd linenoise && make
MAKE lua
cd lua/src && make all CFLAGS="-O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' " MYLDFLAGS="" AR="ar rcu"
make[3]: Entering directory '/home/ubuntu/ci/redis/deps/hiredis'
make[3]: Entering directory '/home/ubuntu/ci/redis/deps/linenoise'
cc  -Wall -Os -g  -c linenoise.c
make[3]: Entering directory '/home/ubuntu/ci/redis/deps/lua/src'
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lapi.o lapi.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lcode.o lcode.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ldebug.o ldebug.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ldo.o ldo.c
cc -std=c99 -pedantic -c -O3 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb net.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ldump.o ldump.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lfunc.o lfunc.c
ldo.c: In function ‘f_parser’:
ldo.c:496:7: warning: unused variable ‘c’ [-Wunused-variable]
  496 |   int c = luaZ_lookahead(p->z);
      |       ^
cc -std=c99 -pedantic -c -O3 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb hiredis.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lgc.o lgc.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o llex.o llex.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lmem.o lmem.c
cc -std=c99 -pedantic -c -O3 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb sds.c
cc -std=c99 -pedantic -c -O3 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb async.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lobject.o lobject.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lopcodes.o lopcodes.c
make[3]: Leaving directory '/home/ubuntu/ci/redis/deps/linenoise'
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lparser.o lparser.c
cc -std=c99 -pedantic -c -O3 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb read.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lstate.o lstate.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lstring.o lstring.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ltable.o ltable.c
cc -std=c99 -pedantic -c -O3 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb sockcompat.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ltm.o ltm.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lundump.o lundump.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lvm.o lvm.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lzio.o lzio.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o strbuf.o strbuf.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o fpconv.o fpconv.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lauxlib.o lauxlib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lbaselib.o lbaselib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ldblib.o ldblib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o liolib.o liolib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lmathlib.o lmathlib.c
lauxlib.c: In function ‘luaL_loadfile’:
lauxlib.c:577:4: warning: this ‘while’ clause does not guard... [-Wmisleading-indentation]
  577 |    while ((c = getc(lf.f)) != EOF && c != LUA_SIGNATURE[0]) ;
      |    ^~~~~
lauxlib.c:578:5: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘while’
  578 |     lf.extraline = 0;
      |     ^~
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o loslib.o loslib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ltablib.o ltablib.c
ltablib.c: In function ‘addfield’:
ltablib.c:137:3: warning: this ‘if’ clause does not guard... [-Wmisleading-indentation]
  137 |   if (!lua_isstring(L, -1))
      |   ^~
ltablib.c:140:5: note: ...this statement, but the latter is misleadingly indented as if it were guarded by the ‘if’
  140 |     luaL_addvalue(b);
      |     ^~~~~~~~~~~~~
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lstrlib.o lstrlib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o loadlib.o loadlib.c
ar rcs libhiredis.a net.o hiredis.o sds.o async.o read.o sockcompat.o
make[3]: Leaving directory '/home/ubuntu/ci/redis/deps/hiredis'
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o linit.o linit.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lua_cjson.o lua_cjson.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lua_struct.o lua_struct.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lua_cmsgpack.o lua_cmsgpack.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lua_bit.o lua_bit.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lua.o lua.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o luac.o luac.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o print.o print.c
ar rcu liblua.a lapi.o lcode.o ldebug.o ldo.o ldump.o lfunc.o lgc.o llex.o lmem.o lobject.o lopcodes.o lparser.o lstate.o lstring.o ltable.o ltm.o lundump.o lvm.o lzio.o strbuf.o fpconv.o lauxlib.o lbaselib.o ldblib.o liolib.o lmathlib.o loslib.o ltablib.o lstrlib.o loadlib.o linit.o lua_cjson.o lua_struct.o lua_cmsgpack.o lua_bit.o	# DLL needs all object files
ar: `u' modifier ignored since `D' is the default (see `U')
ranlib liblua.a
cc -o lua  lua.o liblua.a -lm 
cc -o luac  luac.o print.o liblua.a -lm 
/usr/bin/ld: liblua.a(loslib.o): in function `os_tmpname':
loslib.c:(.text+0x2b5): warning: the use of `tmpnam' is dangerous, better use `mkstemp'
make[3]: Leaving directory '/home/ubuntu/ci/redis/deps/lua/src'
make[2]: Leaving directory '/home/ubuntu/ci/redis/deps'
    CC adlist.o
    CC quicklist.o
    CC ae.o
    CC anet.o
    CC server.o
    CC dict.o
    CC sds.o
    CC zmalloc.o
    CC lzf_c.o
    CC lzf_d.o
    CC pqsort.o
    CC zipmap.o
    CC sha1.o
    CC ziplist.o
    CC release.o
    CC networking.o
    CC util.o
    CC object.o
    CC db.o
    CC replication.o
    CC rdb.o
    CC t_string.o
    CC t_list.o
    CC t_set.o
    CC t_zset.o
    CC t_hash.o
    CC config.o
    CC aof.o
    CC pubsub.o
    CC multi.o
    CC debug.o
    CC sort.o
    CC intset.o
    CC syncio.o
    CC cluster.o
    CC crc16.o
    CC endianconv.o
    CC slowlog.o
    CC scripting.o
    CC bio.o
    CC rio.o
    CC rand.o
    CC memtest.o
    CC crcspeed.o
    CC crc64.o
    CC bitops.o
    CC sentinel.o
    CC notify.o
    CC setproctitle.o
    CC blocked.o
    CC hyperloglog.o
    CC latency.o
    CC sparkline.o
    CC redis-check-rdb.o
    CC redis-check-aof.o
    CC geo.o
    CC lazyfree.o
    CC module.o
    CC evict.o
    CC expire.o
    CC geohash.o
    CC geohash_helper.o
    CC childinfo.o
    CC defrag.o
    CC siphash.o
    CC rax.o
    CC t_stream.o
    CC listpack.o
    CC localtime.o
    CC lolwut.o
    CC lolwut5.o
    CC lolwut6.o
    CC acl.o
    CC gopher.o
    CC tracking.o
    CC connection.o
    CC tls.o
    CC sha256.o
    CC timeout.o
    CC setcpuaffinity.o
    CC redis-cli.o
    CC redis-benchmark.o
    LINK redis-server
    LINK redis-benchmark
    INSTALL redis-sentinel
    INSTALL redis-check-rdb
    INSTALL redis-check-aof
    LINK redis-cli

Hint: It's a good idea to run 'make test' ;)

make[1]: Leaving directory '/home/ubuntu/ci/redis/src'

@./runtest

Cleanup: may take some time... OK
Starting test server at port 11111
[ready]: 370632
Testing unit/printver
[ready]: 370633
Testing unit/dump
[ready]: 370634
Testing unit/auth
[ready]: 370635
Testing unit/protocol
[ready]: 370636
Testing unit/keyspace
[ready]: 370637
Testing unit/scan
[ready]: 370638
Testing unit/type/string
[ready]: 370639
Testing unit/type/incr
[ready]: 370640
Testing unit/type/list
[ready]: 370641
Testing unit/type/list-2
[ready]: 370642
Testing unit/type/list-3
[ready]: 370643
Testing unit/type/set
[ready]: 370645
Testing unit/type/zset
[ready]: 370644
Testing unit/type/hash
[ready]: 370646
Testing unit/type/stream
[ready]: 370647
Testing unit/type/stream-cgroups
Testing Redis version 999.999.999 (ad0a9df7)
[ok]: XADD can add entries into a stream that XRANGE can fetch
[ok]: XADD IDs are incremental
[ok]: Explicit regression for a list bug
[ok]: XADD IDs are incremental when ms is the same as well
[ok]: XADD IDs correctly report an error when overflowing
[ok]: DUMP / RESTORE are able to serialize / unserialize a simple key
[ok]: RESTORE can set an arbitrary expire to the materialized key
[ok]: RESTORE can set an expire that overflows a 32 bit integer
[ok]: RESTORE can set an absolute expire
[ok]: RESTORE can set LRU
[ok]: RESTORE can set LFU
[ok]: RESTORE returns an error if the key already exists
[ok]: RESTORE can overwrite an existing key with REPLACE
[ok]: RESTORE can detect a syntax error for unrecognized options
[ok]: DUMP of non existing key returns nil
[ok]: HSET/HLEN - Small hash creation
[ok]: Is the small hash encoded with a ziplist?
[ok]: INCR against non existing key
[ok]: INCR against key created by incr itself
[ok]: INCR against key originally set with SET
[ok]: INCR over 32bit value
[ok]: INCRBY over 32bit value with over 32bit increment
[ok]: INCR fails against key with spaces (left)
[ok]: INCR fails against key with spaces (right)
[ok]: INCR fails against key with spaces (both)
[ok]: INCR fails against a key holding a list
[ok]: DECRBY over 32bit value with over 32bit increment, negative res
[ok]: INCR uses shared objects in the 0-9999 range
[ok]: INCR can modify objects in-place
[ok]: INCRBYFLOAT against non existing key
[ok]: INCRBYFLOAT against key originally set with SET
[ok]: INCRBYFLOAT over 32bit value
[ok]: INCRBYFLOAT over 32bit value with over 32bit increment
[ok]: INCRBYFLOAT fails against key with spaces (left)
[ok]: INCRBYFLOAT fails against key with spaces (right)
[ok]: INCRBYFLOAT fails against key with spaces (both)
[ok]: INCRBYFLOAT fails against a key holding a list
[ok]: INCRBYFLOAT does not allow NaN or Infinity
[ok]: INCRBYFLOAT decrement
[ok]: string to double with null terminator
[ok]: No negative zero
[ok]: DEL against a single item
[ok]: Vararg DEL
[ok]: KEYS with pattern
[ok]: KEYS to get all keys
[ok]: DBSIZE
[ok]: DEL all keys
[ok]: LPOS basic usage
[ok]: LPOS RANK (positive and negative rank) option
[ok]: LPOS COUNT option
[ok]: LPOS COUNT + RANK option
[ok]: XGROUP CREATE: creation and duplicate group name detection
[ok]: LPOS non existing key
[ok]: XGROUP CREATE: automatic stream creation fails without MKSTREAM
[ok]: LPOS no match
[ok]: XGROUP CREATE: automatic stream creation works with MKSTREAM
[ok]: LPOS MAXLEN
[ok]: LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - ziplist
[ok]: XREADGROUP will return only new elements
[ok]: XREADGROUP can read the history of the elements we own
[ok]: LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - regular list
[ok]: R/LPOP against empty list
[ok]: XPENDING is able to return pending items
[ok]: XPENDING can return single consumer items
[ok]: Variadic RPUSH/LPUSH
[ok]: XACK is able to remove items from the client/group PEL
[ok]: DEL a list
[ok]: XACK can't remove the same item multiple times
[ok]: BLPOP, BRPOP: single existing list - linkedlist
[ok]: XACK is able to accept multiple arguments
[ok]: XACK should fail if got at least one invalid ID
[ok]: BLPOP, BRPOP: multiple existing lists - linkedlist
[ok]: PEL NACK reassignment after XGROUP SETID event
[ok]: BLPOP, BRPOP: second list has an entry - linkedlist
[ok]: XREADGROUP will not report data on empty history. Bug #5577
[ok]: BRPOPLPUSH - linkedlist
[ok]: XREADGROUP history reporting of deleted entries. Bug #5570
[ok]: BLPOP, BRPOP: single existing list - ziplist
[ok]: BLPOP, BRPOP: multiple existing lists - ziplist
[ok]: BLPOP, BRPOP: second list has an entry - ziplist
[ok]: AUTH fails if there is no password configured server side
[ok]: BRPOPLPUSH - ziplist
[ok]: BLPOP, LPUSH + DEL should not awake blocked client
[ok]: SADD, SCARD, SISMEMBER, SMEMBERS basics - regular set
[ok]: BLPOP, LPUSH + DEL + SET should not awake blocked client
[ok]: SADD, SCARD, SISMEMBER, SMEMBERS basics - intset
[ok]: SADD against non set
[ok]: BLPOP with same key multiple times should work (issue #801)
[ok]: SADD a non-integer against an intset
[ok]: MULTI/EXEC is isolated from the point of view of BLPOP
[ok]: SADD an integer larger than 64 bits
[ok]: BLPOP with variadic LPUSH
[ok]: SADD overflows the maximum allowed integers in an intset
[ok]: Variadic SADD
[ok]: Blocking XREADGROUP will not reply with an empty array
[ok]: XGROUP DESTROY should unblock XREADGROUP with -NOGROUP
[ok]: RENAME can unblock XREADGROUP with data
[ok]: RENAME can unblock XREADGROUP with -NOGROUP
[ok]: XADD with MAXLEN option
[ok]: Handle an empty query
[ok]: Negative multibulk length
[1/54 done]: unit/printver (0 seconds)
Testing unit/sort
[ok]: Out of range multibulk length
[ok]: Wrong multibulk payload header
[ok]: HSET/HLEN - Big hash creation
[ok]: Negative multibulk payload length
[ok]: Out of range multibulk payload length
[ok]: Is the big hash encoded with a hash table?
[ok]: Non-number multibulk payload length
[ok]: Multi bulk request not followed by bulk arguments
[ok]: Regression for quicklist #3343 bug
[ok]: Generic wrong number of args
[ok]: Unbalanced number of quotes
[ok]: Check encoding - ziplist
[ok]: ZSET basic ZADD and score update - ziplist
[ok]: ZSET element can't be set to NaN with ZADD - ziplist
[ok]: ZSET element can't be set to NaN with ZINCRBY
[ok]: ZADD with options syntax error with incomplete pair
[ok]: ZADD XX option without key - ziplist
[ok]: ZADD XX existing key - ziplist
[ok]: ZADD XX returns the number of elements actually added
[ok]: ZADD XX updates existing elements score
[ok]: ZADD XX and NX are not compatible
[ok]: ZADD NX with non existing key
[ok]: ZADD NX only add new elements without updating old ones
[ok]: ZADD INCR works like ZINCRBY
[ok]: ZADD INCR works with a single score-element pair
[ok]: ZADD CH option changes return value to all changed elements
[ok]: ZINCRBY calls leading to NaN result in error
[ok]: HGET against the small hash
[ok]: ZADD - Variadic version base case
[ok]: ZADD - Return value is the number of actually added items
[ok]: ZADD - Variadic version does not add nothing on single parsing err
[ok]: ZADD - Variadic version will raise error on missing arg
[ok]: Set encoding after DEBUG RELOAD
[ok]: ZINCRBY does not work variadic even if shares ZADD implementation
[ok]: ZCARD basics - ziplist
[ok]: SREM basics - regular set
[ok]: SREM basics - intset
[ok]: ZREM removes key after last element is removed
[ok]: SREM with multiple arguments
[ok]: ZREM variadic version
[ok]: SREM variadic version with more args needed to destroy the key
[ok]: ZREM variadic version -- remove elements after key deletion
[ok]: ZRANGE basics - ziplist
[ok]: SET and GET an item
[ok]: SET and GET an empty item
[ok]: ZREVRANGE basics - ziplist
[ok]: ZRANK/ZREVRANK basics - ziplist
[ok]: ZRANK - after deletion - ziplist
[ok]: ZINCRBY - can create a new sorted set - ziplist
[ok]: ZINCRBY - increment and decrement - ziplist
[ok]: ZINCRBY return value
[ok]: ZRANGEBYSCORE/ZREVRANGEBYSCORE/ZCOUNT basics
[ok]: ZRANGEBYSCORE with WITHSCORES
[ok]: ZRANGEBYSCORE with LIMIT
[ok]: ZRANGEBYSCORE with LIMIT and WITHSCORES
[ok]: ZRANGEBYSCORE with non-value min or max
[ok]: ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics
[ok]: ZLEXCOUNT advanced
[ok]: SCAN basic
[ok]: ZRANGEBYSLEX with LIMIT
[ok]: ZRANGEBYLEX with invalid lex range specifiers
[ok]: Generated sets must be encoded as hashtable
[ok]: SINTER with two sets - hashtable
[ok]: SINTERSTORE with two sets - hashtable
[ok]: SINTERSTORE with two sets, after a DEBUG RELOAD - hashtable
[ok]: SUNION with two sets - hashtable
[ok]: ZREMRANGEBYSCORE basics
[ok]: ZREMRANGEBYSCORE with non-value min or max
[ok]: ZREMRANGEBYRANK basics
[ok]: SCAN COUNT
[ok]: ZUNIONSTORE against non-existing key doesn't set destination - ziplist
[ok]: SUNIONSTORE with two sets - hashtable
[ok]: ZUNIONSTORE with empty set - ziplist
[ok]: ZUNIONSTORE basics - ziplist
[ok]: ZUNIONSTORE with weights - ziplist
[ok]: SINTER against three sets - hashtable
[ok]: ZUNIONSTORE with a regular set and weights - ziplist
[ok]: SINTERSTORE with three sets - hashtable
[ok]: ZUNIONSTORE with AGGREGATE MIN - ziplist
[ok]: Protocol desync regression test #1
[ok]: ZUNIONSTORE with AGGREGATE MAX - ziplist
[ok]: ZINTERSTORE basics - ziplist
[ok]: ZINTERSTORE with weights - ziplist
[2/54 done]: unit/type/incr (0 seconds)
Testing unit/expire
[ok]: ZINTERSTORE with a regular set and weights - ziplist
[ok]: SCAN MATCH
[ok]: ZINTERSTORE with AGGREGATE MIN - ziplist
[ok]: ZINTERSTORE with AGGREGATE MAX - ziplist
[ok]: ZUNIONSTORE with +inf/-inf scores - ziplist
[ok]: ZUNIONSTORE with NaN weights ziplist
[ok]: SUNION with non existing keys - hashtable
[ok]: ZINTERSTORE with +inf/-inf scores - ziplist
[ok]: SDIFF with two sets - hashtable
[ok]: ZINTERSTORE with NaN weights ziplist
[ok]: SDIFF with three sets - hashtable
[ok]: SDIFFSTORE with three sets - hashtable
[ok]: Basic ZPOP with a single key - ziplist
[ok]: ZPOP with count - ziplist
[ok]: BZPOP with a single existing sorted set - ziplist
[ok]: BZPOP with multiple existing sorted sets - ziplist
[ok]: BZPOP second sorted set has members - ziplist
[ok]: Check encoding - skiplist
[ok]: ZSET basic ZADD and score update - skiplist
[ok]: ZSET element can't be set to NaN with ZADD - skiplist
[ok]: ZSET element can't be set to NaN with ZINCRBY
[ok]: ZADD with options syntax error with incomplete pair
[ok]: ZADD XX option without key - skiplist
[ok]: ZADD XX existing key - skiplist
[ok]: ZADD XX returns the number of elements actually added
[ok]: ZADD XX updates existing elements score
[ok]: ZADD XX and NX are not compatible
[ok]: ZADD NX with non existing key
[ok]: ZADD NX only add new elements without updating old ones
[ok]: ZADD INCR works like ZINCRBY
[ok]: ZADD INCR works with a single score-element pair
[ok]: ZADD CH option changes return value to all changed elements
[ok]: ZINCRBY calls leading to NaN result in error
[ok]: ZADD - Variadic version base case
[ok]: ZADD - Return value is the number of actually added items
[ok]: ZADD - Variadic version does not add nothing on single parsing err
[ok]: ZADD - Variadic version will raise error on missing arg
[ok]: ZINCRBY does not work variadic even if shares ZADD implementation
[ok]: ZCARD basics - skiplist
[ok]: ZREM removes key after last element is removed
[ok]: ZREM variadic version
[ok]: Very big payload in GET/SET
[ok]: ZREM variadic version -- remove elements after key deletion
[ok]: ZRANGE basics - skiplist
[ok]: SCAN TYPE
[ok]: SSCAN with encoding intset
[ok]: Protocol desync regression test #2
[ok]: SSCAN with encoding hashtable
[ok]: ZREVRANGE basics - skiplist
[ok]: HSCAN with encoding ziplist
[ok]: ZRANK/ZREVRANK basics - skiplist
[ok]: ZRANK - after deletion - skiplist
[ok]: ZINCRBY - can create a new sorted set - skiplist
[ok]: ZINCRBY - increment and decrement - skiplist
[ok]: ZINCRBY return value
[ok]: ZRANGEBYSCORE/ZREVRANGEBYSCORE/ZCOUNT basics
[ok]: ZRANGEBYSCORE with WITHSCORES
[ok]: ZRANGEBYSCORE with LIMIT
[ok]: ZRANGEBYSCORE with LIMIT and WITHSCORES
[ok]: ZRANGEBYSCORE with non-value min or max
[ok]: ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics
[ok]: ZLEXCOUNT advanced
[ok]: HGET against the big hash
[ok]: HGET against non existing key
[ok]: HSET in update and insert mode
[ok]: HSETNX target key missing - small hash
[ok]: HSETNX target key exists - small hash
[ok]: HSETNX target key missing - big hash
[ok]: HSETNX target key exists - big hash
[ok]: HMSET wrong number of args
[ok]: HMSET - small hash
[ok]: ZRANGEBYSLEX with LIMIT
[ok]: ZRANGEBYLEX with invalid lex range specifiers
[ok]: Protocol desync regression test #3
[ok]: Generated sets must be encoded as intset
[ok]: SINTER with two sets - intset
[ok]: SINTERSTORE with two sets - intset
[ok]: SINTERSTORE with two sets, after a DEBUG RELOAD - intset
[ok]: ZREMRANGEBYSCORE basics
[ok]: ZREMRANGEBYSCORE with non-value min or max
[ok]: SUNION with two sets - intset
[ok]: ZREMRANGEBYRANK basics
[ok]: ZUNIONSTORE against non-existing key doesn't set destination - skiplist
[ok]: ZUNIONSTORE with empty set - skiplist
[ok]: ZUNIONSTORE basics - skiplist
[ok]: ZUNIONSTORE with weights - skiplist
[ok]: ZUNIONSTORE with a regular set and weights - skiplist
[ok]: ZUNIONSTORE with AGGREGATE MIN - skiplist
[ok]: ZUNIONSTORE with AGGREGATE MAX - skiplist
[ok]: ZINTERSTORE basics - skiplist
[ok]: ZINTERSTORE with weights - skiplist
[ok]: SUNIONSTORE with two sets - intset
[ok]: SINTER against three sets - intset
[ok]: SINTERSTORE with three sets - intset
[ok]: ZINTERSTORE with a regular set and weights - skiplist
[ok]: ZINTERSTORE with AGGREGATE MIN - skiplist
[ok]: ZINTERSTORE with AGGREGATE MAX - skiplist
[ok]: ZUNIONSTORE with +inf/-inf scores - skiplist
[ok]: ZUNIONSTORE with NaN weights skiplist
[ok]: SUNION with non existing keys - intset
[ok]: ZINTERSTORE with +inf/-inf scores - skiplist
[ok]: ZINTERSTORE with NaN weights skiplist
[ok]: Basic ZPOP with a single key - skiplist
[ok]: ZPOP with count - skiplist
[ok]: BZPOP with a single existing sorted set - skiplist
[ok]: BZPOP with multiple existing sorted sets - skiplist
[ok]: BZPOP second sorted set has members - skiplist
[ok]: SDIFF with two sets - intset
[ok]: SDIFF with three sets - intset
[ok]: ZINTERSTORE regression with two sets, intset+hashtable
[ok]: SDIFFSTORE with three sets - intset
[ok]: SDIFF with first set empty
[ok]: ZUNIONSTORE regression, should not create NaN in scores
[ok]: SDIFF with same set two times
[ok]: ZINTERSTORE #516 regression, mixed sets and ziplist zsets
[ok]: HSCAN with encoding hashtable
[ok]: ZSCAN with encoding ziplist
[ok]: MIGRATE is caching connections
[ok]: HMSET - big hash
[ok]: HMGET against non existing key and fields
[ok]: HMGET against wrong type
[ok]: HMGET - small hash
[ok]: Old Ziplist: SORT BY key
[ok]: Old Ziplist: SORT BY key with limit
[ok]: Old Ziplist: SORT BY hash field
[ok]: EXPIRE - set timeouts multiple times
[ok]: EXPIRE - It should be still possible to read 'x'
[ok]: HMGET - big hash
[ok]: HKEYS - small hash
[ok]: ZSCAN with encoding skiplist
[ok]: HKEYS - big hash
[ok]: ZUNIONSTORE result is sorted
[ok]: ZSET commands don't accept the empty strings as valid score
[ok]: HVALS - small hash
[ok]: SCAN guarantees check under write load
[ok]: SSCAN with integer encoded object (issue #1345)
[ok]: AUTH fails when a wrong password is given
[ok]: Arbitrary command gives an error when AUTH is required
[ok]: AUTH succeeds when the right password is given
[ok]: SSCAN with PATTERN
[ok]: Once AUTH succeeded we can actually send commands to the server
[ok]: HSCAN with PATTERN
[ok]: ZSCAN with PATTERN
[ok]: HVALS - big hash
[ok]: HGETALL - small hash
[ok]: ZSCORE - ziplist
[ok]: HGETALL - big hash
[ok]: HDEL and return value
[ok]: ZSCORE after a DEBUG RELOAD - ziplist
[ok]: HDEL - more than a single value
[ok]: HDEL - hash becomes empty before deleting all specified fields
[ok]: HEXISTS
[ok]: Is a ziplist encoded Hash promoted on big payload?
[ok]: HINCRBY against non existing database key
[ok]: HINCRBY against non existing hash key
[ok]: HINCRBY against hash key created by hincrby itself
[ok]: HINCRBY against hash key originally set with HSET
[ok]: HINCRBY over 32bit value
[ok]: HINCRBY over 32bit value with over 32bit increment
[ok]: HINCRBY fails against hash value with spaces (left)
[ok]: HINCRBY fails against hash value with spaces (right)
[ok]: HINCRBY can detect overflows
[ok]: HINCRBYFLOAT against non existing database key
[ok]: HINCRBYFLOAT against non existing hash key
[ok]: HINCRBYFLOAT against hash key created by hincrby itself
[ok]: HINCRBYFLOAT against hash key originally set with HSET
[ok]: HINCRBYFLOAT over 32bit value
[ok]: HINCRBYFLOAT over 32bit value with over 32bit increment
[ok]: HINCRBYFLOAT fails against hash value with spaces (left)
[ok]: ZSCAN scores: regression test for issue #2175
[ok]: HINCRBYFLOAT fails against hash value with spaces (right)
[ok]: HINCRBYFLOAT fails against hash value that contains a null-terminator in the middle
[ok]: HSTRLEN against the small hash
[ok]: ZSET sorting stresser - ziplist
[ok]: HSTRLEN against the big hash
[ok]: HSTRLEN against non existing field
[ok]: HSTRLEN corner cases
[ok]: Hash ziplist regression test for large keys
[3/54 done]: unit/auth (1 seconds)
Testing unit/other
[ok]: Old Linked list: SORT BY key
[ok]: Old Linked list: SORT BY key with limit
[ok]: Hash fuzzing #1 - 10 fields
[ok]: Old Linked list: SORT BY hash field
[ok]: Hash fuzzing #2 - 10 fields
[ok]: XCLAIM can claim PEL items from another consumer
[ok]: SAVE - make sure there are all the types as values
[ok]: Regression for a crash with blocking ops and pipelining
[4/54 done]: unit/protocol (1 seconds)
Testing unit/multi
[ok]: BRPOPLPUSH with zero timeout should block indefinitely
[ok]: XCLAIM without JUSTID increments delivery count
[ok]: XINFO FULL output
[ok]: DEL against expired key
[ok]: EXISTS
[ok]: Zero length value in key. SET/GET/EXISTS
[ok]: Commands pipelining
[ok]: Non existing command
[ok]: RENAME basic usage
[ok]: RENAME source key should no longer exist
[ok]: RENAME against already existing key
[ok]: RENAMENX basic usage
[ok]: RENAMENX against already existing key
[ok]: RENAMENX against already existing key (2)
[ok]: RENAME against non existing source key
[ok]: RENAME where source and dest key are the same (existing)
[ok]: RENAMENX where source and dest key are the same (existing)
[ok]: RENAME where source and dest key are the same (non existing)
[ok]: RENAME with volatile key, should move the TTL as well
[ok]: RENAME with volatile key, should not inherit TTL of target key
[ok]: DEL all keys again (DB 0)
[ok]: DEL all keys again (DB 1)
[ok]: MOVE basic usage
[ok]: MOVE against key existing in the target DB
[ok]: MOVE against non-integer DB (#1428)
[ok]: MOVE can move key expire metadata as well
[ok]: MOVE does not create an expire if it does not exist
[ok]: SET/GET keys in different DBs
[ok]: RANDOMKEY
[ok]: RANDOMKEY against empty DB
[ok]: RANDOMKEY regression 1
[ok]: KEYS * two times with long key, Github issue #1208
[ok]: MULTI / EXEC basics
[ok]: DISCARD
[ok]: Nested MULTI are not allowed
[ok]: MULTI where commands alter argc/argv
[ok]: WATCH inside MULTI is not allowed
[ok]: EXEC fails if there are errors while queueing commands #1
[ok]: EXEC fails if there are errors while queueing commands #2
[ok]: If EXEC aborts, the client MULTI state is cleared
[ok]: EXEC works on WATCHed key not modified
[ok]: EXEC fail on WATCHed key modified (1 key of 1 watched)
[ok]: EXEC fail on WATCHed key modified (1 key of 5 watched)
[ok]: EXEC fail on WATCHed key modified by SORT with STORE even if the result is empty
[ok]: After successful EXEC key is no longer watched
[ok]: After failed EXEC key is no longer watched
[ok]: It is possible to UNWATCH
[ok]: UNWATCH when there is nothing watched works as expected
[ok]: FLUSHALL is able to touch the watched keys
[ok]: FLUSHALL does not touch non affected keys
[ok]: FLUSHDB is able to touch the watched keys
[ok]: FLUSHDB does not touch non affected keys
[ok]: WATCH is able to remember the DB a key belongs to
[ok]: WATCH will consider touched keys target of EXPIRE
[5/54 done]: unit/keyspace (1 seconds)
Testing unit/quit
[ok]: FUZZ stresser with data model binary
[ok]: QUIT returns OK
[ok]: Pipelined commands after QUIT must not be executed
[ok]: Pipelined commands after QUIT that exceed read buffer size
[6/54 done]: unit/quit (1 seconds)
Testing unit/aofrw
[ok]: BRPOPLPUSH with a client BLPOPing the target list
[ok]: BRPOPLPUSH with wrong source type
[ok]: Consumer group last ID propagation to slave (NOACK=0)
[ok]: XADD mass insertion and XLEN
[ok]: XADD with ID 0-0
[ok]: XRANGE COUNT works as expected
[ok]: XREVRANGE COUNT works as expected
[ok]: WATCH will not consider touched expired keys
[ok]: DISCARD should clear the WATCH dirty flag on the client
[ok]: DISCARD should UNWATCH all the keys
[ok]: MULTI / EXEC is propagated correctly (single write command)
[ok]: FUZZ stresser with data model alpha
[ok]: MULTI / EXEC is propagated correctly (empty transaction)
[ok]: EXPIRE - After 2.1 seconds the key should no longer be here
[ok]: EXPIRE - write on expire should work
[ok]: EXPIREAT - Check for EXPIRE alike behavior
[ok]: SETEX - Set + Expire combo operation. Check for TTL
[ok]: SETEX - Check value
[ok]: SETEX - Overwrite old key
[ok]: MULTI / EXEC is propagated correctly (read-only commands)
[ok]: MULTI / EXEC is propagated correctly (write command, no effect)
[ok]: DISCARD should not fail during OOM
[ok]: XRANGE can be used to iterate the whole stream
[ok]: BRPOPLPUSH with wrong destination type
[ok]: BRPOPLPUSH maintains order of elements after failure
[ok]: BRPOPLPUSH with multiple blocked clients
[ok]: Linked BRPOPLPUSH
[ok]: Circular BRPOPLPUSH
[ok]: Self-referential BRPOPLPUSH
[ok]: BRPOPLPUSH inside a transaction
[ok]: PUSH resulting from BRPOPLPUSH affect WATCH
[ok]: BRPOPLPUSH does not affect WATCH while still blocked
[ok]: MULTI and script timeout
[ok]: Consumer group last ID propagation to slave (NOACK=1)
[ok]: EXEC and script timeout
[ok]: SETEX - Wait for the key to expire
[ok]: SETEX - Wrong time parameter
[ok]: PERSIST can undo an EXPIRE
[ok]: PERSIST returns 0 against non existing or non volatile keys
[ok]: MULTI-EXEC body and script timeout
[ok]: Hash fuzzing #1 - 512 fields
[ok]: Very big payload random access
[ok]: FUZZ stresser with data model compr
[ok]: just EXEC and script timeout
[ok]: exec with write commands and state change
[ok]: exec with read commands and stale replica state change
[7/54 done]: unit/multi (3 seconds)
Testing unit/acl
[ok]: Empty stream with no lastid can be rewritten into AOF correctly
[ok]: Connections start with the default user
[ok]: It is possible to create new users
[ok]: New users start disabled
[ok]: Enabling the user allows the login
[ok]: Only the set of correct passwords work
[ok]: It is possible to remove passwords from the set of valid ones
[ok]: Test password hashes can be added
[ok]: Test password hashes validate input
[ok]: ACL GETUSER returns the password hash instead of the actual password
[ok]: Test hashed passwords removal
[ok]: By default users are not able to access any command
[ok]: By default users are not able to access any key
[ok]: It's possible to allow the access of a subset of keys
[ok]: Users can be configured to authenticate with any password
[ok]: ACLs can exclude single commands
[ok]: ACLs can include or exclude whole classes of commands
[ok]: ACLs can include single subcommands
[ok]: ACL GETUSER is able to translate back command permissions
[ok]: ACL #5998 regression: memory leaks adding / removing subcommands
[ok]: ACL LOG shows failed command executions at toplevel
[ok]: ACL LOG is able to test similar events
[ok]: ACL LOG is able to log keys access violations and key name
[ok]: ACL LOG RESET is able to flush the entries in the log
[ok]: ACL LOG can distinguish the transaction context (1)
[ok]: ACL LOG can distinguish the transaction context (2)
[ok]: ACL can log errors in the context of Lua scripting
[ok]: ACL LOG can accept a numerical argument to show less entries
[ok]: ACL LOG can log failed auth attempts
[ok]: ACL LOG entries are limited to a maximum amount
[ok]: When default user is off, new connections are not authenticated
[8/54 done]: unit/acl (1 seconds)
Testing integration/block-repl
[ok]: Old Big Linked list: SORT BY key
[ok]: Old Big Linked list: SORT BY key with limit
[ok]: BRPOPLPUSH timeout
[ok]: BLPOP when new key is moved into place
[ok]: BLPOP when result key is created by SORT..STORE
[ok]: BLPOP: with single empty list argument
[ok]: BLPOP: with negative timeout
[ok]: BLPOP: with non-integer timeout
[ok]: BGSAVE
[ok]: SELECT an out of range DB
[9/54 done]: unit/type/stream-cgroups (5 seconds)
Testing integration/replication
[ok]: ZRANGEBYSCORE fuzzy test, 100 ranges in 128 element sorted set - ziplist
[ok]: EXPIRE precision is now the millisecond
[ok]: Old Big Linked list: SORT BY hash field
[ok]: Intset: SORT BY key
[ok]: Intset: SORT BY key with limit
[ok]: Intset: SORT BY hash field
[ok]: ZRANGEBYLEX fuzzy test, 100 ranges in 128 element sorted set - ziplist
[ok]: Hash fuzzing #2 - 512 fields
[ok]: BLPOP: with zero timeout should block indefinitely
[ok]: BLPOP: second argument is not a list
[ok]: Hash table: SORT BY key
[ok]: Hash table: SORT BY key with limit
[ok]: Hash table: SORT BY hash field
[ok]: SET 10000 numeric keys and access all them in reverse order
[ok]: DBSIZE should be 10000 now
[ok]: SETNX target key missing
[ok]: SETNX target key exists
[ok]: SETNX against not-expired volatile key
[ok]: Slave enters handshake
[ok]: First server should have role slave after SLAVEOF
[ok]: ZREMRANGEBYLEX fuzzy test, 100 ranges in 128 element sorted set - ziplist
[ok]: ZSETs skiplist implementation backlink consistency test - ziplist
[ok]: BLPOP: timeout
[ok]: BLPOP: arguments are empty
[ok]: BRPOP: with single empty list argument
[ok]: BRPOP: with negative timeout
[ok]: BRPOP: with non-integer timeout
[ok]: Check consistency of different data types after a reload
[ok]: PEXPIRE/PSETEX/PEXPIREAT can set sub-second expires
[ok]: TTL returns time to live in seconds
[ok]: PTTL returns time to live in milliseconds
[ok]: TTL / PTTL return -1 if key has no expire
[ok]: TTL / PTTL return -2 if key does not exist
[ok]: XREVRANGE returns the reverse of XRANGE
[ok]: XREAD with non empty stream
[ok]: Non blocking XREAD with empty streams
[ok]: XREAD with non empty second stream
[ok]: Blocking XREAD waiting new data
[ok]: Blocking XREAD waiting old data
[ok]: Blocking XREAD will not reply with an empty array
[ok]: XREAD: XADD + DEL should not awake client
[ok]: XREAD: XADD + DEL + LPUSH should not awake client
[ok]: XREAD with same stream name multiple times should work
[ok]: XREAD + multiple XADD inside transaction
[ok]: XDEL basic test
[ok]: BRPOP: with zero timeout should block indefinitely
[ok]: BRPOP: second argument is not a list
[ok]: SDIFF fuzzing
[ok]: SINTER against non-set should throw error
[ok]: SUNION against non-set should throw error
[ok]: SINTER should handle non existing key as empty
[ok]: SINTER with same integer elements but different encoding
[ok]: SINTERSTORE against non existing keys should delete dstkey
[ok]: SUNIONSTORE against non existing keys should delete dstkey
[ok]: SPOP basics - hashtable
[ok]: SPOP with =1 - hashtable
[ok]: SRANDMEMBER - hashtable
[ok]: SPOP basics - intset
[ok]: SPOP with =1 - intset
[ok]: SRANDMEMBER - intset
[ok]: SPOP with <count>
[ok]: SPOP with <count>
[ok]: SPOP using integers, testing Knuth's and Floyd's algorithm
[ok]: SPOP using integers with Knuth's algorithm
[ok]: SPOP new implementation: code path #1
[ok]: SPOP new implementation: code path #2
[ok]: SPOP new implementation: code path #3
[ok]: SRANDMEMBER with <count> against non existing key
[ok]: ZSETs ZRANK augmented skip list stress testing - ziplist
[ok]: SRANDMEMBER with <count> - hashtable
[ok]: BZPOPMIN, ZADD + DEL should not awake blocked client
[ok]: BZPOPMIN, ZADD + DEL + SET should not awake blocked client
[ok]: BZPOPMIN with same key multiple times should work
[ok]: MULTI/EXEC is isolated from the point of view of BZPOPMIN
[ok]: BZPOPMIN with variadic ZADD
[ok]: SRANDMEMBER with <count> - intset
[ok]: SMOVE basics - from regular set to intset
[ok]: SMOVE basics - from intset to regular set
[ok]: SMOVE non existing key
[ok]: SMOVE non existing src set
[ok]: SMOVE from regular set to non existing destination set
[ok]: Same dataset digest if saving/reloading as AOF?
[ok]: SMOVE from intset to non existing destination set
[ok]: SMOVE wrong src key type
[ok]: SMOVE wrong dst key type
[ok]: SMOVE with identical source and destination
[ok]: Redis should actively expire keys incrementally
[ok]: Stress test the hash ziplist -> hashtable encoding conversion
[ok]: Test HINCRBYFLOAT for correct float representation (issue #2846)
[ok]: BRPOP: timeout
[ok]: BRPOP: arguments are empty
[ok]: BLPOP inside a transaction
[ok]: LPUSHX, RPUSHX - generic
[ok]: LPUSHX, RPUSHX - linkedlist
[ok]: LINSERT - linkedlist
[ok]: LPUSHX, RPUSHX - ziplist
[ok]: LINSERT - ziplist
[ok]: LINSERT raise error on bad syntax
[10/54 done]: unit/type/hash (9 seconds)
Testing integration/replication-2
[ok]: LINDEX consistency test - quicklist
[ok]: BZPOPMIN with zero timeout should block indefinitely
[ok]: LINDEX random access - quicklist
[ok]: ZSCORE - skiplist
[ok]: EXPIRES after a reload (snapshot + append only file rewrite)
[ok]: ZSCORE after a DEBUG RELOAD - skiplist
[ok]: ZSET sorting stresser - skiplist
[ok]: Redis should lazy expire keys
[ok]: Big Hash table: SORT BY key
[ok]: Check if list is still ok after a DEBUG RELOAD - quicklist
[ok]: Big Hash table: SORT BY key with limit
[ok]: SETNX against expired volatile key
[ok]: MGET
[ok]: MGET against non existing key
[ok]: MGET against non-string key
[ok]: GETSET (set new value)
[ok]: GETSET (replace old value)
[ok]: MSET base case
[ok]: MSET wrong number of args
[ok]: MSETNX with already existent key
[ok]: MSETNX with not existing keys
[ok]: STRLEN against non-existing key
[ok]: STRLEN against integer-encoded value
[ok]: STRLEN against plain string
[ok]: SETBIT against non-existing key
[ok]: SETBIT against string-encoded key
[ok]: SETBIT against integer-encoded key
[ok]: SETBIT against key with wrong type
[ok]: SETBIT with out of range bit offset
[ok]: SETBIT with non-bit argument
[ok]: LINDEX consistency test - quicklist
[ok]: LINDEX random access - quicklist
[ok]: Check if list is still ok after a DEBUG RELOAD - quicklist
[ok]: LLEN against non-list value error
[ok]: LLEN against non existing key
[ok]: LINDEX against non-list value error
[ok]: LINDEX against non existing key
[ok]: LPUSH against non-list value error
[ok]: RPUSH against non-list value error
[ok]: RPOPLPUSH base case - linkedlist
[ok]: RPOPLPUSH with the same list as src and dst - linkedlist
[ok]: RPOPLPUSH with linkedlist source and existing target linkedlist
[ok]: RPOPLPUSH with linkedlist source and existing target ziplist
[ok]: RPOPLPUSH base case - ziplist
[ok]: RPOPLPUSH with the same list as src and dst - ziplist
[ok]: RPOPLPUSH with ziplist source and existing target linkedlist
[ok]: RPOPLPUSH with ziplist source and existing target ziplist
[ok]: RPOPLPUSH against non existing key
[ok]: RPOPLPUSH against non list src key
[ok]: RPOPLPUSH against non list dst key
[ok]: RPOPLPUSH against non existing src key
[ok]: Basic LPOP/RPOP - linkedlist
[ok]: Basic LPOP/RPOP - ziplist
[ok]: LPOP/RPOP against non list value
[ok]: Mass RPOP/LPOP - quicklist
[ok]: Mass RPOP/LPOP - quicklist
[ok]: LRANGE basics - linkedlist
[ok]: LRANGE inverted indexes - linkedlist
[ok]: LRANGE out of range indexes including the full list - linkedlist
[ok]: LRANGE out of range negative end index - linkedlist
[ok]: LRANGE basics - ziplist
[ok]: LRANGE inverted indexes - ziplist
[ok]: LRANGE out of range indexes including the full list - ziplist
[ok]: LRANGE out of range negative end index - ziplist
[ok]: LRANGE against non existing key
[ok]: LTRIM basics - linkedlist
[ok]: LTRIM out of range negative end index - linkedlist
[ok]: LTRIM basics - ziplist
[ok]: LTRIM out of range negative end index - ziplist
[ok]: LSET - linkedlist
[ok]: LSET out of range index - linkedlist
[ok]: LSET - ziplist
[ok]: LSET out of range index - ziplist
[ok]: LSET against non existing key
[ok]: LSET against non list value
[ok]: LREM remove all the occurrences - linkedlist
[ok]: LREM remove the first occurrence - linkedlist
[ok]: LREM remove non existing element - linkedlist
[ok]: LREM starting from tail with negative count - linkedlist
[ok]: LREM starting from tail with negative count (2) - linkedlist
[ok]: LREM deleting objects that may be int encoded - linkedlist
[ok]: LREM remove all the occurrences - ziplist
[ok]: LREM remove the first occurrence - ziplist
[ok]: LREM remove non existing element - ziplist
[ok]: LREM starting from tail with negative count - ziplist
[ok]: LREM starting from tail with negative count (2) - ziplist
[ok]: LREM deleting objects that may be int encoded - ziplist
[ok]: SCAN regression test for issue #4906
[ok]: Big Hash table: SORT BY hash field
[ok]: SORT GET #
[ok]: SORT GET <const>
[ok]: SORT GET (key and hash) with sanity check
[ok]: SORT BY key STORE
[ok]: SORT BY hash field STORE
[ok]: SORT extracts STORE correctly
[ok]: SORT extracts multiple STORE correctly
[ok]: SORT DESC
[ok]: SORT ALPHA against integer encoded strings
[ok]: SORT sorted set
[ok]: SORT sorted set BY nosort should retain ordering
[ok]: SORT sorted set BY nosort + LIMIT
[ok]: SORT sorted set BY nosort works as expected from scripts
[ok]: SORT sorted set: +inf and -inf handling
[ok]: SORT regression for issue #19, sorting floats
[ok]: SORT with STORE returns zero if result is empty (github issue 224)
[ok]: SORT with STORE does not create empty lists (github issue 224)
[ok]: SORT with STORE removes key if result is empty (github issue 227)
[ok]: SORT with BY <constant> and STORE should still order output
[ok]: SORT will complain with numerical sorting and bad doubles (1)
[ok]: SORT will complain with numerical sorting and bad doubles (2)
[ok]: SORT BY sub-sorts lexicographically if score is the same
[ok]: SORT GET with pattern ending with just -> does not get hash field
[ok]: SORT by nosort retains native order for lists
[ok]: SORT by nosort plus store retains native order for lists
[ok]: SORT by nosort with limit returns based on original list order
[ok]: SETBIT fuzzing
[ok]: GETBIT against non-existing key
[ok]: GETBIT against string-encoded key
[ok]: GETBIT against integer-encoded key
[ok]: SETRANGE against non-existing key
[ok]: SETRANGE against string-encoded key
[ok]: SETRANGE against integer-encoded key
[ok]: SETRANGE against key with wrong type
[ok]: SETRANGE with out of range offset
[ok]: GETRANGE against non-existing key
[ok]: GETRANGE against string value
[ok]: GETRANGE against integer-encoded value
[ok]: XDEL fuzz test
[11/54 done]: unit/scan (10 seconds)
Testing integration/replication-3
[ok]: SORT speed, 100 element list BY key, 100 times
[ok]: intsets implementation stress testing
[ok]: SORT speed, 100 element list BY hash field, 100 times
[ok]: SORT speed, 100 element list directly, 100 times
[ok]: SORT speed, 100 element list BY <nothing>, 100 times
[ok]: First server should have role slave after SLAVEOF
[ok]: If min-slaves-to-write is honored, write is accepted
[ok]: No write if min-slaves-to-write is < attached slaves
[ok]: If min-slaves-to-write is honored, write is accepted (again)
[12/54 done]: unit/type/set (10 seconds)
Testing integration/replication-4
[ok]: EXPIRE should not resurrect keys (issue #1026)
[ok]: 5 keys in, 5 keys out
[ok]: EXPIRE with empty string as TTL should report an error
[13/54 done]: unit/sort (10 seconds)
Testing integration/replication-psync
[ok]: Regression for bug 593 - chaining BRPOPLPUSH with other blocking cmds
[14/54 done]: unit/type/list (11 seconds)
Testing integration/aof
[ok]: First server should have role slave after SLAVEOF
[ok]: ZRANGEBYSCORE fuzzy test, 100 ranges in 100 element sorted set - skiplist
[ok]: LTRIM stress testing - linkedlist
[ok]: Unfinished MULTI: Server should start if load-truncated is yes
[ok]: ZRANGEBYLEX fuzzy test, 100 ranges in 100 element sorted set - skiplist
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Stress tester for #3343-alike bugs
[ok]: First server should have role slave after SLAVEOF
[ok]: GETRANGE fuzzing
[ok]: Extended SET can detect syntax errors
[ok]: Extended SET NX option
[ok]: Extended SET XX option
[ok]: Extended SET EX option
[ok]: Extended SET PX option
[ok]: Extended SET using multiple options at once
[ok]: GETRANGE with huge ranges, Github issue #1844
[ok]: STRALGO LCS string output with STRINGS option
[ok]: STRALGO LCS len
[ok]: LCS with KEYS option
[ok]: LCS indexes
[ok]: LCS indexes with match len
[ok]: LCS indexes with match len and minimum match len
[ok]: Test replication partial resync: no reconnection, just sync (diskless: no, disabled, reconnect: 0)
[ok]: Short read: Server should start if load-truncated is yes
[ok]: Truncated AOF loaded: we expect foo to be equal to 5
[ok]: Append a new command after loading an incomplete AOF
[ok]: ZREMRANGEBYLEX fuzzy test, 100 ranges in 100 element sorted set - skiplist
[15/54 done]: unit/type/string (12 seconds)
Testing integration/rdb
[ok]: ZSETs skiplist implementation backlink consistency test - skiplist
[ok]: EXPIRES after AOF reload (without rewrite)
[ok]: Short read + command: Server should start
[ok]: Truncated AOF loaded: we expect foo to be equal to 6 now
[ok]: RDB encoding loading test
[ok]: Bad format: Server should have logged an error
[ok]: Server started empty with non-existing RDB file
[ok]: Unfinished MULTI: Server should have logged an error
[ok]: ZSETs ZRANK augmented skip list stress testing - skiplist
[ok]: BZPOPMIN, ZADD + DEL should not awake blocked client
[ok]: BZPOPMIN, ZADD + DEL + SET should not awake blocked client
[ok]: BZPOPMIN with same key multiple times should work
[ok]: MULTI/EXEC is isolated from the point of view of BZPOPMIN
[ok]: BZPOPMIN with variadic ZADD
[ok]: Server started empty with empty RDB file
[ok]: Short read: Server should have logged an error
[ok]: Short read: Utility should confirm the AOF is not valid
[ok]: Short read: Utility should be able to fix the AOF
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Fixed AOF: Server should have been started
[ok]: Fixed AOF: Keyspace should contain values that were parseable
[ok]: Test RDB stream encoding
[ok]: AOF+SPOP: Server should have been started
[ok]: AOF+SPOP: Set should have 1 member
[ok]: SET - use EX/PX option, TTL should not be reset after loadaof
[ok]: SET command will remove expire
[ok]: SET - use KEEPTTL option, TTL should not be removed
[ok]: BZPOPMIN with zero timeout should block indefinitely
[ok]: Server should not start if RDB file can't be open
[ok]: Server should not start if RDB is corrupted
[ok]: PIPELINING stresser (also a regression for the old epoll bug)
[ok]: APPEND basics
[ok]: APPEND basics, integer encoded values
[ok]: AOF+SPOP: Server should have been started
[ok]: AOF+SPOP: Set should have 1 member
[ok]: ziplist implementation: value encoding and backlink
[ok]: Test FLUSHALL aborts bgsave
[ok]: MIGRATE cached connections are released after some time
[ok]: AOF+EXPIRE: Server should have been started
[ok]: AOF+EXPIRE: List should be empty
[ok]: APPEND fuzzing
[ok]: MIGRATE is able to migrate a key between two instances
[ok]: FLUSHDB
[ok]: Perform a final SAVE to leave a clean DB on disk
[ok]: Redis should not try to convert DEL into EXPIREAT for EXPIRE -1
[16/54 done]: unit/other (15 seconds)
Testing integration/convert-zipmap-hash-on-load
[ok]: RDB load zipmap hash: converts to ziplist
[ok]: MIGRATE is able to copy a key between two instances
[ok]: ZSET skiplist order consistency when elements are moved
[ok]: No write if min-slaves-max-lag is > of the slave lag
[ok]: min-slaves-to-write is ignored by slaves
[ok]: SET - use KEEPTTL option, TTL should not be removed after loadaof
[17/54 done]: unit/type/zset (16 seconds)
Testing integration/logging
[ok]: Slave is able to detect timeout during handshake
[ok]: RDB load zipmap hash: converts to hash table when hash-max-ziplist-entries is exceeded
[18/54 done]: unit/expire (17 seconds)
Testing integration/psync2
[ok]: MIGRATE will not overwrite existing keys, unless REPLACE is used
[ok]: Server is able to generate a stack trace on selected systems
[ok]: RDB load zipmap hash: converts to hash table when hash-max-ziplist-value is exceeded
[19/54 done]: integration/logging (1 seconds)
Testing integration/psync2-reg
[20/54 done]: integration/convert-zipmap-hash-on-load (1 seconds)
Testing integration/psync2-pingoff
[ok]: MIGRATE propagates TTL correctly
[ok]: PSYNC2: --- CYCLE 1 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #1 as master
[ok]: PSYNC2: Set #3 to replicate from #1
[ok]: PSYNC2: Set #0 to replicate from #3
[ok]: PSYNC2: Set #2 to replicate from #1
[ok]: PSYNC2: Set #4 to replicate from #1
[ok]: Set instance A as slave of B
[ok]: Test replication with parallel clients writing in different DBs
[ok]: PSYNC2 pingoff: setup
[ok]: PSYNC2 pingoff: write and wait replication
[ok]: PSYNC2 #3899 regression: setup
[ok]: INCRBYFLOAT replication, should not remove expire
[ok]: BRPOPLPUSH replication, when blocking against empty list
[ok]: PSYNC2 #3899 regression: kill chained replica
[ok]: PSYNC2: cluster is consistent after failover
[ok]: First server should have role slave after SLAVEOF
[ok]: With min-slaves-to-write (1,3): master should be writable
[ok]: With min-slaves-to-write (2,3): master should not be writable
[ok]: BRPOPLPUSH replication, list exists
[ok]: BLPOP followed by role change, issue #2473
[ok]: client freed during loading
[21/54 done]: integration/rdb (9 seconds)
Testing unit/pubsub
[ok]: Pub/Sub PING
[ok]: PUBLISH/SUBSCRIBE basics
[ok]: PUBLISH/SUBSCRIBE with two clients
[ok]: PUBLISH/SUBSCRIBE after UNSUBSCRIBE without arguments
[ok]: SUBSCRIBE to one channel more than once
[ok]: UNSUBSCRIBE from non-subscribed channels
[ok]: PUBLISH/PSUBSCRIBE basics
[ok]: PUBLISH/PSUBSCRIBE with two clients
[ok]: PUBLISH/PSUBSCRIBE after PUNSUBSCRIBE without arguments
[ok]: PUNSUBSCRIBE from non-subscribed channels
[ok]: NUMSUB returns numbers, not strings (#1561)
[ok]: Mix SUBSCRIBE and PSUBSCRIBE
[ok]: PUNSUBSCRIBE and UNSUBSCRIBE should always reply
[ok]: Keyspace notifications: we receive keyspace notifications
[ok]: Keyspace notifications: we receive keyevent notifications
[ok]: Keyspace notifications: we can receive both kind of events
[ok]: Keyspace notifications: we are able to mask events
[ok]: Keyspace notifications: general events test
[ok]: Keyspace notifications: list events test
[ok]: Keyspace notifications: set events test
[ok]: Keyspace notifications: zset events test
[ok]: Keyspace notifications: hash events test
[ok]: Keyspace notifications: expired events (triggered expire)
[ok]: Second server should have role master at first
[ok]: SLAVEOF should start with link status "down"
[ok]: The role should immediately be changed to "replica"
[ok]: Test replication partial resync: ok psync (diskless: no, disabled, reconnect: 1)
[ok]: Keyspace notifications: expired events (background expire)
[ok]: Keyspace notifications: evicted events
[ok]: Keyspace notifications: test CONFIG GET/SET of event flags
[22/54 done]: unit/pubsub (1 seconds)
Testing unit/slowlog
[ok]: AOF fsync always barrier issue
[ok]: SLOWLOG - check that it starts with an empty log
[23/54 done]: integration/aof (11 seconds)
Testing unit/scripting
[ok]: SLOWLOG - only logs commands taking more time than specified
[ok]: SLOWLOG - max entries is correctly handled
[ok]: SLOWLOG - GET optional argument to limit output len works
[ok]: SLOWLOG - RESET subcommand works
[ok]: Sync should have transferred keys from master
[ok]: The link status should be up
[ok]: SET on the master should immediately propagate
[ok]: FLUSHALL should replicate
[ok]: ROLE in master reports master with a slave
[ok]: ROLE in slave reports slave in connected state
[ok]: EVAL - Does Lua interpreter replies to our requests?
[ok]: EVAL - Lua integer -> Redis protocol type conversion
[ok]: EVAL - Lua string -> Redis protocol type conversion
[ok]: EVAL - Lua true boolean -> Redis protocol type conversion
[ok]: EVAL - Lua false boolean -> Redis protocol type conversion
[ok]: EVAL - Lua status code reply -> Redis protocol type conversion
[ok]: EVAL - Lua error reply -> Redis protocol type conversion
[ok]: EVAL - Lua table -> Redis protocol type conversion
[ok]: EVAL - Are the KEYS and ARGV arrays populated correctly?
[ok]: EVAL - is Lua able to call Redis API?
[ok]: EVALSHA - Can we call a SHA1 if already defined?
[ok]: EVALSHA - Can we call a SHA1 in uppercase?
[ok]: EVALSHA - Do we get an error on invalid SHA1?
[ok]: EVALSHA - Do we get an error on non defined SHA1?
[ok]: EVAL - Redis integer -> Lua type conversion
[ok]: EVAL - Redis bulk -> Lua type conversion
[ok]: EVAL - Redis multi bulk -> Lua type conversion
[ok]: EVAL - Redis status reply -> Lua type conversion
[ok]: EVAL - Redis error reply -> Lua type conversion
[ok]: EVAL - Redis nil bulk reply -> Lua type conversion
[ok]: EVAL - Is the Lua client using the currently selected DB?
[ok]: EVAL - SELECT inside Lua should not affect the caller
[ok]: EVAL - Scripts can't run certain commands
[ok]: EVAL - Scripts can't run XREAD and XREADGROUP with BLOCK option
[ok]: EVAL - Scripts can't run certain commands
[ok]: EVAL - No arguments to redis.call/pcall is considered an error
[ok]: EVAL - redis.call variant raises a Lua error on Redis cmd error (1)
[ok]: EVAL - redis.call variant raises a Lua error on Redis cmd error (2)
[ok]: EVAL - redis.call variant raises a Lua error on Redis cmd error (3)
[ok]: EVAL - JSON numeric decoding
[ok]: EVAL - JSON string decoding
[ok]: EVAL - cmsgpack can pack double?
[ok]: EVAL - cmsgpack can pack negative int64?
[ok]: EVAL - cmsgpack can pack and unpack circular references?
[ok]: EVAL - Numerical sanity check from bitop
[ok]: EVAL - Verify minimal bitop functionality
[ok]: EVAL - Able to parse trailing comments
[ok]: SCRIPTING FLUSH - is able to clear the scripts cache?
[ok]: SCRIPT EXISTS - can detect already defined scripts?
[ok]: SCRIPT LOAD - is able to register scripts in the scripting cache
[ok]: In the context of Lua the output of random commands gets ordered
[ok]: SORT is normally not alpha re-ordered for the scripting engine
[ok]: SORT BY <constant> output gets ordered for scripting
[ok]: SORT BY <constant> with GET gets ordered for scripting
[ok]: redis.sha1hex() implementation
[ok]: Globals protection reading an undeclared global variable
[ok]: Globals protection setting an undeclared global*
[ok]: Test an example script DECR_IF_GT
[ok]: Scripting engine resets PRNG at every script execution
[ok]: Scripting engine PRNG can be seeded correctly
[ok]: SLOWLOG - logged entry sanity check
[ok]: SLOWLOG - commands with too many arguments are trimmed
[ok]: SLOWLOG - too long arguments are trimmed
[ok]: MASTER and SLAVE consistency with expire
[ok]: SLOWLOG - EXEC is not logged, just executed commands
[ok]: AOF rewrite during write load: RDB preamble=yes
[ok]: SLOWLOG - can clean older entries
[ok]: SLOWLOG - can be disabled
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[24/54 done]: unit/slowlog (1 seconds)
Testing unit/maxmemory
[ok]: EVAL does not leak in the Lua stack
[ok]: PSYNC2 pingoff: pause replica and promote it
[ok]: Without maxmemory small integers are shared
[ok]: With maxmemory and non-LRU policy integers are still shared
[ok]: With maxmemory and LRU policy integers are not shared
[ok]: EVAL processes writes from AOF in read-only slaves
[ok]: We can call scripts rewriting client->argv from Lua
[ok]: Call Redis command with many args from Lua (issue #1764)
[ok]: Number conversion precision test (issue #1118)
[ok]: String containing number precision test (regression of issue #1118)
[ok]: Verify negative arg count is error instead of crash (issue #1842)
[ok]: Correct handling of reused argv (issue #1939)
[ok]: Functions in the Redis namespace are able to report errors
[ok]: With min-slaves-to-write: master not writable with lagged slave
[ok]: maxmemory - is the memory limit honoured? (policy allkeys-random)
[ok]: Make the old master a replica of the new one and check conditions
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 24484)
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: PSYNC2: --- CYCLE 2 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #0 as master
[ok]: PSYNC2: Set #1 to replicate from #0
[ok]: PSYNC2: Set #3 to replicate from #1
[ok]: PSYNC2: Set #4 to replicate from #1
[ok]: PSYNC2: Set #2 to replicate from #1
[ok]: Timedout read-only scripts can be killed by SCRIPT KILL
[ok]: First server should have role slave after SLAVEOF
[ok]: Timedout script link is still usable after Lua returns
[ok]: Timedout scripts that modified data can't be killed by SCRIPT KILL
[ok]: SHUTDOWN NOSAVE can kill a timedout script anyway
[ok]: maxmemory - is the memory limit honoured? (policy allkeys-lru)
[ok]: Before the replica connects we issue two EVAL commands (scripts replication)
[ok]: maxmemory - is the memory limit honoured? (policy allkeys-lfu)
[ok]: PSYNC2: cluster is consistent after failover
[ok]: Connect a replica to the master instance (scripts replication)
[ok]: Now use EVALSHA against the master, with both SHAs (scripts replication)
[ok]: If EVALSHA was replicated as EVAL, 'x' should be '4' (scripts replication)
[ok]: Replication of script multiple pushes to list with BLPOP (scripts replication)
[ok]: EVALSHA replication when first call is readonly (scripts replication)
[ok]: Lua scripts using SELECT are replicated correctly (scripts replication)
[ok]: maxmemory - is the memory limit honoured? (policy volatile-lru)
[ok]: Before the replica connects we issue two EVAL commands (commands replication)
[ok]: Slave is able to evict keys created in writable slaves
[ok]: Connect a replica to the master instance (commands replication)
[ok]: Now use EVALSHA against the master, with both SHAs (commands replication)
[ok]: If EVALSHA was replicated as EVAL, 'x' should be '4' (commands replication)
[ok]: Replication of script multiple pushes to list with BLPOP (commands replication)
[ok]: EVALSHA replication when first call is readonly (commands replication)
[ok]: Lua scripts using SELECT are replicated correctly (commands replication)
[ok]: PSYNC2 #3899 regression: kill chained replica
[ok]: LTRIM stress testing - ziplist
[ok]: maxmemory - is the memory limit honoured? (policy volatile-lfu)
[25/54 done]: unit/type/list-2 (29 seconds)
Testing unit/introspection
[ok]: CLIENT LIST
[ok]: MONITOR can log executed commands
[ok]: MONITOR can log commands issued by the scripting engine
[ok]: CLIENT GETNAME should return NIL if name is not assigned
[ok]: CLIENT LIST shows empty fields for unassigned names
[ok]: CLIENT SETNAME does not accept spaces
[ok]: CLIENT SETNAME can assign a name to this connection
[ok]: CLIENT SETNAME can change the name of an existing connection
[ok]: After CLIENT SETNAME, connection can still be closed
[ok]: CONFIG sanity
[26/54 done]: unit/introspection (1 seconds)
Testing unit/introspection-2
[ok]: MIGRATE can correctly transfer large values
[ok]: First server should have role slave after SLAVEOF
[ok]: maxmemory - is the memory limit honoured? (policy volatile-random)
[ok]: MIGRATE can correctly transfer hashes
[ok]: Connect a replica to the master instance
[ok]: Redis.replicate_commands() must be issued before any write
[ok]: Redis.replicate_commands() must be issued before any write (2)
[ok]: Redis.set_repl() must be issued after replicate_commands()
[ok]: Redis.set_repl() doesn't accept invalid values
[ok]: Test selective replication of certain Redis commands from Lua
[ok]: PRNG is seeded randomly for command replication
[ok]: Using side effects is not a problem with command replication
[ok]: maxmemory - is the memory limit honoured? (policy volatile-ttl)
[ok]: PSYNC2 #3899 regression: kill first replica
[ok]: Test replication with blocking lists and sorted sets operations
[ok]: MIGRATE timeout actually works
[27/54 done]: unit/scripting (10 seconds)
Testing unit/limits
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 46479)
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: PSYNC2: --- CYCLE 3 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #2 as master
[ok]: PSYNC2: Set #4 to replicate from #2
[ok]: PSYNC2: Set #0 to replicate from #4
[ok]: PSYNC2: Set #1 to replicate from #4
[ok]: PSYNC2: Set #3 to replicate from #2
[ok]: Test replication partial resync: no backlog (diskless: no, disabled, reconnect: 1)
[28/54 done]: integration/block-repl (27 seconds)
Testing unit/obuf-limits
[ok]: MASTER and SLAVE dataset should be identical after complex ops
[ok]: PSYNC2 #3899 regression: kill chained replica
[ok]: MIGRATE can migrate multiple keys at once
[ok]: MIGRATE with multiple keys must have empty key arg
[ok]: maxmemory - only allkeys-* should remove non-volatile keys (allkeys-random)
[29/54 done]: integration/replication-2 (24 seconds)
Testing unit/bitops
[ok]: BITCOUNT returns 0 against non existing key
[ok]: BITCOUNT returns 0 with out of range indexes
[ok]: BITCOUNT returns 0 with negative indexes where start > end
[ok]: BITCOUNT against test vector #1
[ok]: BITCOUNT against test vector #2
[ok]: BITCOUNT against test vector #3
[ok]: BITCOUNT against test vector #4
[ok]: BITCOUNT against test vector #5
[ok]: MIGRATE with multiple keys migrates just existing ones
[ok]: PSYNC2 #3899 regression: kill first replica
[ok]: Check if maxclients works refusing connections
[30/54 done]: unit/limits (1 seconds)
Testing unit/bitfield
[ok]: MIGRATE with multiple keys: stress command rewriting
[ok]: BITCOUNT fuzzing without start/end
[ok]: TTL and TYPE do not alter the last access time of a key
[ok]: maxmemory - only allkeys-* should remove non-volatile keys (allkeys-lru)
[ok]: test various edge cases of repl topology changes with missing pings at the end
[ok]: BITFIELD signed SET and GET basics
[ok]: BITFIELD unsigned SET and GET basics
[ok]: BITFIELD # form
[ok]: BITFIELD basic INCRBY form
[ok]: BITFIELD chaining of multiple commands
[ok]: BITFIELD unsigned overflow wrap
[ok]: BITFIELD unsigned overflow sat
[ok]: BITFIELD signed overflow wrap
[ok]: BITFIELD signed overflow sat
[ok]: PSYNC2 #3899 regression: kill chained replica
[ok]: MIGRATE with multiple keys: delete just ack keys
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: BITFIELD overflow detection fuzzing
[ok]: MIGRATE AUTH: correct and wrong password cases
[ok]: BITCOUNT fuzzing with start/end
[ok]: BITCOUNT with start, end
[ok]: BITCOUNT syntax error #1
[ok]: BITCOUNT regression test for github issue #582
[ok]: BITCOUNT misaligned prefix
[ok]: BITCOUNT misaligned prefix + full words + remainder
[ok]: BITOP NOT (empty string)
[ok]: BITOP NOT (known string)
[ok]: BITOP where dest and target are the same key
[ok]: BITOP AND|OR|XOR don't change the string with single input key
[ok]: BITOP missing key is considered a stream of zero
[ok]: BITOP shorter keys are zero-padded to the key with max length
[ok]: maxmemory - only allkeys-* should remove non-volatile keys (volatile-lru)
[31/54 done]: unit/dump (34 seconds)
Testing unit/geo
[ok]: PSYNC2: cluster is consistent after failover
[ok]: GEOADD create
[ok]: GEOADD update
[ok]: GEOADD invalid coordinates
[ok]: GEOADD multi add
[ok]: Check geoset values
[ok]: GEORADIUS simple (sorted)
[ok]: GEORADIUS withdist (sorted)
[ok]: GEORADIUS with COUNT
[ok]: GEORADIUS with COUNT but missing integer argument
[ok]: GEORADIUS with COUNT DESC
[ok]: GEORADIUS HUGE, issue #2767
[ok]: GEORADIUSBYMEMBER simple (sorted)
[ok]: GEORADIUSBYMEMBER withdist (sorted)
[ok]: GEOHASH is able to return geohash strings
[ok]: GEOPOS simple
[ok]: GEOPOS missing element
[ok]: GEODIST simple & unit
[ok]: GEODIST missing elements
[ok]: GEORADIUS STORE option: syntax error
[ok]: GEORADIUS STORE option: incompatible options
[ok]: GEORADIUS STORE option: plain usage
[ok]: GEORADIUS STOREDIST option: plain usage
[ok]: GEORADIUS STOREDIST option: COUNT ASC and DESC
[ok]: PSYNC2 #3899 regression: kill chained replica
[ok]: BITFIELD overflow wrap fuzzing
[ok]: BITFIELD regression for #3221
[ok]: BITFIELD regression for #3564
[ok]: PSYNC2 #3899 regression: kill chained replica
[ok]: maxmemory - only allkeys-* should remove non-volatile keys (volatile-random)
[ok]: BITOP and fuzzing
[ok]: ziplist implementation: encoding stress testing
[ok]: TOUCH alters the last access time of a key
[ok]: TOUCH returns the number of existing keys specified
[ok]: command stats for GEOADD
[ok]: command stats for EXPIRE
[ok]: command stats for BRPOP
[ok]: command stats for MULTI
[ok]: command stats for scripts
[ok]: maxmemory - only allkeys-* should remove non-volatile keys (volatile-ttl)
[32/54 done]: unit/type/list-3 (37 seconds)
Testing unit/memefficiency
[33/54 done]: unit/introspection-2 (7 seconds)
Testing unit/hyperloglog
[ok]: BITFIELD: setup slave
[ok]: BITFIELD: write on master, read on slave
[ok]: BITFIELD_RO fails when write option is used
[ok]: PSYNC2 #3899 regression: kill first replica
[ok]: BITOP or fuzzing
[34/54 done]: unit/bitfield (4 seconds)
Testing unit/lazyfree
[ok]: maxmemory - policy volatile-lru should only remove volatile keys.
[ok]: UNLINK can reclaim memory in background
[ok]: maxmemory - policy volatile-lfu should only remove volatile keys.
[ok]: Memory efficiency with values in range 32
[ok]: PSYNC2 #3899 regression: kill chained replica
[ok]: BITOP xor fuzzing
[ok]: BITOP NOT fuzzing
[ok]: BITOP with integer encoded source objects
[ok]: BITOP with non string source key
[ok]: BITOP with empty string after non empty string (issue #529)
[ok]: BITPOS bit=0 with empty key returns 0
[ok]: BITPOS bit=1 with empty key returns -1
[ok]: BITPOS bit=0 with string less than 1 word works
[ok]: BITPOS bit=1 with string less than 1 word works
[ok]: BITPOS bit=0 starting at unaligned address
[ok]: BITPOS bit=1 starting at unaligned address
[ok]: BITPOS bit=0 unaligned+full word+remainder
[ok]: BITPOS bit=1 unaligned+full word+remainder
[ok]: BITPOS bit=1 returns -1 if string is all 0 bits
[ok]: BITPOS bit=0 works with intervals
[ok]: BITPOS bit=1 works with intervals
[ok]: BITPOS bit=0 changes behavior if end is given
[ok]: BITPOS bit=1 fuzzy testing using SETBIT
[ok]: maxmemory - policy volatile-random should only remove volatile keys.
[ok]: BITPOS bit=0 fuzzy testing using SETBIT
[ok]: FLUSHDB ASYNC can reclaim memory in background
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 66472)
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: PSYNC2: --- CYCLE 4 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #2 as master
[ok]: PSYNC2: Set #4 to replicate from #2
[ok]: PSYNC2: Set #0 to replicate from #4
[ok]: PSYNC2: Set #3 to replicate from #0
[ok]: PSYNC2: Set #1 to replicate from #3
[35/54 done]: unit/bitops (7 seconds)
Testing unit/wait
[36/54 done]: unit/lazyfree (3 seconds)
Testing unit/pendingquerybuf
[ok]: PSYNC2 #3899 regression: verify consistency
[ok]: maxmemory - policy volatile-ttl should only remove volatile keys.
[ok]: Memory efficiency with values in range 64
[ok]: Chained replicas disconnect when replica re-connect with the same master
[ok]: PSYNC2: cluster is consistent after failover
[37/54 done]: integration/psync2-reg (24 seconds)
Testing unit/tls
[ok]: HyperLogLog self test passes
[ok]: PFADD without arguments creates an HLL value
[ok]: Approximated cardinality after creation is zero
[ok]: PFADD returns 1 when at least 1 reg was modified
[ok]: PFADD returns 0 when no reg was modified
[ok]: PFADD works with empty string (regression)
[ok]: PFCOUNT returns approximated cardinality of set
[38/54 done]: integration/psync2-pingoff (24 seconds)
Testing unit/tracking
[ok]: Setup slave
[ok]: WAIT should acknowledge 1 additional copy of the data
[39/54 done]: unit/tls (0 seconds)
[ok]: Clients are able to enable tracking and redirect it
[ok]: The other connection is able to get invalidations
[ok]: The client is now able to disable tracking
[ok]: Clients can enable the BCAST mode with the empty prefix
[ok]: The connection gets invalidation messages about all the keys
[ok]: Clients can enable the BCAST mode with prefixes
[ok]: Adding prefixes to BCAST mode works
[ok]: Tracking NOLOOP mode in standard mode works
[ok]: Tracking NOLOOP mode in BCAST mode works
[ok]: HyperLogLogs are promoted from sparse to dense
[ok]: Memory efficiency with values in range 128
[ok]: WAIT should not acknowledge 2 additional copies of the data
[ok]: Tracking gets notification of expired keys
[ok]: Tracking gets notification on tracking table key eviction
[40/54 done]: unit/tracking (2 seconds)
[ok]: HyperLogLog sparse encoding stress test
[ok]: Corrupted sparse HyperLogLogs are detected: Additional data at tail
[ok]: Corrupted sparse HyperLogLogs are detected: Broken magic
[ok]: Corrupted sparse HyperLogLogs are detected: Invalid encoding
[ok]: Corrupted dense HyperLogLogs are detected: Wrong length
[ok]: Memory efficiency with values in range 1024
[ok]: XRANGE fuzzing
[ok]: XREVRANGE regression test for issue #5006
[ok]: XREAD streamID edge (no-blocking)
[ok]: XREAD streamID edge (blocking)
[ok]: XADD streamID edge
[ok]: Test replication partial resync: ok after delay (diskless: no, disabled, reconnect: 1)
[ok]: XADD with MAXLEN > xlen can propagate correctly
[ok]: Client output buffer hard limit is enforced
[ok]: XADD with ~ MAXLEN can propagate correctly
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 93491)
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: PSYNC2: --- CYCLE 5 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #4 as master
[ok]: PSYNC2: Set #2 to replicate from #4
[ok]: PSYNC2: Set #1 to replicate from #4
[ok]: PSYNC2: Set #0 to replicate from #4
[ok]: PSYNC2: Set #3 to replicate from #4
[ok]: XTRIM with ~ MAXLEN can propagate correctly
[ok]: WAIT should not acknowledge 1 additional copy if slave is blocked
[ok]: XADD can CREATE an empty stream
[ok]: XSETID can set a specific ID
[ok]: XSETID cannot SETID with smaller ID
[ok]: XSETID cannot SETID on non-existent key
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[41/54 done]: unit/wait (7 seconds)
[ok]: PSYNC2: cluster is consistent after failover
[ok]: Memory efficiency with values in range 16384
[ok]: Empty stream can be rewritten into AOF correctly
[42/54 done]: unit/memefficiency (12 seconds)
[ok]: Stream can be rewritten into AOF correctly after XDEL lastid
[ok]: Connect multiple replicas at the same time (issue #141), master diskless=no, replica diskless=disabled
[43/54 done]: unit/type/stream (50 seconds)
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 115254)
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: PSYNC2: --- CYCLE 6 ---
[ok]: PSYNC2: [NEW LAYOUT] Set #4 as master
[ok]: PSYNC2: Set #3 to replicate from #4
[ok]: PSYNC2: Set #0 to replicate from #3
[ok]: PSYNC2: Set #2 to replicate from #0
[ok]: PSYNC2: Set #1 to replicate from #4
[ok]: PSYNC2: cluster is consistent after failover
[ok]: pending querybuf: check size of pending_querybuf after setting a big value
[ok]: Test replication partial resync: backlog expired (diskless: no, disabled, reconnect: 1)
[44/54 done]: unit/pendingquerybuf (18 seconds)
[ok]: PSYNC2: generate load while killing replication links
[ok]: PSYNC2: cluster is consistent after load (x = 142149)
[ok]: PSYNC2: total sum of full synchronizations is exactly 4
[ok]: MASTER and SLAVE consistency with EVALSHA replication
[ok]: PSYNC2: Bring the master back again for next test
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: no reconnection, just sync (diskless: no, swapdb, reconnect: 0)
[ok]: PSYNC2: Partial resync after restart using RDB aux fields
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Client output buffer soft limit is not enforced if time is not overreached
[ok]: PSYNC2: Replica RDB restart with EVALSHA in backlog issue #4483
[ok]: SLAVE can reload "lua" AUX RDB fields of duplicated scripts
[45/54 done]: integration/psync2 (47 seconds)
[46/54 done]: integration/replication-3 (54 seconds)
[ok]: Fuzzing dense/sparse encoding: Redis should always detect errors
[ok]: PFADD, PFCOUNT, PFMERGE type checking works
[ok]: PFMERGE results on the cardinality of union of sets
[ok]: AOF rewrite during write load: RDB preamble=no
[ok]: Replication: commands with many arguments (issue #1221)
[ok]: Turning off AOF kills the background writing child if any
[ok]: Test replication partial resync: ok psync (diskless: no, swapdb, reconnect: 1)
[ok]: Replication of SPOP command -- alsoPropagate() API
[ok]: AOF rewrite of list with quicklist encoding, string data
[47/54 done]: integration/replication-4 (60 seconds)
[ok]: AOF rewrite of list with quicklist encoding, int data
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: AOF rewrite of set with intset encoding, string data
[ok]: AOF rewrite of set with hashtable encoding, string data
[ok]: PFCOUNT multiple-keys merge returns cardinality of union #1
[ok]: AOF rewrite of set with intset encoding, int data
[ok]: Connect multiple replicas at the same time (issue #141), master diskless=no, replica diskless=swapdb
[ok]: AOF rewrite of set with hashtable encoding, int data
[ok]: Client output buffer soft limit is enforced if time is overreached
[48/54 done]: unit/obuf-limits (44 seconds)
[ok]: AOF rewrite of hash with ziplist encoding, string data
[ok]: PFCOUNT multiple-keys merge returns cardinality of union #2
[ok]: PFDEBUG GETREG returns the HyperLogLog raw registers
[ok]: PFADD / PFCOUNT cache invalidation works
[49/54 done]: unit/hyperloglog (40 seconds)
[ok]: AOF rewrite of hash with hashtable encoding, string data
[ok]: AOF rewrite of hash with ziplist encoding, int data
[ok]: AOF rewrite of hash with hashtable encoding, int data
[ok]: Test replication partial resync: no backlog (diskless: no, swapdb, reconnect: 1)
[ok]: AOF rewrite of zset with ziplist encoding, string data
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: AOF rewrite of zset with skiplist encoding, string data
[ok]: AOF rewrite of zset with ziplist encoding, int data
[ok]: AOF rewrite of zset with skiplist encoding, int data
[ok]: BGREWRITEAOF is delayed if BGSAVE is in progress
[ok]: BGREWRITEAOF is refused if already in progress
[50/54 done]: unit/aofrw (82 seconds)
[ok]: GEOADD + GEORANGE randomized test
[51/54 done]: unit/geo (51 seconds)
[ok]: slave buffers are counted correctly
[ok]: Test replication partial resync: ok after delay (diskless: no, swapdb, reconnect: 1)
[ok]: replica buffers don't induce eviction
[52/54 done]: unit/maxmemory (69 seconds)
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Connect multiple replicas at the same time (issue #141), master diskless=yes, replica diskless=disabled
[ok]: Test replication partial resync: backlog expired (diskless: no, swapdb, reconnect: 1)
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: no reconnection, just sync (diskless: yes, disabled, reconnect: 0)
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: ok psync (diskless: yes, disabled, reconnect: 1)
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: no backlog (diskless: yes, disabled, reconnect: 1)
[ok]: Connect multiple replicas at the same time (issue #141), master diskless=yes, replica diskless=swapdb
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: ok after delay (diskless: yes, disabled, reconnect: 1)
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: backlog expired (diskless: yes, disabled, reconnect: 1)
[ok]: Master stream is correctly processed while the replica has a script in -BUSY state
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: no reconnection, just sync (diskless: yes, swapdb, reconnect: 0)
[ok]: slave fails full sync and diskless load swapdb recovers it
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: ok psync (diskless: yes, swapdb, reconnect: 1)
[ok]: diskless loading short read
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: diskless no replicas drop during rdb pipe
[ok]: Test replication partial resync: no backlog (diskless: yes, swapdb, reconnect: 1)
[ok]: diskless slow replicas drop during rdb pipe
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: diskless fast replicas drop during rdb pipe
[ok]: diskless all replicas drop during rdb pipe
[ok]: Test replication partial resync: ok after delay (diskless: yes, swapdb, reconnect: 1)
[ok]: replicaof right after disconnection
[53/54 done]: integration/replication (206 seconds)
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Test replication partial resync: backlog expired (diskless: yes, swapdb, reconnect: 1)
[54/54 done]: integration/replication-psync (214 seconds)
Testing solo test
[54/54 done]: defrag (1 seconds)

                   The End

Execution time of different units:
  0 seconds - unit/printver
  0 seconds - unit/type/incr
  1 seconds - unit/auth
  1 seconds - unit/protocol
  1 seconds - unit/keyspace
  1 seconds - unit/quit
  3 seconds - unit/multi
  1 seconds - unit/acl
  5 seconds - unit/type/stream-cgroups
  9 seconds - unit/type/hash
  10 seconds - unit/scan
  10 seconds - unit/type/set
  10 seconds - unit/sort
  11 seconds - unit/type/list
  12 seconds - unit/type/string
  15 seconds - unit/other
  16 seconds - unit/type/zset
  17 seconds - unit/expire
  1 seconds - integration/logging
  1 seconds - integration/convert-zipmap-hash-on-load
  9 seconds - integration/rdb
  1 seconds - unit/pubsub
  11 seconds - integration/aof
  1 seconds - unit/slowlog
  29 seconds - unit/type/list-2
  1 seconds - unit/introspection
  10 seconds - unit/scripting
  27 seconds - integration/block-repl
  24 seconds - integration/replication-2
  1 seconds - unit/limits
  34 seconds - unit/dump
  37 seconds - unit/type/list-3
  7 seconds - unit/introspection-2
  4 seconds - unit/bitfield
  7 seconds - unit/bitops
  3 seconds - unit/lazyfree
  24 seconds - integration/psync2-reg
  24 seconds - integration/psync2-pingoff
  0 seconds - unit/tls
  2 seconds - unit/tracking
  7 seconds - unit/wait
  12 seconds - unit/memefficiency
  50 seconds - unit/type/stream
  18 seconds - unit/pendingquerybuf
  47 seconds - integration/psync2
  54 seconds - integration/replication-3
  60 seconds - integration/replication-4
  44 seconds - unit/obuf-limits
  40 seconds - unit/hyperloglog
  82 seconds - unit/aofrw
  51 seconds - unit/geo
  69 seconds - unit/maxmemory
  206 seconds - integration/replication
  214 seconds - integration/replication-psync
  1 seconds - defrag

\o/ All tests passed without errors!

Cleanup: may take some time... OK