RecidivCI

Details for run #94579 (err)

unstable-valgrind (a92921d)

Wed Oct 09 12:54:27 UTC 2019

Error in: ./runtest --valgrind

@cd /home/ubuntu/ci/redis

Working dir is now '/home/ubuntu/ci/redis'

@git checkout unstable

Already on 'unstable'
Your branch is up-to-date with 'origin/unstable'.

@git pull origin unstable

From git://github.com/antirez/redis
 * branch            unstable   -> FETCH_HEAD
Already up-to-date.

@make distclean

cd src && make distclean
make[1]: Entering directory '/home/ubuntu/ci/redis/src'
rm -rf redis-server redis-sentinel redis-cli redis-benchmark redis-check-rdb redis-check-aof *.o *.gcda *.gcno *.gcov redis.info lcov-html Makefile.dep dict-benchmark
(cd ../deps && make distclean)
make[2]: Entering directory '/home/ubuntu/ci/redis/deps'
(cd hiredis && make clean) > /dev/null || true
(cd linenoise && make clean) > /dev/null || true
(cd lua && make clean) > /dev/null || true
(cd jemalloc && [ -f Makefile ] && make distclean) > /dev/null || true
(rm -f .make-*)
make[2]: Leaving directory '/home/ubuntu/ci/redis/deps'
(rm -f .make-*)
make[1]: Leaving directory '/home/ubuntu/ci/redis/src'

@make valgrind -j 8

cd src && make valgrind
make[1]: Entering directory '/home/ubuntu/ci/redis/src'
    CC Makefile.dep
make OPTIMIZATION="-O0" MALLOC="libc"
make[2]: Entering directory '/home/ubuntu/ci/redis/src'
rm -rf redis-server redis-sentinel redis-cli redis-benchmark redis-check-rdb redis-check-aof *.o *.gcda *.gcno *.gcov redis.info lcov-html Makefile.dep dict-benchmark
(cd ../deps && make distclean)
make[3]: Entering directory '/home/ubuntu/ci/redis/deps'
(cd hiredis && make clean) > /dev/null || true
(cd linenoise && make clean) > /dev/null || true
(cd lua && make clean) > /dev/null || true
(cd jemalloc && [ -f Makefile ] && make distclean) > /dev/null || true
(rm -f .make-*)
make[3]: Leaving directory '/home/ubuntu/ci/redis/deps'
(rm -f .make-*)
echo STD=-std=c11 -pedantic -DREDIS_STATIC='' >> .make-settings
echo WARN=-Wall -W -Wno-missing-field-initializers >> .make-settings
echo OPT=-O0 >> .make-settings
echo MALLOC=libc >> .make-settings
echo CFLAGS= >> .make-settings
echo LDFLAGS= >> .make-settings
echo REDIS_CFLAGS= >> .make-settings
echo REDIS_LDFLAGS= >> .make-settings
echo PREV_FINAL_CFLAGS=-std=c11 -pedantic -DREDIS_STATIC='' -Wall -W -Wno-missing-field-initializers -O0 -g -ggdb   -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src >> .make-settings
echo PREV_FINAL_LDFLAGS=  -g -ggdb -rdynamic >> .make-settings
(cd ../deps && make hiredis linenoise lua)
make[3]: Entering directory '/home/ubuntu/ci/redis/deps'
(cd hiredis && make clean) > /dev/null || true
(cd linenoise && make clean) > /dev/null || true
(cd lua && make clean) > /dev/null || true
(cd jemalloc && [ -f Makefile ] && make distclean) > /dev/null || true
(rm -f .make-*)
(echo "" > .make-cflags)
(echo "" > .make-ldflags)
MAKE hiredis
cd hiredis && make static
MAKE linenoise
MAKE lua
cd linenoise && make
cd lua/src && make all CFLAGS="-O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC='' " MYLDFLAGS="" AR="ar rcu"
make[4]: Entering directory '/home/ubuntu/ci/redis/deps/hiredis'
make[4]: Entering directory '/home/ubuntu/ci/redis/deps/linenoise'
cc  -Wall -Os -g  -c linenoise.c
make[4]: Entering directory '/home/ubuntu/ci/redis/deps/lua/src'
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lapi.o lapi.c
cc -std=c99 -pedantic -c -O0 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb net.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lcode.o lcode.c
cc -std=c99 -pedantic -c -O0 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb hiredis.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ldebug.o ldebug.c
cc -std=c99 -pedantic -c -O0 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb sds.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ldo.o ldo.c
ldo.c: In function ‘f_parser’:
ldo.c:496:7: warning: unused variable ‘c’ [-Wunused-variable]
   int c = luaZ_lookahead(p->z);
       ^
cc -std=c99 -pedantic -c -O0 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb async.c
cc -std=c99 -pedantic -c -O0 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb read.c
cc -std=c99 -pedantic -c -O0 -fPIC   -Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers -g -ggdb sockcompat.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ldump.o ldump.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lfunc.o lfunc.c
ar rcs libhiredis.a net.o hiredis.o sds.o async.o read.o sockcompat.o
make[4]: Leaving directory '/home/ubuntu/ci/redis/deps/hiredis'
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lgc.o lgc.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o llex.o llex.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lmem.o lmem.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lobject.o lobject.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lopcodes.o lopcodes.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lparser.o lparser.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lstate.o lstate.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lstring.o lstring.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ltable.o ltable.c
make[4]: Leaving directory '/home/ubuntu/ci/redis/deps/linenoise'
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ltm.o ltm.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lundump.o lundump.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lvm.o lvm.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lzio.o lzio.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o strbuf.o strbuf.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o fpconv.o fpconv.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lauxlib.o lauxlib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lbaselib.o lbaselib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ldblib.o ldblib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o liolib.o liolib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lmathlib.o lmathlib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o loslib.o loslib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o ltablib.o ltablib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lstrlib.o lstrlib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o loadlib.o loadlib.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o linit.o linit.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lua_cjson.o lua_cjson.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lua_struct.o lua_struct.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lua_cmsgpack.o lua_cmsgpack.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lua_bit.o lua_bit.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o lua.o lua.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o luac.o luac.c
cc -O2 -Wall -DLUA_ANSI -DENABLE_CJSON_GLOBAL -DREDIS_STATIC=''    -c -o print.o print.c
ar rcu liblua.a lapi.o lcode.o ldebug.o ldo.o ldump.o lfunc.o lgc.o llex.o lmem.o lobject.o lopcodes.o lparser.o lstate.o lstring.o ltable.o ltm.o lundump.o lvm.o lzio.o strbuf.o fpconv.o lauxlib.o lbaselib.o ldblib.o liolib.o lmathlib.o loslib.o ltablib.o lstrlib.o loadlib.o linit.o lua_cjson.o lua_struct.o lua_cmsgpack.o lua_bit.o	# DLL needs all object files
ar: `u' modifier ignored since `D' is the default (see `U')
ranlib liblua.a
cc -o lua  lua.o liblua.a -lm 
cc -o luac  luac.o print.o liblua.a -lm 
liblua.a(loslib.o): In function `os_tmpname':
loslib.c:(.text+0x28c): warning: the use of `tmpnam' is dangerous, better use `mkstemp'
make[4]: Leaving directory '/home/ubuntu/ci/redis/deps/lua/src'
make[3]: Leaving directory '/home/ubuntu/ci/redis/deps'
    CC adlist.o
    CC quicklist.o
    CC anet.o
    CC ae.o
    CC server.o
    CC dict.o
    CC zmalloc.o
    CC sds.o
    CC lzf_c.o
    CC lzf_d.o
lzf_d.c:57:9: warning: unknown option after ‘#pragma GCC diagnostic’ kind [-Wpragmas]
 #pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
         ^
    CC pqsort.o
    CC zipmap.o
    CC sha1.o
    CC ziplist.o
    CC release.o
    CC networking.o
    CC util.o
    CC object.o
    CC db.o
    CC replication.o
    CC rdb.o
    CC t_string.o
    CC t_list.o
    CC t_set.o
    CC t_zset.o
    CC t_hash.o
    CC config.o
    CC aof.o
    CC pubsub.o
    CC multi.o
    CC debug.o
    CC sort.o
    CC intset.o
    CC syncio.o
    CC cluster.o
    CC crc16.o
    CC endianconv.o
    CC slowlog.o
    CC scripting.o
    CC bio.o
    CC rio.o
    CC rand.o
    CC memtest.o
    CC crc64.o
    CC bitops.o
    CC sentinel.o
    CC notify.o
    CC setproctitle.o
    CC blocked.o
    CC hyperloglog.o
    CC latency.o
    CC sparkline.o
    CC redis-check-rdb.o
    CC redis-check-aof.o
    CC geo.o
    CC lazyfree.o
    CC module.o
    CC expire.o
    CC evict.o
    CC geohash.o
    CC geohash_helper.o
    CC childinfo.o
    CC defrag.o
    CC siphash.o
    CC rax.o
    CC t_stream.o
    CC listpack.o
    CC localtime.o
    CC lolwut.o
    CC lolwut5.o
    CC lolwut6.o
    CC acl.o
    CC gopher.o
    CC tracking.o
    CC sha256.o
    CC redis-cli.o
    CC redis-benchmark.o
    LINK redis-server
    INSTALL redis-sentinel
    INSTALL redis-check-rdb
    INSTALL redis-check-aof
    LINK redis-benchmark
    LINK redis-cli

Hint: It's a good idea to run 'make test' ;)

make[2]: Leaving directory '/home/ubuntu/ci/redis/src'
make[1]: Leaving directory '/home/ubuntu/ci/redis/src'
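
For reference, the 'make valgrind' target above amounts to a clean rebuild with optimizations disabled and the libc allocator (note the OPT=-O0 and MALLOC=libc values written to .make-settings), presumably so valgrind can track allocations through the system allocator. A minimal manual equivalent, assuming GNU make is run from the repository root, would be:

    make distclean
    make OPTIMIZATION="-O0" MALLOC="libc" -j 8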

@./runtest --valgrind


Cleanup: may take some time... OK
Starting test server at port 11111
[ready]: 124961
Testing unit/printver
[ready]: 124959
Testing unit/dump
[ready]: 124956
Testing unit/auth
[ready]: 124957
Testing unit/protocol
[ready]: 124958
Testing unit/keyspace
[ready]: 124962
Testing unit/scan
[ready]: 124960
Testing unit/type/string
[ready]: 124963
Testing unit/type/incr
[ready]: 124964
Testing unit/type/list
[ready]: 124965
Testing unit/type/list-2
[ready]: 124967
Testing unit/type/list-3
[ready]: 124966
Testing unit/type/set
[ready]: 124970
Testing unit/type/zset
[ready]: 124968
Testing unit/type/hash
[ready]: 124969
Testing unit/type/stream
[ready]: 124971
Testing unit/type/stream-cgroups
[ok]: Handle an empty query
[ok]: DUMP / RESTORE are able to serialize / unserialize a simple key
[ok]: AUTH fails if there is no password configured server side
[ok]: RESTORE can set an arbitrary expire to the materialized key
[ok]: RESTORE can set an expire that overflows a 32 bit integer
[ok]: SADD, SCARD, SISMEMBER, SMEMBERS basics - regular set
[ok]: RESTORE can set an absolute expire
[ok]: DEL against a single item
[ok]: Check encoding - ziplist
[ok]: Vararg DEL
[ok]: Negative multibulk length
[ok]: RESTORE can set LRU
[ok]: SADD, SCARD, SISMEMBER, SMEMBERS basics - intset
[ok]: RESTORE can set LFU
[ok]: KEYS with pattern
[ok]: RESTORE returns an error if the key already exists
[ok]: KEYS to get all keys
[ok]: RESTORE can overwrite an existing key with REPLACE
[ok]: RESTORE can detect a syntax error for unrecognized options
[ok]: DBSIZE
[ok]: DUMP of non existing key returns nil
[ok]: SADD against non set
[ok]: DEL all keys
[ok]: Out of range multibulk length
[ok]: HSET/HLEN - Small hash creation
[ok]: XGROUP CREATE: creation and duplicate group name detection
[ok]: ZSET basic ZADD and score update - ziplist
[ok]: Explicit regression for a list bug
[ok]: SADD a non-integer against an intset
[ok]: Wrong multibulk payload header
[ok]: ZSET element can't be set to NaN with ZADD - ziplist
[ok]: ZSET element can't be set to NaN with ZINCRBY
[ok]: ZADD with options syntax error with incomplete pair
[ok]: Negative multibulk payload length
[ok]: ZADD XX option without key - ziplist
[ok]: Out of range multibulk payload length
[ok]: SADD an integer larger than 64 bits
[ok]: ZADD XX existing key - ziplist
[ok]: Non-number multibulk payload length
[ok]: XGROUP CREATE: automatic stream creation fails without MKSTREAM
[ok]: ZADD XX returns the number of elements actually added
[ok]: Multi bulk request not followed by bulk arguments
[ok]: XGROUP CREATE: automatic stream creation works with MKSTREAM
[ok]: Generic wrong number of args
[ok]: Unbalanced number of quotes
[ok]: Is the small hash encoded with a ziplist?
[ok]: ZADD XX updates existing elements score
[ok]: ZADD XX and NX are not compatible
[ok]: ZADD NX with non existing key
[ok]: XADD can add entries into a stream that XRANGE can fetch
[ok]: ZADD NX only add new elements without updating old ones
[ok]: XADD IDs are incremental
[ok]: ZADD INCR works like ZINCRBY
[ok]: ZADD INCR works with a single score-element pair
[ok]: ZADD CH option changes return value to all changed elements
Testing Redis version 999.999.999 (a92921da)
[ok]: Protocol desync regression test #1
[ok]: SET and GET an item
[ok]: SET and GET an empty item
[ok]: ZINCRBY calls leading to NaN result in error
[ok]: XADD IDs are incremental when ms is the same as well
[ok]: ZADD - Variadic version base case
[ok]: ZADD - Return value is the number of actually added items
[ok]: ZADD - Variadic version does not add nothing on single parsing err
[ok]: ZADD - Variadic version will raise error on missing arg
[ok]: ZINCRBY does not work variadic even if shares ZADD implementation
[ok]: LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - ziplist
[ok]: ZCARD basics - ziplist
[ok]: Protocol desync regression test #2
[ok]: ZREM removes key after last element is removed
[ok]: ZREM variadic version
[ok]: ZREM variadic version -- remove elements after key deletion
[ok]: LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - regular list
[ok]: R/LPOP against empty list
[ok]: XREADGROUP will return only new elements
[ok]: SCAN basic
[ok]: ZRANGE basics - ziplist
[ok]: Protocol desync regression test #3
[ok]: ZREVRANGE basics - ziplist
[ok]: XREADGROUP can read the history of the elements we own
[ok]: ZRANK/ZREVRANK basics - ziplist
[ok]: Variadic RPUSH/LPUSH
[ok]: ZRANK - after deletion - ziplist
[ok]: ZINCRBY - can create a new sorted set - ziplist
[ok]: DEL a list
[ok]: XPENDING is able to return pending items
[ok]: ZINCRBY - increment and decrement - ziplist
[ok]: XPENDING can return single consumer items
[ok]: ZINCRBY return value
[ok]: XACK is able to remove items from the client/group PEL
[ok]: XACK can't remove the same item multiple times
[ok]: XACK is able to accept multiple arguments
[ok]: BLPOP, BRPOP: single existing list - linkedlist
[ok]: INCR against non existing key
[ok]: INCR against key created by incr itself
[ok]: PEL NACK reassignment after XGROUP SETID event
[ok]: BLPOP, BRPOP: multiple existing lists - linkedlist
[ok]: Regression for quicklist #3343 bug
[ok]: INCR against key originally set with SET
[ok]: XREADGROUP will not report data on empty history. Bug #5577
[ok]: INCR over 32bit value
[ok]: BLPOP, BRPOP: second list has an entry - linkedlist
[ok]: INCRBY over 32bit value with over 32bit increment
[ok]: INCR fails against key with spaces (left)
[ok]: INCR fails against key with spaces (right)
[ok]: INCR fails against key with spaces (both)
[ok]: ZRANGEBYSCORE/ZREVRANGEBYSCORE/ZCOUNT basics
[ok]: BRPOPLPUSH - linkedlist
[ok]: XREADGROUP history reporting of deleted entries. Bug #5570
[ok]: ZRANGEBYSCORE with WITHSCORES
[ok]: BLPOP, BRPOP: single existing list - ziplist
[ok]: ZRANGEBYSCORE with LIMIT
[ok]: BLPOP, BRPOP: multiple existing lists - ziplist
[ok]: ZRANGEBYSCORE with LIMIT and WITHSCORES
[ok]: BLPOP, BRPOP: second list has an entry - ziplist
[ok]: ZRANGEBYSCORE with non-value min or max
[ok]: BRPOPLPUSH - ziplist
[ok]: INCR fails against a key holding a list
[ok]: DECRBY over 32bit value with over 32bit increment, negative res
[ok]: INCR uses shared objects in the 0-9999 range
[ok]: INCR can modify objects in-place
[ok]: SADD overflows the maximum allowed integers in an intset
[ok]: Variadic SADD
[ok]: BLPOP, LPUSH + DEL should not awake blocked client
[ok]: ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics
[ok]: BLPOP, LPUSH + DEL + SET should not awake blocked client
[ok]: ZLEXCOUNT advanced
[ok]: INCRBYFLOAT against non existing key
[ok]: BLPOP with same key multiple times should work (issue #801)
[ok]: INCRBYFLOAT against key originally set with SET
[ok]: MULTI/EXEC is isolated from the point of view of BLPOP
[ok]: ZRANGEBYLEX with LIMIT
[ok]: ZRANGEBYLEX with invalid lex range specifiers
[ok]: INCRBYFLOAT over 32bit value
[ok]: INCRBYFLOAT over 32bit value with over 32bit increment
[ok]: INCRBYFLOAT fails against key with spaces (left)
[ok]: INCRBYFLOAT fails against key with spaces (right)
[ok]: INCRBYFLOAT fails against key with spaces (both)
[ok]: INCRBYFLOAT fails against a key holding a list
[ok]: INCRBYFLOAT does not allow NaN or Infinity
[ok]: INCRBYFLOAT decrement
[ok]: Very big payload in GET/SET
[ok]: string to double with null terminator
[ok]: SCAN COUNT
[1/51 done]: unit/printver (2 seconds)
Testing unit/sort
[ok]: ZREMRANGEBYSCORE basics
[ok]: ZREMRANGEBYSCORE with non-value min or max
[ok]: ZREMRANGEBYRANK basics
[ok]: ZUNIONSTORE against non-existing key doesn't set destination - ziplist
[ok]: ZUNIONSTORE with empty set - ziplist
[ok]: ZUNIONSTORE basics - ziplist
[ok]: ZUNIONSTORE with weights - ziplist
[ok]: ZUNIONSTORE with a regular set and weights - ziplist
[ok]: ZUNIONSTORE with AGGREGATE MIN - ziplist
[ok]: ZUNIONSTORE with AGGREGATE MAX - ziplist
[ok]: ZINTERSTORE basics - ziplist
[ok]: ZINTERSTORE with weights - ziplist
[ok]: ZINTERSTORE with a regular set and weights - ziplist
[ok]: ZINTERSTORE with AGGREGATE MIN - ziplist
[ok]: ZINTERSTORE with AGGREGATE MAX - ziplist
[ok]: ZUNIONSTORE with +inf/-inf scores - ziplist
[ok]: ZUNIONSTORE with NaN weights ziplist
[ok]: SCAN MATCH
[ok]: ZINTERSTORE with +inf/-inf scores - ziplist
[ok]: ZINTERSTORE with NaN weights ziplist
[ok]: Basic ZPOP with a single key - ziplist
[ok]: ZPOP with count - ziplist
[ok]: BZPOP with a single existing sorted set - ziplist
[ok]: BZPOP with multiple existing sorted sets - ziplist
[ok]: BZPOP second sorted set has members - ziplist
[ok]: Check encoding - skiplist
[2/51 done]: unit/type/incr (2 seconds)
Testing unit/expire
[ok]: ZSET basic ZADD and score update - skiplist
[ok]: ZSET element can't be set to NaN with ZADD - skiplist
[ok]: ZSET element can't be set to NaN with ZINCRBY
[ok]: ZADD with options syntax error with incomplete pair
[ok]: ZADD XX option without key - skiplist
[ok]: ZADD XX existing key - skiplist
[ok]: ZADD XX returns the number of elements actually added
[ok]: BLPOP with variadic LPUSH
[ok]: ZADD XX updates existing elements score
[ok]: ZADD XX and NX are not compatible
[ok]: ZADD NX with non existing key
[ok]: ZADD NX only add new elements without updating old ones
[ok]: ZADD INCR works like ZINCRBY
[ok]: ZADD INCR works with a single score-element pair
[ok]: ZADD CH option changes return value to all changed elements
[ok]: ZINCRBY calls leading to NaN result in error
[ok]: ZADD - Variadic version base case
[ok]: ZADD - Return value is the number of actually added items
[ok]: ZADD - Variadic version does not add nothing on single parsing err
[ok]: ZADD - Variadic version will raise error on missing arg
[ok]: ZINCRBY does not work variadic even if shares ZADD implementation
[ok]: ZCARD basics - skiplist
[ok]: ZREM removes key after last element is removed
[ok]: ZREM variadic version
[ok]: ZREM variadic version -- remove elements after key deletion
[ok]: ZRANGE basics - skiplist
[ok]: ZREVRANGE basics - skiplist
[ok]: ZRANK/ZREVRANK basics - skiplist
[ok]: ZRANK - after deletion - skiplist
[ok]: ZINCRBY - can create a new sorted set - skiplist
[ok]: ZINCRBY - increment and decrement - skiplist
[ok]: ZINCRBY return value
[ok]: ZRANGEBYSCORE/ZREVRANGEBYSCORE/ZCOUNT basics
[ok]: ZRANGEBYSCORE with WITHSCORES
[ok]: ZRANGEBYSCORE with LIMIT
[ok]: HSET/HLEN - Big hash creation
[ok]: ZRANGEBYSCORE with LIMIT and WITHSCORES
[ok]: ZRANGEBYSCORE with non-value min or max
[ok]: ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics
[ok]: ZLEXCOUNT advanced
[ok]: ZRANGEBYLEX with LIMIT
[ok]: ZRANGEBYLEX with invalid lex range specifiers
[ok]: ZREMRANGEBYSCORE basics
[ok]: ZREMRANGEBYSCORE with non-value min or max
[ok]: ZREMRANGEBYRANK basics
[ok]: ZUNIONSTORE against non-existing key doesn't set destination - skiplist
[ok]: SCAN TYPE
[ok]: ZUNIONSTORE with empty set - skiplist
[ok]: ZUNIONSTORE basics - skiplist
[ok]: ZUNIONSTORE with weights - skiplist
[ok]: ZUNIONSTORE with a regular set and weights - skiplist
[ok]: XCLAIM can claim PEL items from another consumer
[ok]: ZUNIONSTORE with AGGREGATE MIN - skiplist
[ok]: ZUNIONSTORE with AGGREGATE MAX - skiplist
[ok]: ZINTERSTORE basics - skiplist
[ok]: ZINTERSTORE with weights - skiplist
[ok]: ZINTERSTORE with a regular set and weights - skiplist
[ok]: ZINTERSTORE with AGGREGATE MIN - skiplist
[ok]: ZINTERSTORE with AGGREGATE MAX - skiplist
[ok]: ZUNIONSTORE with +inf/-inf scores - skiplist
[ok]: SSCAN with encoding intset
[ok]: ZUNIONSTORE with NaN weights skiplist
[ok]: ZINTERSTORE with +inf/-inf scores - skiplist
[ok]: ZINTERSTORE with NaN weights skiplist
[ok]: SSCAN with encoding hashtable
[ok]: Basic ZPOP with a single key - skiplist
[ok]: ZPOP with count - skiplist
[ok]: BZPOP with a single existing sorted set - skiplist
[ok]: BZPOP with multiple existing sorted sets - skiplist
[ok]: BZPOP second sorted set has members - skiplist
[ok]: HSCAN with encoding ziplist
[ok]: ZINTERSTORE regression with two sets, intset+hashtable
[ok]: ZUNIONSTORE regression, should not create NaN in scores
[ok]: ZINTERSTORE #516 regression, mixed sets and ziplist zsets
[ok]: Set encoding after DEBUG RELOAD
[ok]: SREM basics - regular set
[ok]: SREM basics - intset
[ok]: SREM with multiple arguments
[ok]: SREM variadic version with more args needed to destroy the key
[ok]: DEL against expired key
[ok]: EXISTS
[ok]: Zero length value in key. SET/GET/EXISTS
[ok]: Commands pipelining
[ok]: Non existing command
[ok]: RENAME basic usage
[ok]: RENAME source key should no longer exist
[ok]: RENAME against already existing key
[ok]: RENAMENX basic usage
[ok]: RENAMENX against already existing key
[ok]: RENAMENX against already existing key (2)
[ok]: RENAME against non existing source key
[ok]: RENAME where source and dest key are the same (existing)
[ok]: RENAMENX where source and dest key are the same (existing)
[ok]: RENAME where source and dest key are the same (non existing)
[ok]: RENAME with volatile key, should move the TTL as well
[ok]: RENAME with volatile key, should not inherit TTL of target key
[ok]: DEL all keys again (DB 0)
[ok]: DEL all keys again (DB 1)
[ok]: MOVE basic usage
[ok]: MOVE against key existing in the target DB
[ok]: MOVE against non-integer DB (#1428)
[ok]: MOVE can move key expire metadata as well
[ok]: MOVE does not create an expire if it does not exist
[ok]: SET/GET keys in different DBs
[ok]: RANDOMKEY
[ok]: RANDOMKEY against empty DB
[ok]: RANDOMKEY regression 1
[ok]: KEYS * two times with long key, Github issue #1208
[ok]: HSCAN with encoding hashtable
[ok]: XCLAIM without JUSTID increments delivery count
[ok]: ZSCAN with encoding ziplist
[ok]: ZUNIONSTORE result is sorted
[ok]: ZSET commands don't accept the empty strings as valid score
[ok]: Generated sets must be encoded as hashtable
[ok]: SINTER with two sets - hashtable
[ok]: SINTERSTORE with two sets - hashtable
[3/51 done]: unit/keyspace (3 seconds)
Testing unit/other
[ok]: SINTERSTORE with two sets, after a DEBUG RELOAD - hashtable
[ok]: Is the big hash encoded with a hash table?
[ok]: ZSCORE - ziplist
[ok]: SUNION with two sets - hashtable
[ok]: BRPOPLPUSH with zero timeout should block indefinitely
[ok]: HGET against the small hash
[ok]: SUNIONSTORE with two sets - hashtable
[ok]: SINTER against three sets - hashtable
[ok]: SINTERSTORE with three sets - hashtable
[ok]: ZSCAN with encoding skiplist
[ok]: SUNION with non existing keys - hashtable
[ok]: SDIFF with two sets - hashtable
[ok]: SDIFF with three sets - hashtable
[ok]: SDIFFSTORE with three sets - hashtable
[ok]: AUTH fails when a wrong password is given
[ok]: Arbitrary command gives an error when AUTH is required
[ok]: AUTH succeeds when the right password is given
[ok]: SCAN guarantees check under write load
[ok]: SSCAN with integer encoded object (issue #1345)
[ok]: SSCAN with PATTERN
[ok]: HSCAN with PATTERN
[ok]: ZSCAN with PATTERN
[ok]: Once AUTH succeeded we can actually send commands to the server
[ok]: HGET against the big hash
[ok]: HGET against non existing key
[ok]: HSET in update and insert mode
[ok]: HSETNX target key missing - small hash
[ok]: HSETNX target key exists - small hash
[ok]: HSETNX target key missing - big hash
[ok]: HSETNX target key exists - big hash
[ok]: HMSET wrong number of args
[ok]: HMSET - small hash
[4/51 done]: unit/auth (4 seconds)
Testing unit/multi
[ok]: Generated sets must be encoded as intset
[ok]: SINTER with two sets - intset
[ok]: MIGRATE is caching connections
[ok]: SINTERSTORE with two sets - intset
[ok]: HMSET - big hash
[ok]: HMGET against non existing key and fields
[ok]: HMGET against wrong type
[ok]: HMGET - small hash
[ok]: XADD with MAXLEN option
[ok]: SINTERSTORE with two sets, after a DEBUG RELOAD - intset
[ok]: HMGET - big hash
[ok]: Regression for a crash with blocking ops and pipelining
[ok]: HKEYS - small hash
[ok]: HKEYS - big hash
[ok]: SUNION with two sets - intset
[ok]: HVALS - small hash
[ok]: HVALS - big hash
[ok]: HGETALL - small hash
[ok]: SUNIONSTORE with two sets - intset
[ok]: SINTER against three sets - intset
[ok]: SINTERSTORE with three sets - intset
[ok]: HGETALL - big hash
[ok]: HDEL and return value
[ok]: SUNION with non existing keys - intset
[ok]: HDEL - more than a single value
[ok]: SDIFF with two sets - intset
[ok]: ZSCORE after a DEBUG RELOAD - ziplist
[ok]: HDEL - hash becomes empty before deleting all specified fields
[ok]: Old Ziplist: SORT BY key
[ok]: HEXISTS
[ok]: SDIFF with three sets - intset
[ok]: SDIFFSTORE with three sets - intset
[ok]: Old Ziplist: SORT BY key with limit
[ok]: Is a ziplist encoded Hash promoted on big payload?
[ok]: SDIFF with first set empty
[ok]: SDIFF with same set two times
[ok]: HINCRBY against non existing database key
[ok]: HINCRBY against non existing hash key
[ok]: Old Ziplist: SORT BY hash field
[ok]: HINCRBY against hash key created by hincrby itself
[ok]: HINCRBY against hash key originally set with HSET
[ok]: HINCRBY over 32bit value
[ok]: HINCRBY over 32bit value with over 32bit increment
[ok]: HINCRBY fails against hash value with spaces (left)
[ok]: HINCRBY fails against hash value with spaces (right)
[ok]: HINCRBY can detect overflows
[ok]: HINCRBYFLOAT against non existing database key
[ok]: HINCRBYFLOAT against non existing hash key
[ok]: HINCRBYFLOAT against hash key created by hincrby itself
[ok]: HINCRBYFLOAT against hash key originally set with HSET
[ok]: HINCRBYFLOAT over 32bit value
[ok]: HINCRBYFLOAT over 32bit value with over 32bit increment
[ok]: HINCRBYFLOAT fails against hash value with spaces (left)
[ok]: HINCRBYFLOAT fails against hash value with spaces (right)
[ok]: HSTRLEN against the small hash
[ok]: EXPIRE - set timeouts multiple times
[ok]: EXPIRE - It should be still possible to read 'x'
[5/51 done]: unit/protocol (4 seconds)
Testing unit/quit
[ok]: ZSCAN scores: regression test for issue #2175
[ok]: HSTRLEN against the big hash
[ok]: HSTRLEN against non existing field
[ok]: HSTRLEN corner cases
[ok]: Hash ziplist regression test for large keys
[ok]: BRPOPLPUSH with a client BLPOPing the target list
[ok]: BRPOPLPUSH with wrong source type
[ok]: Hash fuzzing #1 - 10 fields
[ok]: ZSET sorting stresser - ziplist
[ok]: Hash fuzzing #2 - 10 fields
[ok]: SAVE - make sure there are all the types as values
[ok]: BRPOPLPUSH with wrong destination type
[ok]: BRPOPLPUSH maintains order of elements after failure
[ok]: BRPOPLPUSH with multiple blocked clients
[ok]: Linked BRPOPLPUSH
[ok]: Circular BRPOPLPUSH
[ok]: Self-referential BRPOPLPUSH
[ok]: BRPOPLPUSH inside a transaction
[ok]: PUSH resulting from BRPOPLPUSH affect WATCH
[ok]: BRPOPLPUSH does not affect WATCH while still blocked
[ok]: MULTI / EXEC basics
[ok]: DISCARD
[ok]: Nested MULTI are not allowed
[ok]: Old Linked list: SORT BY key
[ok]: MULTI where commands alter argc/argv
[ok]: WATCH inside MULTI is not allowed
[ok]: EXEC fails if there are errors while queueing commands #1
[ok]: EXEC fails if there are errors while queueing commands #2
[ok]: If EXEC aborts, the client MULTI state is cleared
[ok]: EXEC works on WATCHed key not modified
[ok]: Old Linked list: SORT BY key with limit
[ok]: EXEC fail on WATCHed key modified (1 key of 1 watched)
[ok]: EXEC fail on WATCHed key modified (1 key of 5 watched)
[ok]: EXEC fail on WATCHed key modified by SORT with STORE even if the result is empty
[ok]: After successful EXEC key is no longer watched
[ok]: After failed EXEC key is no longer watched
[ok]: It is possible to UNWATCH
[ok]: UNWATCH when there is nothing watched works as expected
[ok]: FLUSHALL is able to touch the watched keys
[ok]: FLUSHALL does not touch non affected keys
[ok]: FLUSHDB is able to touch the watched keys
[ok]: FLUSHDB does not touch non affected keys
[ok]: WATCH is able to remember the DB a key belongs to
[ok]: WATCH will consider touched keys target of EXPIRE
[ok]: Old Linked list: SORT BY hash field
[ok]: QUIT returns OK
[ok]: Pipelined commands after QUIT must not be executed
[ok]: Pipelined commands after QUIT that exceed read buffer size
[6/51 done]: unit/quit (2 seconds)
Testing unit/aofrw
[ok]: Consumer group last ID propagation to slave (NOACK=0)
[ok]: EXPIRE - After 2.1 seconds the key should no longer be here
[ok]: EXPIRE - write on expire should work
[ok]: EXPIREAT - Check for EXPIRE alike behavior
[ok]: SETEX - Set + Expire combo operation. Check for TTL
[ok]: SETEX - Check value
[ok]: SETEX - Overwrite old key
[ok]: FUZZ stresser with data model binary
[ok]: WATCH will not consider touched expired keys
[ok]: DISCARD should clear the WATCH dirty flag on the client
[ok]: DISCARD should UNWATCH all the keys
[ok]: MULTI / EXEC is propagated correctly (single write command)
[ok]: Consumer group last ID propagation to slave (NOACK=1)
[ok]: FUZZ stresser with data model alpha
[ok]: MULTI / EXEC is propagated correctly (empty transaction)
[ok]: SETEX - Wait for the key to expire
[ok]: SETEX - Wrong time parameter
[ok]: PERSIST can undo an EXPIRE
[ok]: PERSIST returns 0 against non existing or non volatile keys
[ok]: MULTI / EXEC is propagated correctly (read-only commands)
[ok]: BRPOPLPUSH timeout
[ok]: BLPOP when new key is moved into place
[ok]: BLPOP when result key is created by SORT..STORE
[ok]: BLPOP: with single empty list argument
[ok]: BLPOP: with negative timeout
[ok]: BLPOP: with non-integer timeout
[ok]: MULTI / EXEC is propagated correctly (write command, no effect)
[ok]: DISCARD should not fail during OOM
[7/51 done]: unit/type/stream-cgroups (7 seconds)
Testing unit/acl
[8/51 done]: unit/multi (4 seconds)
Testing integration/block-repl
[ok]: FUZZ stresser with data model compr
[ok]: BLPOP: with zero timeout should block indefinitely
[ok]: BLPOP: second argument is not a list
[ok]: Hash fuzzing #1 - 512 fields
[ok]: BGSAVE
[ok]: SELECT an out of range DB
[ok]: EXPIRE precision is now the millisecond
[ok]: Connections start with the default user
[ok]: It is possible to create new users
[ok]: New users start disabled
[ok]: Enabling the user allows the login
[ok]: Only the set of correct passwords work
[ok]: It is possible to remove passwords from the set of valid ones
[ok]: Test password hashes can be added
[ok]: Test password hashes validate input
[ok]: ACL GETUSER returns the password hash instead of the actual password
[ok]: Test hashed passwords removal
[ok]: By default users are not able to access any command
[ok]: By default users are not able to access any key
[ok]: It's possible to allow the access of a subset of keys
[ok]: Users can be configured to authenticate with any password
[ok]: ACLs can exclude single commands
[ok]: ACLs can include or exclude whole classes of commands
[ok]: ACLs can include single subcommands
[ok]: ACL GETUSER is able to translate back command permissions
[ok]: ACL #5998 regression: memory leaks adding / removing subcommands
[ok]: Very big payload random access
[9/51 done]: unit/acl (3 seconds)
Testing integration/replication
[ok]: PEXPIRE/PSETEX/PEXPIREAT can set sub-second expires
[ok]: TTL returns time to live in seconds
[ok]: PTTL returns time to live in milliseconds
[ok]: TTL / PTTL return -1 if key has no expire
[ok]: TTL / PTTL return -2 if key does not exist
[ok]: BLPOP: timeout
[ok]: BLPOP: arguments are empty
[ok]: BRPOP: with single empty list argument
[ok]: BRPOP: with negative timeout
[ok]: BRPOP: with non-integer timeout
[ok]: Redis should actively expire keys incrementally
[ok]: BRPOP: with zero timeout should block indefinitely
[ok]: BRPOP: second argument is not a list
[ok]: Hash fuzzing #2 - 512 fields
[ok]: Redis should lazy expire keys
[ok]: First server should have role slave after SLAVEOF
[ok]: EXPIRE should not resurrect keys (issue #1026)
[ok]: 5 keys in, 5 keys out
[ok]: EXPIRE with empty string as TTL should report an error
[ok]: BRPOP: timeout
[ok]: BRPOP: arguments are empty
[ok]: BLPOP inside a transaction
[ok]: LPUSHX, RPUSHX - generic
[ok]: LPUSHX, RPUSHX - linkedlist
[ok]: LINSERT - linkedlist
[ok]: Check consistency of different data types after a reload
[ok]: LPUSHX, RPUSHX - ziplist
[ok]: LINSERT - ziplist
[ok]: LINSERT raise error on bad syntax
[ok]: LINDEX consistency test - quicklist
[ok]: LINDEX random access - quicklist
[ok]: Slave enters handshake
[ok]: Check if list is still ok after a DEBUG RELOAD - quicklist
[ok]: LINDEX consistency test - quicklist
[ok]: Same dataset digest if saving/reloading as AOF?
[ok]: LINDEX random access - quicklist
[ok]: XADD mass insertion and XLEN
[ok]: XRANGE COUNT works as expected
[ok]: XREVRANGE COUNT works as expected
[ok]: EXPIRES after a reload (snapshot + append only file rewrite)
[ok]: Check if list is still ok after a DEBUG RELOAD - quicklist
[ok]: LLEN against non-list value error
[ok]: LLEN against non existing key
[ok]: LINDEX against non-list value error
[ok]: LINDEX against non existing key
[ok]: LPUSH against non-list value error
[ok]: RPUSH against non-list value error
[ok]: RPOPLPUSH base case - linkedlist
[ok]: RPOPLPUSH with the same list as src and dst - linkedlist
[ok]: RPOPLPUSH with linkedlist source and existing target linkedlist
[ok]: RPOPLPUSH with linkedlist source and existing target ziplist
[ok]: RPOPLPUSH base case - ziplist
[ok]: RPOPLPUSH with the same list as src and dst - ziplist
[ok]: RPOPLPUSH with ziplist source and existing target linkedlist
[ok]: RPOPLPUSH with ziplist source and existing target ziplist
[ok]: RPOPLPUSH against non existing key
[ok]: RPOPLPUSH against non list src key
[ok]: RPOPLPUSH against non list dst key
[ok]: RPOPLPUSH against non existing src key
[ok]: Basic LPOP/RPOP - linkedlist
[ok]: Basic LPOP/RPOP - ziplist
[ok]: LPOP/RPOP against non list value
[ok]: Mass RPOP/LPOP - quicklist
[ok]: Stress test the hash ziplist -> hashtable encoding conversion
[ok]: SET 10000 numeric keys and access all them in reverse order
[ok]: DBSIZE should be 10000 now
[ok]: SETNX target key missing
[ok]: SETNX target key exists
[ok]: SETNX against not-expired volatile key
[ok]: Mass RPOP/LPOP - quicklist
[ok]: LRANGE basics - linkedlist
[ok]: LRANGE inverted indexes - linkedlist
[ok]: LRANGE out of range indexes including the full list - linkedlist
[ok]: LRANGE out of range negative end index - linkedlist
[ok]: LRANGE basics - ziplist
[ok]: LRANGE inverted indexes - ziplist
[ok]: LRANGE out of range indexes including the full list - ziplist
[ok]: LRANGE out of range negative end index - ziplist
[ok]: LRANGE against non existing key
[ok]: SET - use EX/PX option, TTL should not be reset after loadaof
[ok]: LTRIM basics - linkedlist
[ok]: LTRIM out of range negative end index - linkedlist
[ok]: LTRIM basics - ziplist
[ok]: LTRIM out of range negative end index - ziplist
[ok]: LSET - linkedlist
[ok]: LSET out of range index - linkedlist
[ok]: LSET - ziplist
[ok]: LSET out of range index - ziplist
[ok]: LSET against non existing key
[ok]: LSET against non list value
[ok]: LREM remove all the occurrences - linkedlist
[ok]: LREM remove the first occurrence - linkedlist
[ok]: LREM remove non existing element - linkedlist
[ok]: LREM starting from tail with negative count - linkedlist
[ok]: LREM starting from tail with negative count (2) - linkedlist
[ok]: LREM deleting objects that may be int encoded - linkedlist
[ok]: LREM remove all the occurrences - ziplist
[ok]: LREM remove the first occurrence - ziplist
[ok]: LREM remove non existing element - ziplist
[ok]: LREM starting from tail with negative count - ziplist
[ok]: LREM starting from tail with negative count (2) - ziplist
[ok]: LREM deleting objects that may be int encoded - ziplist
[ok]: XRANGE can be used to iterate the whole stream
[10/51 done]: unit/type/hash (17 seconds)
Testing integration/replication-2
[11/51 done]: unit/expire (15 seconds)
Testing integration/replication-3
[ok]: Regression for bug 593 - chaining BRPOPLPUSH with other blocking cmds
[12/51 done]: unit/type/list (18 seconds)
Testing integration/replication-4
[ok]: MIGRATE cached connections are released after some time
[ok]: EXPIRES after AOF reload (without rewrite)
[ok]: SDIFF fuzzing
[ok]: SINTER against non-set should throw error
[ok]: SUNION against non-set should throw error
[ok]: SINTER should handle non existing key as empty
[ok]: SINTER with same integer elements but different encoding
[ok]: SINTERSTORE against non existing keys should delete dstkey
[ok]: SUNIONSTORE against non existing keys should delete dstkey
[ok]: SPOP basics - hashtable
[ok]: SPOP with =1 - hashtable
[ok]: SRANDMEMBER - hashtable
[ok]: SPOP basics - intset
[ok]: SPOP with =1 - intset
[ok]: SRANDMEMBER - intset
[ok]: SPOP with <count>
[ok]: SPOP with <count>
[ok]: SPOP using integers, testing Knuth's and Floyd's algorithm
[ok]: SPOP using integers with Knuth's algorithm
[ok]: SPOP new implementation: code path #1
[ok]: SPOP new implementation: code path #2
[ok]: SPOP new implementation: code path #3
[ok]: SRANDMEMBER with <count> against non existing key
[ok]: SRANDMEMBER with <count> - hashtable
[ok]: SRANDMEMBER with <count> - intset
[ok]: SMOVE basics - from regular set to intset
[ok]: SMOVE basics - from intset to regular set
[ok]: SMOVE non existing key
[ok]: SMOVE non existing src set
[ok]: SMOVE from regular set to non existing destination set
[ok]: SMOVE from intset to non existing destination set
[ok]: SMOVE wrong src key type
[ok]: SMOVE wrong dst key type
[ok]: SMOVE with identical source and destination
[ok]: MIGRATE is able to migrate a key between two instances
[ok]: First server should have role slave after SLAVEOF
[ok]: First server should have role slave after SLAVEOF
[ok]: If min-slaves-to-write is honored, write is accepted
[ok]: No write if min-slaves-to-write is < attached slaves
[ok]: If min-slaves-to-write is honored, write is accepted (again)
[ok]: Old Big Linked list: SORT BY key
[ok]: XREVRANGE returns the reverse of XRANGE
[ok]: XREAD with non empty stream
[ok]: Non blocking XREAD with empty streams
[ok]: XREAD with non empty second stream
[ok]: Blocking XREAD waiting new data
[ok]: Blocking XREAD waiting old data
[ok]: XREAD: XADD + DEL should not awake client
[ok]: XREAD: XADD + DEL + LPUSH should not awake client
[ok]: XREAD with same stream name multiple times should work
[ok]: XREAD + multiple XADD inside transaction
[ok]: XDEL basic test
[ok]: Old Big Linked list: SORT BY key with limit
[ok]: MIGRATE is able to copy a key between two instances
[ok]: First server should have role slave after SLAVEOF
[ok]: Old Big Linked list: SORT BY hash field
[ok]: Intset: SORT BY key
[ok]: Intset: SORT BY key with limit
[ok]: Intset: SORT BY hash field
[ok]: Slave is able to detect timeout during handshake
[ok]: MIGRATE will not overwrite existing keys, unless REPLACE is used
[ok]: SETNX against expired volatile key
[ok]: MGET
[ok]: MGET against non existing key
[ok]: MGET against non-string key
[ok]: GETSET (set new value)
[ok]: GETSET (replace old value)
[ok]: MSET base case
[ok]: MSET wrong number of args
[ok]: MSETNX with already existent key
[ok]: MSETNX with not existing keys
[ok]: STRLEN against non-existing key
[ok]: STRLEN against integer-encoded value
[ok]: STRLEN against plain string
[ok]: SETBIT against non-existing key
[ok]: SETBIT against string-encoded key
[ok]: SETBIT against integer-encoded key
[ok]: SETBIT against key with wrong type
[ok]: SETBIT with out of range bit offset
[ok]: SETBIT with non-bit argument
[ok]: Hash table: SORT BY key
[ok]: Hash table: SORT BY key with limit
[ok]: Hash table: SORT BY hash field
[ok]: SETBIT fuzzing
[ok]: GETBIT against non-existing key
[ok]: GETBIT against string-encoded key
[ok]: GETBIT against integer-encoded key
[ok]: SETRANGE against non-existing key
[ok]: SETRANGE against string-encoded key
[ok]: SETRANGE against integer-encoded key
[ok]: SETRANGE against key with wrong type
[ok]: SETRANGE with out of range offset
[ok]: GETRANGE against non-existing key
[ok]: GETRANGE against string value
[ok]: GETRANGE against integer-encoded value
[ok]: MIGRATE propagates TTL correctly
[ok]: ZRANGEBYSCORE fuzzy test, 100 ranges in 128 element sorted set - ziplist
[ok]: No write if min-slaves-max-lag is > of the slave lag
[ok]: min-slaves-to-write is ignored by slaves
[ok]: ZRANGEBYLEX fuzzy test, 100 ranges in 128 element sorted set - ziplist
[ok]: GETRANGE fuzzing
[ok]: Extended SET can detect syntax errors
[ok]: Extended SET NX option
[ok]: Extended SET XX option
[ok]: Extended SET EX option
[ok]: Extended SET PX option
[ok]: Extended SET using multiple options at once
[ok]: GETRANGE with huge ranges, Github issue #1844
[13/51 done]: unit/type/string (28 seconds)
Testing integration/replication-psync
[ok]: Set instance A as slave of B
[ok]: BRPOPLPUSH replication, when blocking against empty list
[ok]: BRPOPLPUSH replication, list exists
[ok]: BLPOP followed by role change, issue #2473
[ok]: ZREMRANGEBYLEX fuzzy test, 100 ranges in 128 element sorted set - ziplist
[ok]: ZSETs skiplist implementation backlink consistency test - ziplist
[ok]: LTRIM stress testing - linkedlist
[ok]: intsets implementation stress testing
[14/51 done]: unit/type/set (32 seconds)
Testing integration/aof
[ok]: XDEL fuzz test
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: Unfinished MULTI: Server should start if load-truncated is yes
[ok]: SCAN regression test for issue #4906
[15/51 done]: unit/scan (34 seconds)
Testing integration/rdb
[ok]: Second server should have role master at first
[ok]: SLAVEOF should start with link status "down"
[ok]: The role should immediately be changed to "replica"
[ok]: Sync should have transferred keys from master
[ok]: The link status should be up
[ok]: SET on the master should immediately propagate
[ok]: Short read: Server should start if load-truncated is yes
[ok]: Truncated AOF loaded: we expect foo to be equal to 5
[ok]: Append a new command after loading an incomplete AOF
[ok]: RDB encoding loading test
[ok]: Test replication partial resync: no reconnection, just sync (diskless: no, disabled, reconnect: 0)
[ok]: Test replication with parallel clients writing in different DBs
[ok]: Server started empty with non-existing RDB file
[ok]: Short read + command: Server should start
[ok]: FLUSHALL should replicate
[ok]: Truncated AOF loaded: we expect foo to be equal to 6 now
[ok]: ROLE in master reports master with a slave
[ok]: ROLE in slave reports slave in connected state
[ok]: Test replication with blocking lists and sorted sets operations
[16/51 done]: integration/block-repl (30 seconds)
Testing integration/convert-zipmap-hash-on-load
[ok]: Server started empty with empty RDB file
[ok]: Bad format: Server should have logged an error
[ok]: ZSETs ZRANK augmented skip list stress testing - ziplist
[ok]: BZPOPMIN, ZADD + DEL should not awake blocked client
[ok]: BZPOPMIN, ZADD + DEL + SET should not awake blocked client
[ok]: BZPOPMIN with same key multiple times should work
[ok]: MULTI/EXEC is isolated from the point of view of BZPOPMIN
[ok]: BZPOPMIN with variadic ZADD
[ok]: AOF rewrite during write load: RDB preamble=yes
[ok]: RDB load zipmap hash: converts to ziplist
[ok]: Big Hash table: SORT BY key
[ok]: BZPOPMIN with zero timeout should block indefinitely
[ok]: Big Hash table: SORT BY key with limit
[ok]: ZSCORE - skiplist
[ok]: Unfinished MULTI: Server should have logged an error
[ok]: ZSCORE after a DEBUG RELOAD - skiplist
[ok]: ZSET sorting stresser - skiplist
[ok]: RDB load zipmap hash: converts to hash table when hash-max-ziplist-entries is exceeded
[ok]: Big Hash table: SORT BY hash field
[ok]: SORT GET #
[ok]: SORT GET <const>
[ok]: SORT GET (key and hash) with sanity check
[ok]: SORT BY key STORE
[ok]: SORT BY hash field STORE
[ok]: SORT extracts STORE correctly
[ok]: SORT extracts multiple STORE correctly
[ok]: SORT DESC
[ok]: SORT ALPHA against integer encoded strings
[ok]: SORT sorted set
[ok]: SORT sorted set BY nosort should retain ordering
[ok]: SORT sorted set BY nosort + LIMIT
[ok]: Short read: Server should have logged an error
[ok]: Short read: Utility should confirm the AOF is not valid
[ok]: Short read: Utility should be able to fix the AOF
[ok]: SORT sorted set BY nosort works as expected from scripts
[ok]: SORT sorted set: +inf and -inf handling
[ok]: Test RDB stream encoding
[ok]: SORT regression for issue #19, sorting floats
[ok]: SORT with STORE returns zero if result is empty (github issue 224)
[ok]: SORT with STORE does not create empty lists (github issue 224)
[ok]: SORT with STORE removes key if result is empty (github issue 227)
[ok]: SORT with BY <constant> and STORE should still order output
[ok]: SORT will complain with numerical sorting and bad doubles (1)
[ok]: SORT will complain with numerical sorting and bad doubles (2)
[ok]: SORT BY sub-sorts lexicographically if score is the same
[ok]: SORT GET with pattern ending with just -> does not get hash field
[ok]: SORT by nosort retains native order for lists
[ok]: SORT by nosort plus store retains native order for lists
[ok]: SORT by nosort with limit returns based on original list order
[ok]: Slave should be able to synchronize with the master
[ok]: Detect write load to master
[ok]: RDB load zipmap hash: converts to hash table when hash-max-ziplist-value is exceeded
[ok]: SORT speed, 100 element list BY key, 100 times
[17/51 done]: integration/convert-zipmap-hash-on-load (6 seconds)
Testing integration/logging
[ok]: First server should have role slave after SLAVEOF
[ok]: With min-slaves-to-write (1,3): master should be writable
[ok]: With min-slaves-to-write (2,3): master should not be writable
[ok]: Fixed AOF: Server should have been started
[ok]: Fixed AOF: Keyspace should contain values that were parseable
[ok]: Server should not start if RDB file can't be open
[ok]: SORT speed, 100 element list BY hash field, 100 times
[ok]: SORT speed, 100 element list directly, 100 times
[ok]: SORT speed, 100 element list BY <const>, 100 times
[18/51 done]: unit/sort (43 seconds)
Testing integration/psync2
[ok]: MASTER and SLAVE consistency with expire

Logged warnings (pid 126468):
(none)

[exception]: Executing test client: I/O error reading reply.
I/O error reading reply
    while executing
"[srv $level "client"] {*}$args"
    (procedure "r" line 7)
    invoked from within
"r debug sleep 1"
    ("uplevel" body line 3)
    invoked from within
"uplevel 1 $code"
    (procedure "test" line 47)
    invoked from within
"test "Server is able to generate a stack trace on selected systems" {
            r config set watchdog-period 200
            r debug sleep 1
       ..."
    ("uplevel" body line 2)
    invoked from within
"uplevel 1 $code "
    (procedure "start_server" line 3)
    invoked from within
"start_server [list overrides [list dir $server_path]] {
        test "Server is able to generate a stack trace on selected systems" {
            r co..."
    (file "tests/integration/logging.tcl" line 5)
    invoked from within
"source $path"
    (procedure "execute_tests" line 4)
    invoked from within
"execute_tests $data"
    (procedure "test_client_main" line 10)
    invoked from within
"test_client_main $::test_server_port "
Killing still running Redis server 124982
Killing still running Redis server 124995
Killing still running Redis server 124996
Killing still running Redis server 125001
Killing still running Redis server 125004
Killing still running Redis server 125198
Killing still running Redis server 125310
Killing still running Redis server 125493
Killing still running Redis server 125495
Killing still running Redis server 125524
Killing still running Redis server 125529
Killing still running Redis server 125965
Killing still running Redis server 126065
Killing still running Redis server 126089
Killing still running Redis server 126159
Killing still running Redis server 126237
Killing still running Redis server 126258
I/O error reading reply
    while executing
"$r set [expr rand()] [expr rand()]"
    (procedure "gen_write_load" line 6)
    invoked from within
"gen_write_load [lindex $argv 0] [lindex $argv 1] [lindex $argv 2]"
    (file "tests/helpers/gen_write_load.tcl" line 15)
I/O error reading reply
    while executing
"$r set [expr rand()] [expr rand()]"
    (procedure "gen_write_load" line 6)
    invoked from within
"gen_write_load [lindex $argv 0] [lindex $argv 1] [lindex $argv 2]"
    (file "tests/helpers/gen_write_load.tcl" line 15)
I/O error reading reply
    while executing
"$r set [expr rand()] [expr rand()]"
    (procedure "gen_write_load" line 6)
    invoked from within
"gen_write_load [lindex $argv 0] [lindex $argv 1] [lindex $argv 2]"
    (file "tests/helpers/gen_write_load.tcl" line 15)

I/O error reading reply
    while executing
"$r set [expr rand()] [expr rand()]"
    (procedure "gen_write_load" line 6)
    invoked from within
"gen_write_load [lindex $argv 0] [lindex $argv 1] [lindex $argv 2]"
    (file "tests/helpers/gen_write_load.tcl" line 15)
I/O error reading reply
    while executing
"$r set [expr rand()] [expr rand()]"
    (procedure "gen_write_load" line 6)
    invoked from within
"gen_write_load [lindex $argv 0] [lindex $argv 1] [lindex $argv 2]"
    (file "tests/helpers/gen_write_load.tcl" line 15)
Killing still running Redis server 126333
Killing still running Redis server 126344
Killing still running Redis server 126401
Killing still running Redis server 126425
Killing still running Redis server 126468
Killing still running Redis server 126506
Killing still running Redis server 126513
Killing still running Redis server 126557
I/O error reading reply
    while executing
"$r set [expr rand()] [expr rand()]"
    (procedure "gen_write_load" line 6)
    invoked from within
"gen_write_load [lindex $argv 0] [lindex $argv 1] [lindex $argv 2]"
    (file "tests/helpers/gen_write_load.tcl" line 15)
I/O error reading reply
    while executing
"$r set [expr rand()] [expr rand()]"
    (procedure "gen_write_load" line 6)
    invoked from within
"gen_write_load [lindex $argv 0] [lindex $argv 1] [lindex $argv 2]"
    (file "tests/helpers/gen_write_load.tcl" line 15)
I/O error reading reply
    while executing
"{*}$r hdel $k $f"
    ("uplevel" body line 1)
    invoked from within
"uplevel 1 [lindex $args $path]"
    (procedure "randpath" line 3)
    invoked from within
"randpath {{*}$r hset $k $f $v}  {{*}$r hdel $k $f}"
    (procedure "createComplexDataset" line 80)
    invoked from within
"createComplexDataset $r $ops"
    (procedure "bg_complex_data" line 4)
    invoked from within
"bg_complex_data [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3]"
    (file "tests/helpers/bg_complex_data.tcl" line 10)
I/O error reading reply
    while executing
"{*}$r zadd $k $d $v"
    ("uplevel" body line 1)
    invoked from within
"uplevel 1 [lindex $args $path]"
    (procedure "randpath" line 3)
    invoked from within
"randpath {{*}$r zadd $k $d $v}  {{*}$r zrem $k $v}  {
                            set otherzset [findKeyWithType {*}$r zset]
                         ..."
    (procedure "createComplexDataset" line 68)
    invoked from within
"createComplexDataset $r $ops"
    (procedure "bg_complex_data" line 4)
    invoked from within
"bg_complex_data [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3]"
    (file "tests/helpers/bg_complex_data.tcl" line 10)
I/O error reading reply
    while executing
"{*}$r randomkey"
    (procedure "findKeyWithType" line 3)
    invoked from within
"findKeyWithType {*}$r zset"
    ("uplevel" body line 2)
    invoked from within
"uplevel 1 [lindex $args $path]"
    (procedure "randpath" line 3)
    invoked from within
"randpath {{*}$r zadd $k $d $v}  {{*}$r zrem $k $v}  {
                            set otherzset [findKeyWithType {*}$r zset]
                         ..."
    (procedure "createComplexDataset" line 68)
    invoked from within
"createComplexDataset $r $ops"
    (procedure "bg_complex_data" line 4)
    invoked from within
"bg_complex_data [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3]"
    (file "tests/helpers/bg_complex_data.tcl" line 10)
I/O error reading reply
    while executing
"{*}$r zadd $k $d $v"
    ("uplevel" body line 2)
    invoked from within
"uplevel 1 [lindex $args $path]"
    (procedure "randpath" line 3)
    invoked from within
"randpath {
                {*}$r set $k $v
            } {
                {*}$r lpush $k $v
            } {
                {*}$r sadd $k $v
        ..."
    (procedure "createComplexDataset" line 30)
    invoked from within
"createComplexDataset $r $ops"
    (procedure "bg_complex_data" line 4)
    invoked from within
"bg_complex_data [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3]"
    (file "tests/helpers/bg_complex_data.tcl" line 10)
I/O error reading reply
    while executing
"{*}$r sadd $k $v"
    ("uplevel" body line 2)
    invoked from within
"uplevel 1 [lindex $args $path]"
    (procedure "randpath" line 3)
    invoked from within
"randpath {
                {*}$r set $k $v
            } {
                {*}$r lpush $k $v
            } {
                {*}$r sadd $k $v
        ..."
    (procedure "createComplexDataset" line 30)
    invoked from within
"createComplexDataset $r $ops"
    (procedure "bg_complex_data" line 4)
    invoked from within
"bg_complex_data [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3]"
    (file "tests/helpers/bg_complex_data.tcl" line 10)
I/O error reading reply
    while executing
"{*}$r randomkey"
    (procedure "findKeyWithType" line 3)
    invoked from within
"findKeyWithType {*}$r set"
    ("uplevel" body line 2)
    invoked from within
"uplevel 1 [lindex $args $path]"
    (procedure "randpath" line 3)
    invoked from within
"randpath {{*}$r sadd $k $v}  {{*}$r srem $k $v}  {
                            set otherset [findKeyWithType {*}$r set]
                            if..."
    (procedure "createComplexDataset" line 54)
    invoked from within
"createComplexDataset $r $ops"
    (procedure "bg_complex_data" line 4)
    invoked from within
"bg_complex_data [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3]"
    (file "tests/helpers/bg_complex_data.tcl" line 10)


child process exited abnormally
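
The run aborted inside tests/integration/logging.tcl: while the watchdog test ("Server is able to generate a stack trace on selected systems") was executing DEBUG SLEEP, the test client lost its connection ("I/O error reading reply"), after which the remaining servers and background write-load helpers were killed. To retry only that unit locally, a minimal sketch, assuming the test runner's --single option for selecting a single test file, would be:

    make valgrind -j 8
    ./runtest --valgrind --single integration/logging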