From bb9fa4e91093633b4ac750551e353c28ad6d8227 Mon Sep 17 00:00:00 2001 From: Etienne Renault Date: Thu, 1 Dec 2016 18:56:33 +0100 Subject: [PATCH] bricks: update and move to c++14 * Makefile.am, bricks/brick-assert, bricks/brick-assert.h, spot/ltsmin/ltsmin.cc, spot/mc/ec.hh: here. * bricks/brick-bitlevel.h, bricks/brick-hash.h, bricks/brick-hashset.h, bricks/brick-shmem.h, bricks/brick-types.h: Rename as ... * bricks/brick-bitlevel, bricks/brick-hash, bricks/brick-hashset, bricks/brick-shmem, bricks/brick-types: ... these --- Makefile.am | 6 +- bricks/brick-assert | 229 +++++ bricks/brick-assert.h | 203 ----- bricks/{brick-bitlevel.h => brick-bitlevel} | 346 +++++--- bricks/{brick-hash.h => brick-hash} | 21 +- bricks/{brick-hashset.h => brick-hashset} | 887 ++++++++++---------- bricks/{brick-shmem.h => brick-shmem} | 502 ++++++----- bricks/{brick-types.h => brick-types} | 461 +++++++--- spot/ltsmin/ltsmin.cc | 10 +- spot/mc/ec.hh | 2 +- 10 files changed, 1557 insertions(+), 1110 deletions(-) create mode 100644 bricks/brick-assert delete mode 100644 bricks/brick-assert.h rename bricks/{brick-bitlevel.h => brick-bitlevel} (63%) rename bricks/{brick-hash.h => brick-hash} (99%) rename bricks/{brick-hashset.h => brick-hashset} (62%) rename bricks/{brick-shmem.h => brick-shmem} (78%) rename bricks/{brick-types.h => brick-types} (73%) diff --git a/Makefile.am b/Makefile.am index 51e5c37f0..7ef91b5e8 100644 --- a/Makefile.am +++ b/Makefile.am @@ -39,9 +39,9 @@ SUBDIRS = picosat buddy lib ltdl spot bin tests $(PYTHON_SUBDIR) $(DOC_SUBDIR) \ UTF8 = utf8/README.md utf8/utf8.h \ utf8/utf8/checked.h utf8/utf8/core.h utf8/utf8/unchecked.h -nobase_include_HEADERS= bricks/brick-assert.h bricks/brick-bitlevel.h \ - bricks/brick-hash.h bricks/brick-hashset.h bricks/brick-shmem.h \ - bricks/brick-types.h +nobase_include_HEADERS= bricks/brick-assert bricks/brick-bitlevel \ + bricks/brick-hash bricks/brick-hashset bricks/brick-shmem \ + bricks/brick-types DEBIAN = \ debian/changelog \ 
diff --git a/bricks/brick-assert b/bricks/brick-assert new file mode 100644 index 000000000..ec483d26c --- /dev/null +++ b/bricks/brick-assert @@ -0,0 +1,229 @@ +// -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4 -*- + +/* + * Various assert macros based on C++ exceptions and their support code. + */ + +/* + * (c) 2006-2016 Petr Ročkai + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include + +#ifndef TEST +#define TEST(n) void n() +#define TEST_FAILING(n) void n() +#endif + +#ifndef NDEBUG + +#define BRICK_SHARP_FIRST(x, ...) #x +#define ASSERT(...) 
::brick::_assert::assert_fn( \ + BRICK_LOCWRAP( BRICK_LOCATION( BRICK_SHARP_FIRST( __VA_ARGS__, ignored ) ) ), __VA_ARGS__ ) +#define ASSERT_PRED(p, x) ::brick::_assert::assert_pred_fn( \ + BRICK_LOCWRAP( BRICK_LOCATION( #p "( " #x " )" ) ), x, p( x ) ) +#define ASSERT_EQ(x, y) ::brick::_assert::assert_eq_fn( \ + BRICK_LOCWRAP( BRICK_LOCATION( #x " == " #y ) ), x, y ) +#define ASSERT_LT(x, y) ::brick::_assert::assert_lt_fn( \ + BRICK_LOCWRAP( BRICK_LOCATION( #x " < " #y ) ), x, y ) +#define ASSERT_LEQ(x, y) ::brick::_assert::assert_leq_fn( \ + BRICK_LOCWRAP( BRICK_LOCATION( #x " <= " #y ) ), x, y ) +#define ASSERT_NEQ(x, y) ::brick::_assert::assert_neq_fn( \ + BRICK_LOCWRAP( BRICK_LOCATION( #x " != " #y ) ), x, y ) +#define ASSERT_EQ_IDX(i, x, y) ::brick::_assert::assert_eq_fn( \ + BRICK_LOCWRAP( BRICK_LOCATION_I( #x " == " #y, i ) ), x, y ) + +#else + +#define ASSERT(...) static_cast< decltype(__VA_ARGS__, void(0)) >(0) +#define ASSERT_PRED(p, x) static_cast< decltype(p, x, void(0)) >(0) +#define ASSERT_EQ(x, y) static_cast< decltype(x, y, void(0)) >(0) +#define ASSERT_LEQ(x, y) static_cast< decltype(x, y, void(0)) >(0) +#define ASSERT_LT(x, y) static_cast< decltype(x, y, void(0)) >(0) +#define ASSERT_NEQ(x, y) static_cast< decltype(x, y, void(0)) >(0) +#define ASSERT_EQ_IDX(i, x, y) static_cast< decltype(i, x, y, void(0)) >(0) +#endif + +/* you must #include to use ASSERT_UNREACHABLE_F */ +#define UNREACHABLE_F(...) 
::brick::_assert::assert_die_fn( \ + BRICK_LOCWRAP( BRICK_LOCATION( brick::string::fmtf(__VA_ARGS__) ) ) ) +#define UNREACHABLE(x) ::brick::_assert::assert_die_fn( \ + BRICK_LOCWRAP( BRICK_LOCATION( x ) ) ) +#define UNREACHABLE_() ::brick::_assert::assert_die_fn( \ + BRICK_LOCWRAP( BRICK_LOCATION( "an unreachable location" ) ) ) +#define NOT_IMPLEMENTED() ::brick::_assert::assert_die_fn( \ + BRICK_LOCWRAP( BRICK_LOCATION( "a missing implementation" ) ) ) + +#ifdef _MSC_VER +#define UNUSED +#define noexcept +#else +#define UNUSED __attribute__((unused)) +#endif + +#ifndef BRICK_ASSERT_H +#define BRICK_ASSERT_H + +namespace brick { +namespace _assert { + +/* discard any number of parameters, taken as const references */ +template< typename... X > +void unused( const X&... ) { } + +struct Location { + int line, iteration; + std::string file, stmt; + Location( const char *f, int l, std::string st, int iter = -1 ) + : line( l ), iteration( iter ), file( f ), stmt( st ) + { + int slashes = 0; + for ( int i = 0; i < int( file.size() ); ++i ) + if ( file[i] == '/' ) + ++ slashes; + + while ( slashes >= 3 ) + { + file = std::string( file, file.find( "/" ) + 1, std::string::npos ); + -- slashes; + } + if ( f != file ) + file = ".../" + file; + } +}; + +#define BRICK_LOCATION(stmt) ::brick::_assert::Location( __FILE__, __LINE__, stmt ) +#define BRICK_LOCATION_I(stmt, i) ::brick::_assert::Location( __FILE__, __LINE__, stmt, i ) + +// lazy location construction in C++11 +#if __cplusplus >= 201103L +#define BRICK_LOCWRAP(x) [&]{ return (x); } +#define BRICK_LOCUNWRAP(x) (x)() +#else +#define BRICK_LOCWRAP(x) (x) +#define BRICK_LOCUNWRAP(x) (x) +#endif + +struct AssertFailed : std::exception +{ + std::string str; + + template< typename X > + friend inline AssertFailed &operator<<( AssertFailed &f, X x ) + { + std::stringstream str; + str << x; + f.str += str.str(); + return f; + } + + AssertFailed( Location l, const char *expected = "expected" ) + { + (*this) << l.file << ": " << 
l.line; + if ( l.iteration != -1 ) + (*this) << " (iteration " << l.iteration << ")"; + (*this) << ":\n " << expected << " " << l.stmt; + } + + const char *what() const noexcept override { return str.c_str(); } +}; + +static inline void format( AssertFailed & ) {} + +template< typename X, typename... Y > +void format( AssertFailed &f, X x, Y... y ) +{ + f << x; + format( f, y... ); +} + +template< typename Location, typename X, typename... Y > +void assert_fn( Location l, X x, Y... y ) +{ + if ( x ) + return; + AssertFailed f( BRICK_LOCUNWRAP( l ) ); + format( f, y... ); + throw f; +} + +template< typename Location > +inline void assert_die_fn( Location l ) __attribute__((noreturn)); + +template< typename Location > +inline void assert_die_fn( Location l ) +{ + throw AssertFailed( BRICK_LOCUNWRAP( l ), "encountered" ); +} + +#define ASSERT_FN(name, op, inv) \ + template< typename Location > \ + void assert_ ## name ## _fn( Location l, int64_t x, int64_t y ) \ + { \ + if ( !( x op y ) ) { \ + AssertFailed f( BRICK_LOCUNWRAP( l ) ); \ + f << "\n but got " \ + << x << " " #inv " " << y << "\n"; \ + throw f; \ + } \ + } \ + \ + template< typename Location, typename X, typename Y > \ + auto assert_ ## name ## _fn( Location l, X x, Y y ) \ + -> typename std::enable_if< \ + !std::is_integral< X >::value || \ + !std::is_integral< Y >::value >::type \ + { \ + if ( !( x op y ) ) { \ + AssertFailed f( BRICK_LOCUNWRAP( l ) ); \ + f << "\n but got " \ + << x << " " #inv " " << y << "\n"; \ + throw f; \ + } \ + } + +ASSERT_FN(eq, ==, !=); +ASSERT_FN(leq, <=, >); +ASSERT_FN(lt, <, >=); + +template< typename Location, typename X > +void assert_pred_fn( Location l, X x, bool p ) +{ + if ( !p ) { + AssertFailed f( BRICK_LOCUNWRAP( l ) ); + f << "\n but got x = " << x << "\n"; + throw f; + } +} + +template< typename Location, typename X, typename Y > +void assert_neq_fn( Location l, X x, Y y ) +{ + if ( x != y ) + return; + AssertFailed f( BRICK_LOCUNWRAP( l ) ); + f << "\n but got " 
+ << x << " == " << y << "\n"; + throw f; +} + +} +} + +#endif + +// vim: syntax=cpp tabstop=4 shiftwidth=4 expandtab diff --git a/bricks/brick-assert.h b/bricks/brick-assert.h deleted file mode 100644 index 3efc55951..000000000 --- a/bricks/brick-assert.h +++ /dev/null @@ -1,203 +0,0 @@ -// -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4 -*- - -/* - * Various assert macros based on C++ exceptions and their support code. - */ - -/* - * (c) 2006-2014 Petr Ročkai - */ - -/* Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
*/ - -#include -#include -#include - -#ifdef __divine__ -#include -#endif - -#ifndef TEST -#define TEST(n) void n() -#define TEST_FAILING(n) void n() -#endif - -#ifdef __divine__ -#define ASSERT(x) assert( x ) -#define ASSERT_PRED(p, x) assert( p( x ) ) -#define ASSERT_EQ(x, y) assert( (x) == (y) ) -#define ASSERT_LEQ(x, y) assert( (x) <= (y) ) -#define ASSERT_NEQ(x, y) assert ( (x) != (y) ) -#define ASSERT_EQ_IDX(i, x, y) assert( (x) == (y) ) - -#elif !defined NDEBUG -#define ASSERT(x) ::brick::_assert::assert_fn( BRICK_LOCWRAP( BRICK_LOCATION( #x ) ), x ) -#define ASSERT_PRED(p, x) ::brick::_assert::assert_pred_fn( BRICK_LOCWRAP( BRICK_LOCATION( #p "( " #x " )" ) ), x, p( x ) ) -#define ASSERT_EQ(x, y) ::brick::_assert::assert_eq_fn( BRICK_LOCWRAP( BRICK_LOCATION( #x " == " #y ) ), x, y ) -#define ASSERT_LEQ(x, y) ::brick::_assert::assert_leq_fn( BRICK_LOCWRAP( BRICK_LOCATION( #x " <= " #y ) ), x, y ) -#define ASSERT_NEQ(x, y) ::brick::_assert::assert_neq_fn( BRICK_LOCWRAP( BRICK_LOCATION( #x " != " #y ) ), x, y ) -#define ASSERT_EQ_IDX(i, x, y) ::brick::_assert::assert_eq_fn( BRICK_LOCWRAP( BRICK_LOCATION_I( #x " == " #y, i ) ), x, y ) - -#else - -#define ASSERT(x) ((void)0) -#define ASSERT_PRED(p, x) ((void)0) -#define ASSERT_EQ(x, y) ((void)0) -#define ASSERT_LEQ(x, y) ((void)0) -#define ASSERT_NEQ(x, y) ((void)0) -#define ASSERT_EQ_IDX(i, x, y) ((void)0) -#endif - -/* you must #include to use ASSERT_UNREACHABLE_F */ -#define ASSERT_UNREACHABLE_F(...) 
::brick::_assert::assert_die_fn( BRICK_LOCATION( brick::string::fmtf(__VA_ARGS__) ) ) -#define ASSERT_UNREACHABLE(x) ::brick::_assert::assert_die_fn( BRICK_LOCATION( x ) ) -#define ASSERT_UNIMPLEMENTED() ::brick::_assert::assert_die_fn( BRICK_LOCATION( "not imlemented" ) ) - -#ifdef _MSC_VER -#define UNUSED -#define noexcept -#else -#define UNUSED __attribute__((unused)) -#endif - -#ifndef BRICK_ASSERT_H -#define BRICK_ASSERT_H - -namespace brick { -namespace _assert { - -/* discard any number of paramentets, taken as const references */ -template< typename... X > -void unused( const X&... ) { } - -struct Location { - const char *file; - int line, iteration; - std::string stmt; - Location( const char *f, int l, std::string st, int iter = -1 ) - : file( f ), line( l ), iteration( iter ), stmt( st ) {} -}; - -#define BRICK_LOCATION(stmt) ::brick::_assert::Location( __FILE__, __LINE__, stmt ) -#define BRICK_LOCATION_I(stmt, i) ::brick::_assert::Location( __FILE__, __LINE__, stmt, i ) - -// lazy location construction in C++11 -#if __cplusplus >= 201103L -#define BRICK_LOCWRAP(x) [&]{ return (x); } -#define BRICK_LOCUNWRAP(x) (x)() -#else -#define BRICK_LOCWRAP(x) (x) -#define BRICK_LOCUNWRAP(x) (x) -#endif - - -struct AssertFailed : std::exception { - std::string str; - - template< typename X > - friend inline AssertFailed &operator<<( AssertFailed &f, X x ) - { - std::stringstream str; - str << x; - f.str += str.str(); - return f; - } - - AssertFailed( Location l ) - { - (*this) << l.file << ": " << l.line; - if ( l.iteration != -1 ) - (*this) << " (iteration " << l.iteration << ")"; - (*this) << ": assertion `" << l.stmt << "' failed;"; - } - - const char *what() const noexcept override { return str.c_str(); } -}; - -template< typename Location, typename X > -void assert_fn( Location l, X x ) -{ - if ( !x ) { - throw AssertFailed( BRICK_LOCUNWRAP( l ) ); - } -} - -inline void assert_die_fn( Location l ) __attribute__((noreturn)); - -inline void assert_die_fn( 
Location l ) -{ - throw AssertFailed( l ); -} - -template< typename Location, typename X, typename Y > -void assert_eq_fn( Location l, X x, Y y ) -{ - if ( !( x == y ) ) { - AssertFailed f( BRICK_LOCUNWRAP( l ) ); - f << " got [" - << x << "] != [" << y - << "] instead"; - throw f; - } -} - -template< typename Location, typename X, typename Y > -void assert_leq_fn( Location l, X x, Y y ) -{ - if ( !( x <= y ) ) { - AssertFailed f( BRICK_LOCUNWRAP( l ) ); - f << " got [" - << x << "] > [" << y - << "] instead"; - throw f; - } -} - -template< typename Location, typename X > -void assert_pred_fn( Location l, X x, bool p ) -{ - if ( !p ) { - AssertFailed f( BRICK_LOCUNWRAP( l ) ); - f << " for " << x; - throw f; - } -} - -template< typename Location, typename X, typename Y > -void assert_neq_fn( Location l, X x, Y y ) -{ - if ( x != y ) - return; - AssertFailed f( BRICK_LOCUNWRAP( l ) ); - f << " got [" - << x << "] == [" << y << "] instead"; - throw f; -} - -} -} - -#endif - -// vim: syntax=cpp tabstop=4 shiftwidth=4 expandtab diff --git a/bricks/brick-bitlevel.h b/bricks/brick-bitlevel similarity index 63% rename from bricks/brick-bitlevel.h rename to bricks/brick-bitlevel index dbc4b61ee..0d81c1582 100644 --- a/bricks/brick-bitlevel.h +++ b/bricks/brick-bitlevel @@ -7,33 +7,25 @@ /* * (c) 2013-2014 Jiří Weiser * (c) 2013 Petr Ročkai + * (c) 2015 Vladimír Štill + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. */ - -#include +#include "brick-assert" #include +#include #ifdef __linux #include @@ -43,10 +35,6 @@ #define LITTLE_ENDIAN 1234 #endif -#ifndef bswap_64 -#define bswap_64 __builtin_bswap64 -#endif - #include #include @@ -75,7 +63,7 @@ constexpr unsigned MSB( T x ) { template< typename T > constexpr T fill( T x ) { - return x ? 
x | fill( x >> 1 ) : x; + return x ? x | compiletime::fill( x >> 1 ) : x; } template< typename T > @@ -83,6 +71,77 @@ constexpr size_t sizeOf() { return std::is_empty< T >::value ? 0 : sizeof( T ); } +template< typename T > +constexpr T ones( int bits ) +{ + return bits ? ( T(1) << ( bits - 1 ) ) | compiletime::ones< T >( bits - 1 ) : 0; +} + +} + +using compiletime::ones; + +template< typename L, typename H > +struct bvpair +{ + L low; H high; + constexpr bvpair( L l, H h = 0 ) : low( l ), high( h ) {} + constexpr bvpair() = default; + explicit constexpr operator bool() const { return low || high; } + constexpr bvpair operator<<( int s ) const + { + int rem = 8 * sizeof( low ) - s; + int unshift = std::max( rem, 0 ); + int shift = rem < 0 ? -rem : 0; + H carry = ( low & ~ones< L >( unshift ) ) >> unshift; + return bvpair( low << s, ( high << s ) | ( carry << shift ) ); + } + constexpr bvpair operator>>( int s ) const + { + int rem = 8 * sizeof( low ) - s; + int unshift = std::max( rem, 0 ); + int shift = rem < 0 ? 
-rem : 0; + L carry = L( high & ones< H >( s ) ) << unshift; + return bvpair( ( low >> s ) | ( carry >> shift ), high >> s ); + } + constexpr bvpair operator&( bvpair o ) const { return bvpair( o.low & low, o.high & high ); } + constexpr bvpair operator|( bvpair o ) const { return bvpair( o.low | low, o.high | high ); } + bvpair &operator|=( bvpair o ) { return *this = *this | o; } + constexpr bool operator==( bvpair o ) const { return o.low == low && o.high == high; } + constexpr bool operator!=( bvpair o ) const { return o.low != low || o.high != high; } + constexpr bvpair operator+() const { return *this; } + friend std::ostream &operator<<( std::ostream &o, bvpair p ) { + return o << p.high << "_" << p.low; } +} __attribute__((packed)); + +template< int i > struct _bitvec { using T = typename _bitvec< i + 1 >::T; }; +template<> struct _bitvec< 8 > { using T = uint8_t; }; +template<> struct _bitvec< 16 > { using T = uint16_t; }; +template<> struct _bitvec< 32 > { using T = uint32_t; }; +template<> struct _bitvec< 64 > { using T = uint64_t; }; +template<> struct _bitvec< 80 > { using T = bvpair< uint64_t, uint16_t >; }; +template<> struct _bitvec< 128 > { using T = bvpair< uint64_t, uint64_t >; }; + +template< int i > using bitvec = typename _bitvec< i >::T; + +namespace { + +uint32_t mixdown( uint64_t i ) /* due to Thomas Wang */ +{ + i = (~i) + (i << 18); + i = i ^ (i >> 31); + i = i * 21; + i = i ^ (i >> 11); + i = i + (i << 6); + i = i ^ (i >> 22); + return i; +} + +__attribute__((unused)) uint32_t mixdown( uint32_t a, uint32_t b ) +{ + return mixdown( ( uint64_t( a ) << 32 ) | uint64_t( b ) ); +} + } /* @@ -144,28 +203,46 @@ static inline number withoutMSB( number x ) { return x & ~onlyMSB( x ); } -inline uint64_t bitshift( uint64_t t, int shift ) { -#if BYTE_ORDER == LITTLE_ENDIAN - return bswap_64( shift < 0 ? bswap_64( t << -shift ) : bswap_64( t >> shift ) ); -#else +inline constexpr uint64_t bitshift( uint64_t t, int shift ) { return shift < 0 ? 
( t << -shift ) : ( t >> shift ); -#endif +} + +inline constexpr uint64_t mask( int first, int count ) { + return (uint64_t(-1) << first) & (uint64_t(-1) >> (64 - first - count)); } struct BitPointer { + using Storage = uint32_t; + static constexpr int storageBits = sizeof( Storage ) * 8; + BitPointer() : base( nullptr ), _bitoffset( 0 ) {} template< typename T > BitPointer( T *t, int offset = 0 ) : base( static_cast< void * >( t ) ), _bitoffset( offset ) { normalize(); } - uint32_t &word() { ASSERT( valid() ); return *static_cast< uint32_t * >( base ); } - uint64_t &dword() { ASSERT( valid() ); return *static_cast< uint64_t * >( base ); } + + template< typename T > + T &ref() { ASSERT( valid() ); return *static_cast< T * >( base ); } + uint32_t &word() { return ref< uint32_t >(); } + uint64_t &dword() { return ref< uint64_t >(); } + + // unsafe version does not cross word boundary + uint32_t getUnsafe( int bits ) { return _get< uint32_t >( bits ); } + uint32_t get( int bits ) { + return bits + _bitoffset <= 32 ? _get< uint32_t >( bits ) : _get< uint64_t >( bits ); + } + + void setUnsafe( uint32_t val, int bits ) { return _set< uint32_t >( val, bits ); } + void set( uint32_t val, int bits ) { + return bits + _bitoffset <= 32 ? 
_set< uint32_t >( val, bits ) : _set< uint64_t >( val, bits ); + } + void normalize() { - int shift = downalign( _bitoffset, 32 ); + int shift = downalign( _bitoffset, storageBits ); _bitoffset -= shift; ASSERT_EQ( shift % 8, 0 ); - base = static_cast< uint32_t * >( base ) + shift / 32; + base = static_cast< Storage * >( base ) + shift / storageBits; } void shift( int bits ) { _bitoffset += bits; normalize(); } void fromReference( BitPointer r ) { *this = r; } @@ -174,11 +251,26 @@ struct BitPointer { private: void *base; int _bitoffset; -}; -inline uint64_t mask( int first, int count ) { - return bitshift(uint64_t(-1), -first) & bitshift(uint64_t(-1), (64 - first - count)); -} + template< typename T > + uint32_t _get( int bits ) { + static_assert( std::is_unsigned< T >::value, "T has to be unsigned numeric type" ); + ASSERT( valid() ); + ASSERT_LEQ( 0, bits ); + ASSERT_LEQ( bits, 32 ); + ASSERT_LEQ( bits + _bitoffset, int( sizeof( T ) * 8 ) ); + return (ref< T >() >> _bitoffset) & mask( 0, bits ); + } + + template< typename T > + void _set( uint32_t val, int bits ) { + static_assert( std::is_unsigned< T >::value, "T has to be unsigned numeric type" ); + ASSERT_EQ( val & ~mask( 0, bits ), 0u ); + ASSERT_LEQ( bits, 32 ); + ASSERT_LEQ( bits + _bitoffset, int( sizeof( T ) * 8 ) ); + ref< T >() = (ref< T >() & ~mask( _bitoffset, bits )) | (T(val) << _bitoffset); + } +}; /* * NB. This function will alias whatever "to" points to with an uint64_t. 
With @@ -191,17 +283,20 @@ inline uint64_t mask( int first, int count ) { inline void bitcopy( BitPointer from, BitPointer to, int bitcount ) { while ( bitcount ) { - int w = std::min( 32 - from.bitoffset(), bitcount ); - uint32_t fmask = mask( from.bitoffset(), w ); - uint64_t tmask = mask( to.bitoffset(), w ); - uint64_t bits = bitshift( from.word() & fmask, from.bitoffset() - to.bitoffset() ); - ASSERT_EQ( bits & ~tmask, 0u ); - ASSERT_EQ( bits & tmask, bits ); - if ( to.bitoffset() + bitcount > 32 ) - to.dword() = (to.dword() & ~tmask) | bits; - else - to.word() = (to.word() & ~static_cast< uint32_t >( tmask )) | static_cast< uint32_t >( bits ); - from.shift( w ); to.shift( w ); bitcount -= w; // slide + if ( from.bitoffset() == 0 && to.bitoffset() == 0 + && bitcount >= BitPointer::storageBits ) + { + const int cnt = bitcount / BitPointer::storageBits; + std::copy( &from.word(), &from.word() + cnt, &to.word() ); + const int bitcnt = cnt * BitPointer::storageBits; + from.shift( bitcnt ); + to.shift( bitcnt ); + bitcount -= bitcnt; + } else { + int w = std::min( BitPointer::storageBits - from.bitoffset(), bitcount ); + to.set( from.getUnsafe( w ), w ); + from.shift( w ); to.shift( w ); bitcount -= w; // slide + } } } @@ -209,8 +304,9 @@ template< typename T, int width = sizeof( T ) * 8 > struct BitField { static const int bitwidth = width; - struct Virtual : BitPointer { - void set( T t ) { bitcopy( BitPointer( &t ), *this, bitwidth ); } + struct Virtual : BitPointer + { + T set( T t ) { bitcopy( BitPointer( &t ), *this, bitwidth ); return t; } Virtual operator=( T t ) { set( t ); return *this; @@ -254,41 +350,24 @@ struct BitField set( value ); return result; } - template< typename U > - Virtual operator+=( U value ) { - T t( get() ); - t += value; - set( t ); - return *this; - } - template< typename U > - Virtual operator-=( U value ) { - T t( get() ); - t -= value; - set( t ); - return *this; - } - template< typename U > - Virtual operator*=( U value ) { - T 
t( get() ); - t *= value; - set( t ); - return *this; - } - template< typename U > - Virtual operator/=( U value ) { - T t( get() ); - t /= value; - set( t ); - return *this; - } - template< typename U > - Virtual operator%=( U value ) { - T t( get() ); - t %= value; - set( t ); - return *this; + +#define OP(__op) \ + template< typename U > \ + Virtual operator __op( U value ) { \ + T t( get() ); \ + t __op value; \ + set( t ); \ + return *this; \ } + + OP(+=); + OP(-=); + OP(*=); + OP(/=); + OP(%=); + OP(|=); + OP(&=); +#undef OP }; }; @@ -346,6 +425,16 @@ template< typename... Args > struct BitTuple : _BitTuple< Args... > char storage[ align( Virtual::bitwidth, 32 ) / 8 ]; BitTuple() { std::fill( storage, storage + sizeof( storage ), 0 ); } operator BitPointer() { return BitPointer( storage ); } + bool operator<( const BitTuple &o ) const + { + return std::lexicographical_compare( storage, storage + sizeof( storage ), + o.storage, o.storage + sizeof( storage ) ); + } + bool operator==( const BitTuple &o ) const + { + return std::equal( storage, storage + sizeof( storage ), + o.storage, o.storage + sizeof( storage ) ); + } }; template< int I, typename BT > @@ -357,11 +446,9 @@ typename BT::template AccessAt< I >::T::Head get( BT &bt ) return t; } -} } -namespace brick_test { -namespace bitlevel { +namespace t_bitlevel { using namespace ::brick::bitlevel; @@ -575,15 +662,15 @@ struct BitTupleTest { struct OperatorTester { int value; int expected; - OperatorTester &operator++() { ASSERT_UNREACHABLE( "fell through" ); return *this; } - OperatorTester operator++( int ) { ASSERT_UNREACHABLE( "fell through" ); return *this; } - OperatorTester &operator--() { ASSERT_UNREACHABLE( "fell through" ); return *this; } - OperatorTester &operator--( int ) { ASSERT_UNREACHABLE( "fell through" ); return *this; } - OperatorTester &operator+=( int ) { ASSERT_UNREACHABLE( "fell through" ); return *this; } - OperatorTester &operator-=( int ) { ASSERT_UNREACHABLE( "fell through" ); 
return *this; } - OperatorTester &operator*=( int ) { ASSERT_UNREACHABLE( "fell through" ); return *this; } - OperatorTester &operator/=( int ) { ASSERT_UNREACHABLE( "fell through" ); return *this; } - OperatorTester &operator%=( int ) { ASSERT_UNREACHABLE( "fell through" ); return *this; } + OperatorTester &operator++() { UNREACHABLE( "fell through" ); return *this; } + OperatorTester operator++( int ) { UNREACHABLE( "fell through" ); return *this; } + OperatorTester &operator--() { UNREACHABLE( "fell through" ); return *this; } + OperatorTester &operator--( int ) { UNREACHABLE( "fell through" ); return *this; } + OperatorTester &operator+=( int ) { UNREACHABLE( "fell through" ); return *this; } + OperatorTester &operator-=( int ) { UNREACHABLE( "fell through" ); return *this; } + OperatorTester &operator*=( int ) { UNREACHABLE( "fell through" ); return *this; } + OperatorTester &operator/=( int ) { UNREACHABLE( "fell through" ); return *this; } + OperatorTester &operator%=( int ) { UNREACHABLE( "fell through" ); return *this; } void test() { ASSERT_EQ( value, expected ); } void set( int v, int e ) { value = v; expected = e; } }; @@ -652,6 +739,65 @@ struct BitTupleTest { CHECK( 9, bt, 42, 9, item %= 11 ); } #undef CHECK + + TEST(ones) + { + ASSERT_EQ( bitlevel::ones< uint32_t >( 0 ), 0 ); + ASSERT_EQ( bitlevel::ones< uint32_t >( 1 ), 1 ); + ASSERT_EQ( bitlevel::ones< uint32_t >( 2 ), 3 ); + ASSERT_EQ( bitlevel::ones< uint32_t >( 31 ), std::numeric_limits< uint32_t >::max() >> 1 ); + ASSERT_EQ( bitlevel::ones< uint32_t >( 32 ), std::numeric_limits< uint32_t >::max() ); + ASSERT_EQ( bitlevel::ones< uint32_t >( 33 ), std::numeric_limits< uint32_t >::max() ); + } +}; + +struct BitVecTest +{ + TEST(bvpair_shiftl) + { + using bvp32 = bitlevel::bvpair< uint16_t, uint16_t >; + union { + bvp32 bvp; + uint32_t val; + }; + bvp = bvp32( 23, 13 ); + uint32_t check = ( 13u << 16 ) | 23u; + ASSERT_EQ( val, check ); + bvp = bvp << 7; + check = check << 7; + ASSERT_EQ( val, check 
); + bvp = bvp << 18; + check = check << 18; + ASSERT_EQ( val, check ); + bvp = bvp32( 0xFF, 0xFF ); + check = (0xFF << 16) | 0xFF; + bvp = bvp << 20; + check = check << 20; + ASSERT_EQ( val, check ); + } + + TEST(bvpair_shiftr) + { + using bvp32 = bitlevel::bvpair< uint16_t, uint16_t >; + union { + bvp32 bvp; + uint32_t val; + }; + bvp = bvp32( 23, 13 ); + uint32_t check = ( 13u << 16 ) | 23u; + ASSERT_EQ( val, check ); + bvp = bvp >> 7; + check = check >> 7; + ASSERT_EQ( val, check ); + bvp = bvp >> 18; + check = check >> 18; + ASSERT_EQ( val, check ); + bvp = bvp32( 0xFF, 0xFF ); + check = (0xFF << 16) | 0xFF; + bvp = bvp >> 20; + check = check >> 20; + ASSERT_EQ( val, check ); + } }; } diff --git a/bricks/brick-hash.h b/bricks/brick-hash similarity index 99% rename from bricks/brick-hash.h rename to bricks/brick-hash index 8128f2131..cb0a2b950 100644 --- a/bricks/brick-hash.h +++ b/bricks/brick-hash @@ -29,9 +29,7 @@ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ -#include - -#include +#include "brick-assert" #include #include // pair @@ -51,7 +49,8 @@ typedef uint16_t uint16; typedef uint8_t uint8; #endif -#include +#include +#include #define ALLOW_UNALIGNED_READS 1 @@ -61,6 +60,7 @@ namespace brick { namespace hash { +typedef uint32_t hash32_t; typedef uint64_t hash64_t; typedef std::pair< hash64_t, hash64_t > hash128_t; @@ -220,6 +220,9 @@ public: // Is this message fragment too short? If it is, stuff it away. 
if (newLength < sc_bufSize) { +#ifndef NDEBUG + if ( length > sc_bufSize ) abort(); +#endif memcpy(&reinterpret_cast< uint8 * >( m_data )[m_remainder], message, length); m_length = length + m_length; m_remainder = uint8( newLength ); @@ -677,19 +680,17 @@ struct SpookyState { namespace { -inline hash128_t spooky( const void *message, size_t length, uint64_t seed1, uint64_t seed2 ) { +inline hash128_t spooky( const void *message, size_t length, uint64_t seed1 = 0, uint64_t seed2 = 0 ) { return jenkins::SpookyHash::Hash128( message, length, seed1, seed2 ); } } -} } -namespace brick_test { -namespace hash { +namespace t_hash { -using namespace ::brick::hash; +using namespace hash; class Random { @@ -813,7 +814,7 @@ struct Jenkins { saw[i] = SpookyHash::Hash32(buf, i, 0); if (saw[i] != expected[i]) { - printf("%3d: saw 0x%.8x, expected 0x%.8lx\n", i, saw[i], (unsigned long) expected[i]); + printf("%3d: saw 0x%.8x, expected 0x%.8" PRIx64 "\n", i, saw[i], expected[i]); ASSERT( false ); } } diff --git a/bricks/brick-hashset.h b/bricks/brick-hashset similarity index 62% rename from bricks/brick-hashset.h rename to bricks/brick-hashset index 30cdbb460..60ad96365 100644 --- a/bricks/brick-hashset.h +++ b/bricks/brick-hashset @@ -1,60 +1,74 @@ // -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4 -*- /* - * Fast hash tables. - */ - -/* - * (c) 2010-2014 Petr Ročkai + * (c) 2010-2015 Petr Ročkai * (c) 2012-2014 Jiří Weiser * (c) 2013-2014 Vladimír Štill + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/* Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
*/ +#pragma once -#include - -#include -#include -#include -#include -#include +#include "brick-hash" +#include "brick-shmem" +#include "brick-bitlevel" +#include "brick-assert" +#include "brick-types" #include #include -#ifndef BRICK_HASHSET_H -#define BRICK_HASHSET_H +/* + * Various fast hash table implementations, including a concurrent-access hash + * table. See also ... + */ namespace brick { namespace hashset { +using hash::hash32_t; using hash::hash64_t; using hash::hash128_t; +struct DefaultHasher +{ + auto hash( int64_t v ) const + { + return std::make_pair( v, ~v ); + } + + template< typename X > + auto hash( const X &t ) const -> decltype( t.hash() ) + { + return t.hash(); + } + + template< typename X > + auto equal( const X &s, const X &t ) const -> decltype( s == t ) + { + return s == t; + } +}; + +static inline hash64_t highbits( hash64_t orig, int bits ) +{ + // use a different part of the hash than what we use for indexing + return orig >> ( sizeof( hash64_t ) * 8 - bits ); +} + /* - * Hash table cell implementations (tables are represented as vectors of - * cells). + * Tables are represented as vectors of cells. 
*/ template< typename T, typename _Hasher > @@ -107,45 +121,44 @@ struct CompactCell : CellBase< T, Hasher > template< typename T, typename Hasher > struct FastAtomicCell : CellBase< T, Hasher > { - std::atomic< hash64_t > hashLock; - T value; + std::atomic< hash32_t > _hashlock; + T _value; - bool empty() { return hashLock == 0; } - bool invalid() { return hashLock == 3; } + bool empty() { return _hashlock == 0; } + bool invalid() { return _hashlock == 3; } /* returns old cell value */ - FastAtomicCell invalidate() { + FastAtomicCell invalidate() + { // wait for write to end - hash64_t prev = 0; - while ( !hashLock.compare_exchange_weak( prev, 0x3 ) ) { + hash32_t prev = 0; + while ( !_hashlock.compare_exchange_weak( prev, 0x3 ) ) { if ( prev == 3 ) - return FastAtomicCell( prev, value ); + return FastAtomicCell( prev, _value ); prev &= ~(0x3); // clean flags } - return FastAtomicCell( prev, value ); + return FastAtomicCell( prev, _value ); } - T &fetch() { return value; } - T copy() { return value; } + T &fetch() { return _value; } + T copy() { return _value; } - // TODO: this loses bits and hence doesn't quite work - // hash64_t hash( Hasher & ) { return hashLock >> 2; } - hash64_t hash( Hasher &h ) { return h.hash( value ).first; } + hash64_t hash( Hasher &h ) { return h.hash( _value ).first; } // wait for another write; returns false if cell was invalidated bool wait() { - while( hashLock & 1 ) + while( _hashlock & 1 ) if ( invalid() ) return false; return true; } - bool tryStore( T v, hash64_t hash ) { + bool tryStore( T v, hash32_t hash ) { hash |= 0x1; - hash64_t chl = 0; - if ( hashLock.compare_exchange_strong( chl, (hash << 2) | 1 ) ) { - value = v; - hashLock.exchange( hash << 2 ); + hash32_t chl = 0; + if ( _hashlock.compare_exchange_strong( chl, (hash << 2) | 1 ) ) { + _value = v; + _hashlock.exchange( hash << 2 ); return true; } return false; @@ -154,37 +167,38 @@ struct FastAtomicCell : CellBase< T, Hasher > template< typename Value > bool is( Value 
v, hash64_t hash, Hasher &h ) { hash |= 0x1; - if ( ( (hash << 2) | 1) != (hashLock | 1) ) + if ( ( (hash << 2) | 1) != (_hashlock | 1) ) return false; if ( !wait() ) return false; - return h.equal( value, v ); + return h.equal( _value, v ); } - FastAtomicCell() : hashLock( 0 ), value() {} - FastAtomicCell( const FastAtomicCell & ) : hashLock( 0 ), value() {} - FastAtomicCell( hash64_t hash, T value ) : hashLock( hash ), value( value ) { } + FastAtomicCell() : _hashlock( 0 ), _value() {} + FastAtomicCell( const FastAtomicCell & ) : _hashlock( 0 ), _value() {} + FastAtomicCell( hash64_t hash, T value ) : _hashlock( hash ), _value( value ) { } }; template< typename T, typename = void > -struct Tagged { +struct Tagged +{ T t; uint32_t _tag; - static const int tagBits = 16; - void setTag( uint32_t v ) { _tag = v; } + static const int tag_bits = 16; + void tag( uint32_t v ) { _tag = v; } uint32_t tag() { return _tag; } Tagged() noexcept : t(), _tag( 0 ) {} Tagged( const T &t ) : t( t ), _tag( 0 ) {} }; template< typename T > -struct Tagged< T, typename std::enable_if< (T::tagBits > 0) >::type > +struct Tagged< T, typename std::enable_if< (T::tag_bits > 0) >::type > { T t; - static const int tagBits = T::tagBits; - void setTag( uint32_t value ) { t.setTag( value ); } + static const int tag_bits = T::tag_bits; + void tag( uint32_t value ) { t.tag( value ); } uint32_t tag() { return t.tag(); } Tagged() noexcept : t() {} Tagged( const T &t ) : t( t ) {} @@ -197,7 +211,7 @@ struct AtomicCell : CellBase< T, Hasher > static_assert( sizeof( std::atomic< Tagged< T > > ) == sizeof( Tagged< T > ), "std::atomic< Tagged< T > > must be lock-free" ); - static_assert( Tagged< T >::tagBits > 0, "T has at least a one-bit tagspace" ); + static_assert( Tagged< T >::tag_bits > 0, "T has at least a one-bit tagspace" ); bool empty() { return !value.load().t; } bool invalid() { @@ -205,16 +219,10 @@ struct AtomicCell : CellBase< T, Hasher > return (v.tag() == 0 && v.t) || (v.tag() != 0 && 
!v.t); } - static hash64_t hashToTag( hash64_t hash, int bits = Tagged< T >::tagBits ) - { - // use different part of hash than used for storing - return ( hash >> ( sizeof( hash64_t ) * 8 - bits ) ) | 0x1; - } - /* returns old cell value */ AtomicCell invalidate() { Tagged< T > v = value; - v.setTag( v.tag() ? 0 : 1 ); // set tag to 1 if it was empty -> empty != invalid + v.tag( v.tag() ? 0 : 1 ); // set tag to 1 if it was empty -> empty != invalid return AtomicCell( value.exchange( v ) ); } @@ -224,7 +232,7 @@ struct AtomicCell : CellBase< T, Hasher > } T &fetch() { return deatomize().t; } - T copy() { Tagged< T > v = value; v.setTag( 0 ); return v.t; } + T copy() { Tagged< T > v = value; v.tag( 0 ); return v.t; } bool wait() { return !invalid(); } void store( T bn, hash64_t hash ) { @@ -234,14 +242,14 @@ struct AtomicCell : CellBase< T, Hasher > bool tryStore( T b, hash64_t hash ) { Tagged< T > zero; Tagged< T > next( b ); - next.setTag( hashToTag( hash ) ); + next.tag( highbits( hash, Tagged< T >::tag_bits ) | 1 ); auto rv = value.compare_exchange_strong( zero, next ); return rv; } template< typename Value > bool is( Value v, hash64_t hash, Hasher &h ) { - return value.load().tag() == hashToTag( hash ) && + return value.load().tag() == ( highbits( hash, Tagged< T >::tag_bits ) | 1 ) && h.equal( value.load().t, v ); } @@ -256,10 +264,6 @@ struct AtomicCell : CellBase< T, Hasher > } }; -// default hash implementation -template< typename T > -struct default_hasher {}; - template< typename T > struct Found : types::Wrapper< T > { @@ -284,8 +288,6 @@ Found< T > isNew( const T &x, bool y ) { template< typename Cell > struct HashSetBase { - struct ThreadData {}; - using value_type = typename Cell::value_type; using Hasher = typename Cell::Hasher; @@ -342,7 +344,6 @@ struct _HashSet : HashSetBase< Cell > { using Base = HashSetBase< Cell >; typedef std::vector< Cell > Table; - _HashSet< Cell > &withTD( typename Base::ThreadData & ) { return *this; } using typename 
Base::iterator; using typename Base::value_type; @@ -416,10 +417,10 @@ struct _HashSet : HashSetBase< Cell > void grow() { if ( 2 * size() >= _maxsize ) - ASSERT_UNREACHABLE( "ran out of space in the hash table" ); + UNREACHABLE( "ran out of space in the hash table" ); if( _growing ) - ASSERT_UNREACHABLE( "too many collisions during table growth" ); + UNREACHABLE( "too many collisions during table growth" ); _growing = true; @@ -456,14 +457,9 @@ struct _HashSet : HashSetBase< Cell > std::fill( _table.begin(), _table.end(), value_type() ); } - bool valid( int off ) { - return !_table[ off ].empty(); - } - - value_type &operator[]( int off ) { - return _table[ off ].fetch(); - } - + Cell &cellAt( size_t idx ) { return _table[ idx ]; } + value_type valueAt( size_t idx ) { return cellAt( idx ).fetch(); } + bool valid( size_t idx ) { return !cellAt( idx ).empty(); } _HashSet() : _HashSet( Hasher() ) {} explicit _HashSet( Hasher h ) : _HashSet( h, 32 ) {} @@ -475,10 +471,10 @@ struct _HashSet : HashSetBase< Cell > } }; -template< typename T, typename Hasher = default_hasher< T > > +template< typename T, typename Hasher = DefaultHasher > using Fast = _HashSet< FastCell< T, Hasher > >; -template< typename T, typename Hasher = default_hasher< T > > +template< typename T, typename Hasher = DefaultHasher > using Compact = _HashSet< CompactCell< T, Hasher > >; template< typename Cell > @@ -488,6 +484,7 @@ struct _ConcurrentHashSet : HashSetBase< Cell > using typename Base::Hasher; using typename Base::value_type; using typename Base::iterator; + using Base::hasher; enum class Resolution { Success, // the item has been inserted successfully @@ -509,13 +506,15 @@ struct _ConcurrentHashSet : HashSetBase< Cell > using Insert = _Resolution; using Find = _Resolution; - struct ThreadData { + struct Local { unsigned inserts; unsigned currentRow; - ThreadData() : inserts( 0 ), currentRow( 0 ) {} + Local() : inserts( 0 ), currentRow( 0 ) {} }; + Local _l; + struct Row { std::atomic< 
Cell * > _data; size_t _size; @@ -563,12 +562,11 @@ struct _ConcurrentHashSet : HashSetBase< Cell > ~Row() { free(); } }; - static const unsigned segmentSize = 1 << 16;// 2^16 = 65536 - static const unsigned syncPoint = 1 << 10;// 2^10 = 1024 + static const unsigned segmentSize = 1 << 16; // 65536 + static const unsigned syncPoint = 1024; - struct Data + struct Shared { - Hasher hasher; std::vector< Row > table; std::vector< std::atomic< unsigned short > > tableWorkers; std::atomic< unsigned > currentRow; @@ -577,366 +575,345 @@ struct _ConcurrentHashSet : HashSetBase< Cell > std::atomic< size_t > used; std::atomic< bool > growing; - Data( const Hasher &h, unsigned maxGrows ) - : hasher( h ), table( maxGrows ), tableWorkers( maxGrows ), currentRow( 0 ), + Shared( unsigned maxGrows ) + : table( maxGrows ), tableWorkers( maxGrows ), currentRow( 0 ), availableSegments( 0 ), used( 0 ), growing( false ) {} }; - Data _d; - ThreadData _global; /* for single-thread access */ + std::shared_ptr< Shared > _s; static size_t nextSize( size_t s ) { +#ifndef __divine__ if ( s < 512 * 1024 ) return s * 16; if ( s < 16 * 1024 * 1024 ) return s * 8; if ( s < 32 * 1024 * 1024 ) return s * 4; +#endif return s * 2; } - struct WithTD + size_t size() { return current().size(); } + Row &current() { return _s->table[ _s->currentRow ]; } + Row &current( unsigned index ) { return _s->table[ index ]; } + bool changed( unsigned row ) { return row < _s->currentRow || _s->growing; } + + iterator insert( value_type x ) { - using iterator = typename Base::iterator; - using value_type = typename Base::value_type; + return insertHinted( x, hasher.hash( x ).first ); + } - Data &_d; - ThreadData &_td; - WithTD( Data &d, ThreadData &td ) : _d( d ), _td( td ) {} + template< typename T > + iterator find( T x ) { + return findHinted( x, hasher.hash( x ).first ); + } - size_t size() { return current().size(); } - Row &current() { return _d.table[ _d.currentRow ]; } - Row &current( unsigned index ) { return _d.table[ index ]; } - 
bool changed( unsigned row ) { return row < _d.currentRow || _d.growing; } + int count( value_type x ) { return find( x ).valid() ? 1 : 0; } - iterator insert( value_type x ) { - return insertHinted( x, _d.hasher.hash( x ).first ); - } - - template< typename T > - iterator find( T x ) { - return findHinted( x, _d.hasher.hash( x ).first ); - } - - int count( value_type x ) { - return find( x ).valid() ? 1 : 0; - } - - iterator insertHinted( value_type x, hash64_t h ) - { - while ( true ) { - Insert ir = insertCell< false >( x, h ); - switch ( ir.r ) { - case Resolution::Success: - increaseUsage(); - return iterator( ir.c, true ); - case Resolution::Found: - return iterator( ir.c, false ); - case Resolution::NoSpace: - if ( grow( _td.currentRow + 1 ) ) { - ++_td.currentRow; - break; - } - SPOT_FALLTHROUGH; - case Resolution::Growing: - helpWithRehashing(); - updateIndex( _td.currentRow ); + iterator insertHinted( value_type x, hash64_t h ) + { + while ( true ) { + Row &row = current( _l.currentRow ); + Insert ir = insertCell< false >( row, x, h ); + switch ( ir.r ) { + case Resolution::Success: + increaseUsage(); + return iterator( ir.c, true ); + case Resolution::Found: + return iterator( ir.c, false ); + case Resolution::NoSpace: + if ( grow( _l.currentRow + 1 ) ) { + ++_l.currentRow; break; - default: - ASSERT_UNREACHABLE("impossible result from insertCell"); - } + } + SPOT_FALLTHROUGH; + case Resolution::Growing: + helpWithRehashing(); + updateIndex( _l.currentRow ); + break; + default: + UNREACHABLE("impossible result from insertCell"); } - ASSERT_UNREACHABLE("broken loop"); } + UNREACHABLE("broken loop"); + } - template< typename T > - iterator findHinted( T x, hash64_t h ) { - while ( true ) { - Find fr = findCell( x, h, _td.currentRow ); - switch ( fr.r ) { - case Resolution::Found: - return iterator( fr.c ); - case Resolution::NotFound: - return iterator(); - case Resolution::Growing: - helpWithRehashing(); - updateIndex( _td.currentRow ); - break; - 
default: - ASSERT_UNREACHABLE("impossible result from findCell"); - } + template< typename T > + iterator findHinted( T x, hash64_t h ) { + while ( true ) { + Find fr = findCell( x, h, _l.currentRow ); + switch ( fr.r ) { + case Resolution::Found: + return iterator( fr.c ); + case Resolution::NotFound: + return iterator(); + case Resolution::Growing: + helpWithRehashing(); + updateIndex( _l.currentRow ); + break; + default: + UNREACHABLE("impossible result from findCell"); } - ASSERT_UNREACHABLE("broken loop"); } + UNREACHABLE("broken loop"); + } - template< typename T > - Find findCell( T v, hash64_t h, unsigned rowIndex ) - { + template< typename T > + Find findCell( T v, hash64_t h, unsigned rowIndex ) + { + if ( changed( rowIndex ) ) + return Find( Resolution::Growing ); + + Row &row = current( rowIndex ); + + if ( row.empty() ) + return Find( Resolution::NotFound ); + + const size_t mask = row.size() - 1; + + for ( size_t i = 0; i < Base::maxcollisions; ++i ) { if ( changed( rowIndex ) ) return Find( Resolution::Growing ); - Row &row = current( rowIndex ); - - if ( row.empty() ) + Cell &cell = row[ Base::index( h, i, mask ) ]; + if ( cell.empty() ) return Find( Resolution::NotFound ); + if ( cell.is( v, h, hasher ) ) + return Find( Resolution::Found, &cell ); + if ( cell.invalid() ) + return Find( Resolution::Growing ); + } + return Find( Resolution::NotFound ); + } - const size_t mask = row.size() - 1; - - for ( size_t i = 0; i < Base::maxcollisions; ++i ) { - if ( changed( rowIndex ) ) - return Find( Resolution::Growing ); - - Cell &cell = row[ Base::index( h, i, mask ) ]; - if ( cell.empty() ) - return Find( Resolution::NotFound ); - if ( cell.is( v, h, _d.hasher ) ) - return Find( Resolution::Found, &cell ); - if ( cell.invalid() ) - return Find( Resolution::Growing ); - } - return Find( Resolution::NotFound ); + template< bool force > + Insert insertCell( Row &row, value_type x, hash64_t h ) + { + if ( !force ) { + // read usage first to guarantee usage 
<= size + size_t u = _s->used.load( std::memory_order_relaxed ); + // usage >= 75% of table size + // usage is never greater than size + if ( row.empty() || double( row.size() ) <= double( 4 * u ) / 3 ) + return Insert( Resolution::NoSpace ); + if ( changed( _l.currentRow ) ) + return Insert( Resolution::Growing ); } - template< bool force > - Insert insertCell( value_type x, hash64_t h ) + ASSERT( !row.empty() ); + const size_t mask = row.size() - 1; + + for ( size_t i = 0; i < Base::maxcollisions; ++i ) { - Row &row = current( _td.currentRow ); - if ( !force ) { - // read usage first to guarantee usage <= size - size_t u = _d.used.load(); - // usage >= 75% of table size - // usage is never greater than size - if ( row.empty() || double( row.size() ) <= double( 4 * u ) / 3 ) - return Insert( Resolution::NoSpace ); - if ( changed( _td.currentRow ) ) + Cell &cell = row[ Base::index( h, i, mask ) ]; + + if ( cell.empty() ) { + if ( cell.tryStore( x, h ) ) + return Insert( Resolution::Success, &cell ); + if ( !force && changed( _l.currentRow ) ) return Insert( Resolution::Growing ); } + if ( cell.is( x, h, hasher ) ) + return Insert( Resolution::Found, &cell ); - ASSERT( !row.empty() ); - const size_t mask = row.size() - 1; + if ( !force && changed( _l.currentRow ) ) + return Insert( Resolution::Growing ); + } + return Insert( Resolution::NoSpace ); + } - for ( size_t i = 0; i < Base::maxcollisions; ++i ) - { - Cell &cell = row[ Base::index( h, i, mask ) ]; + bool grow( unsigned rowIndex ) + { + ASSERT( rowIndex ); - if ( cell.empty() ) { - if ( cell.tryStore( x, h ) ) - return Insert( Resolution::Success, &cell ); - if ( !force && changed( _td.currentRow ) ) - return Insert( Resolution::Growing ); - } - if ( cell.is( x, h, _d.hasher ) ) - return Insert( Resolution::Found, &cell ); + if ( rowIndex >= _s->table.size() ) + UNREACHABLE( "out of growth space" ); - if ( !force && changed( _td.currentRow ) ) - return Insert( Resolution::Growing ); - } - return Insert( 
Resolution::NoSpace ); + if ( _s->currentRow >= rowIndex ) + return false; + + while ( _s->growing.exchange( true ) ) // acquire growing lock + helpWithRehashing(); + + if ( _s->currentRow >= rowIndex ) { + _s->growing.exchange( false ); // release the lock + return false; } - bool grow( unsigned rowIndex ) - { - ASSERT( rowIndex ); - - if ( rowIndex >= _d.table.size() ) - ASSERT_UNREACHABLE( "out of growth space" ); - - if ( _d.currentRow >= rowIndex ) - return false; - - while ( _d.growing.exchange( true ) ) // acquire growing lock - helpWithRehashing(); - - if ( _d.currentRow >= rowIndex ) { - _d.growing.exchange( false ); // release the lock - return false; - } - - Row &row = current( rowIndex - 1 ); - _d.table[ rowIndex ].resize( nextSize( row.size() ) ); - _d.currentRow.exchange( rowIndex ); - _d.tableWorkers[ rowIndex ] = 1; - _d.doneSegments.exchange( 0 ); - - // current row is fake, so skip the rehashing - if ( row.empty() ) { - rehashingDone(); - return true; - } - - const unsigned segments = std::max( row.size() / segmentSize, size_t( 1 ) ); - _d.availableSegments.exchange( segments ); - - while ( rehashSegment() ); + Row &row = current( rowIndex - 1 ); + _s->table[ rowIndex ].resize( nextSize( row.size() ) ); + _s->currentRow.exchange( rowIndex ); + _s->tableWorkers[ rowIndex ] = 1; + _s->doneSegments.exchange( 0 ); + // current row is fake, so skip the rehashing + if ( row.empty() ) { + rehashingDone(); return true; } - void helpWithRehashing() { - while ( _d.growing ) - while( rehashSegment() ); - } + const unsigned segments = std::max( row.size() / segmentSize, size_t( 1 ) ); + _s->availableSegments.exchange( segments ); - void rehashingDone() { - releaseRow( _d.currentRow - 1 ); - _d.growing.exchange( false ); /* done */ - } + while ( rehashSegment() ); - bool rehashSegment() { - int segment; - if ( _d.availableSegments <= 0 ) - return false; - if ( ( segment = --_d.availableSegments ) < 0 ) - return false; - - Row &row = current( _d.currentRow - 1 
); - size_t segments = std::max( row.size() / segmentSize, size_t( 1 ) ); - auto it = row.begin() + segmentSize * segment; - auto end = it + segmentSize; - if ( end > row.end() ) - end = row.end(); - ASSERT( it < end ); - - ThreadData td; - td.currentRow = _d.currentRow; - - // every cell has to be invalidated - for ( ; it != end; ++it ) { - Cell old = it->invalidate(); - if ( old.empty() || old.invalid() ) - continue; - - value_type value = old.fetch(); - Resolution r = WithTD( _d, td ).insertCell< true >( value, old.hash( _d.hasher ) ).r; - switch( r ) { - case Resolution::Success: - break; - case Resolution::NoSpace: - ASSERT_UNREACHABLE( "ran out of space during growth" ); - default: - ASSERT_UNREACHABLE( "internal error" ); - } - } - - if ( ++_d.doneSegments == segments ) - rehashingDone(); - - return segment > 0; - } - - void updateIndex( unsigned &index ) { - unsigned row = _d.currentRow; - if ( row != index ) { - releaseRow( index ); - acquireRow( row ); - index = row; - } - } - - void releaseRow( unsigned index ) { - // special case - zero index - if ( !_d.tableWorkers[ index ] ) - return; - // only last thread releases memory - if ( !--_d.tableWorkers[ index ] ) - _d.table[ index ].free(); - } - - void acquireRow( unsigned &index ) { - unsigned short refCount = _d.tableWorkers[ index ]; - - do { - if ( !refCount ) { - index = _d.currentRow; - refCount = _d.tableWorkers[ index ]; - continue; - } - - if (_d.tableWorkers[ index ].compare_exchange_weak( refCount, refCount + 1 )) - break; - } while( true ); - } - - void increaseUsage() { - if ( ++_td.inserts == syncPoint ) { - _d.used += syncPoint; - _td.inserts = 0; - } - } - - }; - - WithTD withTD( ThreadData &td ) { return WithTD( _d, td ); } - - explicit _ConcurrentHashSet( Hasher h = Hasher(), unsigned maxGrows = 64 ) - : Base( h ), _d( h, maxGrows ) - { - setSize( 16 ); // by default + return true; } - /* XXX only usable before the first insert; rename? 
*/ - void setSize( size_t s ) { + void helpWithRehashing() { + while ( _s->growing ) + while( rehashSegment() ); + } + + void rehashingDone() { + releaseRow( _s->currentRow - 1 ); + _s->growing.exchange( false ); /* done */ + } + + bool rehashSegment() { + int segment; + if ( _s->availableSegments <= 0 ) + return false; + if ( ( segment = --_s->availableSegments ) < 0 ) + return false; + + Row &row = current( _s->currentRow - 1 ), + &next = current( _s->currentRow ); + size_t segments = std::max( row.size() / segmentSize, size_t( 1 ) ); + auto it = row.begin() + segmentSize * segment; + auto end = it + segmentSize; + if ( end > row.end() ) + end = row.end(); + ASSERT( it < end ); + + // every cell has to be invalidated + for ( ; it != end; ++it ) { + Cell old = it->invalidate(); + if ( old.empty() || old.invalid() ) + continue; + + value_type value = old.fetch(); + Resolution r = insertCell< true >( next, value, old.hash( hasher ) ).r; + switch( r ) { + case Resolution::Success: + break; + case Resolution::NoSpace: + UNREACHABLE( "ran out of space during growth" ); + case Resolution::Found: + UNREACHABLE( "value cloned during growth" ); + default: + UNREACHABLE( "internal error" ); + } + } + + if ( ++_s->doneSegments == segments ) + rehashingDone(); + + return segment > 0; + } + + void updateIndex( unsigned &index ) { + unsigned row = _s->currentRow; + if ( row != index ) { + releaseRow( index ); + acquireRow( row ); + index = row; + } + } + + void releaseRow( unsigned index ) { + // special case - zero index + if ( !_s->tableWorkers[ index ] ) + return; + // only last thread releases memory + if ( !--_s->tableWorkers[ index ] ) + _s->table[ index ].free(); + } + + void acquireRow( unsigned &index ) + { + unsigned short refCount = _s->tableWorkers[ index ]; + + do { + if ( !refCount ) { + index = _s->currentRow; + refCount = _s->tableWorkers[ index ]; + continue; + } + + if (_s->tableWorkers[ index ].compare_exchange_weak( refCount, refCount + 1 )) + break; + } 
while( true ); + } + + void increaseUsage() { + if ( ++_l.inserts == syncPoint ) { + _s->used.fetch_add( syncPoint, std::memory_order_relaxed ); + _l.inserts = 0; + } + } + + explicit _ConcurrentHashSet( Hasher h = Hasher(), unsigned maxGrows = 64 ) + : Base( h ), _s( new Shared( maxGrows ) ) + { +#ifdef __divine__ + initialSize( 8 ); +#else + initialSize( 4 * syncPoint ); // by default +#endif + } + + void initialSize( size_t s ) + { s = bitlevel::fill( s - 1 ) + 1; size_t toSet = 1; while ( nextSize( toSet ) < s ) toSet <<= 1; - _d.table[ 0 ].size( toSet ); + _s->table[ 0 ].size( toSet ); } hash64_t hash( const value_type &t ) { return hash128( t ).first; } - hash128_t hash128( const value_type &t ) { return _d.hasher.hash( t ); } - iterator insert( const value_type &t ) { return withTD( _global ).insert( t ); } - int count( const value_type &t ) { return withTD( _global ).count( t ); } - size_t size() { return withTD( _global ).size(); } + hash128_t hash128( const value_type &t ) { return hasher.hash( t ); } - _ConcurrentHashSet( const _ConcurrentHashSet & ) = delete; - _ConcurrentHashSet &operator=( const _ConcurrentHashSet & )= delete; + _ConcurrentHashSet( const _ConcurrentHashSet &o ) + : Base( o ), _s( o._s ) + {} - /* multiple threads may use operator[], but not concurrently with insertions */ - value_type operator[]( size_t index ) { // XXX return a reference - return _d.table[ _d.currentRow ][ index ].fetch(); - } - - bool valid( size_t index ) { - return !_d.table[ _d.currentRow ][ index ].empty(); - } + Cell &cellAt( size_t index ) { return _s->table[ _s->currentRow ][ index ]; } + value_type valueAt( size_t idx ) { return cellAt( idx ).fetch(); } + bool valid( size_t idx ) { return !cellAt( idx ).empty(); } }; -template< typename T, typename Hasher = default_hasher< T > > +template< typename T, typename Hasher = DefaultHasher > using FastConcurrent = _ConcurrentHashSet< FastAtomicCell< T, Hasher > >; -template< typename T, typename Hasher = 
default_hasher< T > > +template< typename T, typename Hasher = DefaultHasher > using CompactConcurrent = _ConcurrentHashSet< AtomicCell< T, Hasher > >; -#ifdef BRICKS_FORCE_FAST_CONCURRENT_SET -template< typename T, typename Hasher = default_hasher< T > > -using Concurrent = FastConcurrent< T, Hasher >; - -#elif BRICKS_FORCE_COMPACT_CONCURRENT_SET -template< typename T, typename Hasher = default_hasher< T > > -using Concurrent = CompactConcurrent< T, Hasher >; - -#else -template< typename T, typename Hasher = default_hasher< T > > +template< typename T, typename Hasher = DefaultHasher > using Concurrent = _ConcurrentHashSet< typename std::conditional< ( sizeof( Tagged< T > ) > 8 // most platforms do not have CAS for data types bigger then 64bit // for example 16B CAS does not link in clang 3.4 on x86_64 || sizeof( std::atomic< Tagged< T > > ) > sizeof( Tagged< T > ) // atomic is not lock-free || sizeof( AtomicCell< T, Hasher > ) >= sizeof( FastAtomicCell< T, Hasher > ) ), FastAtomicCell< T, Hasher >, AtomicCell< T, Hasher > >::type >; + +} + +namespace t_hashset { + +using namespace hashset; + +#ifdef __divine__ +static constexpr int size = 4; +static constexpr int isize = 2; +#else +static constexpr int size = 32 * 1024; +static constexpr int isize = 4 * 1024; #endif -} -} - -/* unit tests */ - -namespace brick_test { -namespace hashset { - -using namespace ::brick::hashset; - template< template< typename > class HS > struct Sequential { @@ -949,7 +926,7 @@ struct Sequential unsigned count = 0; for ( unsigned i = 0; i != set.size(); ++i ) - if ( set[ i ] ) + if ( set.valueAt( i ) ) ++count; ASSERT_EQ( count, 1u ); @@ -957,11 +934,12 @@ struct Sequential TEST(stress) { HS< int > set; - for ( int i = 1; i < 32*1024; ++i ) { + + for ( int i = 1; i < size; ++i ) { set.insert( i ); ASSERT( set.count( i ) ); } - for ( int i = 1; i < 32*1024; ++i ) { + for ( int i = 1; i < size; ++i ) { ASSERT( set.count( i ) ); } } @@ -969,21 +947,21 @@ struct Sequential TEST(set) { HS< 
int > set; - for ( int i = 1; i < 32*1024; ++i ) { + for ( int i = 1; i < size; ++i ) { ASSERT( !set.count( i ) ); } - for ( int i = 1; i < 32*1024; ++i ) { + for ( int i = 1; i < size; ++i ) { set.insert( i ); ASSERT( set.count( i ) ); ASSERT( !set.count( i + 1 ) ); } - for ( int i = 1; i < 32*1024; ++i ) { + for ( int i = 1; i < size; ++i ) { ASSERT( set.count( i ) ); } - for ( int i = 32*1024; i < 64 * 1024; ++i ) { + for ( int i = size; i < 2 * size; ++i ) { ASSERT( !set.count( i ) ); } } @@ -992,14 +970,13 @@ struct Sequential template< template< typename > class HS > struct Parallel { - struct Insert : shmem::Thread { - HS< int > *_set; - typename HS< int >::ThreadData td; + struct Insert + { + HS< int > set; int from, to; bool overlap; - void main() override { - auto set = _set->withTD( td ); + void main() { for ( int i = from; i < to; ++i ) { set.insert( i ); ASSERT( !set.insert( i ).isnew() ); @@ -1010,100 +987,96 @@ struct Parallel }; TEST(insert) { - HS< int > set; - set.setSize( 4 * 1024 ); Insert a; - a._set = &set; + a.set.initialSize( isize ); a.from = 1; - a.to = 32 * 1024; + a.to = size; a.overlap = false; a.main(); - for ( int i = 1; i < 32*1024; ++i ) - ASSERT( set.count( i ) ); + for ( int i = 1; i < size; ++i ) + ASSERT( a.set.count( i ) ); } - static void _par( HS< int > *set, int f1, int t1, int f2, int t2 ) + static HS< int > _par( int f1, int t1, int f2, int t2 ) { - Insert a, b; + shmem::Thread< Insert > a, b( a ); a.from = f1; a.to = t1; b.from = f2; b.to = t2; - a._set = set; - b._set = set; a.overlap = b.overlap = (t1 > f2); a.start(); b.start(); a.join(); b.join(); + return a.set; } - static void _multi( HS< int > *set, std::size_t count, int from, int to ) + static HS< int > _multi( std::size_t count, int from, int to ) { - Insert *arr = new Insert[ count ]; + shmem::ThreadSet< Insert > arr; + arr.resize( count, Insert() ); + arr[ 0 ].set.initialSize( isize ); - for ( std::size_t i = 0; i < count; ++i ) { + for ( std::size_t i = 0; i 
< count; ++i ) + { arr[ i ].from = from; arr[ i ].to = to; - arr[ i ]._set = set; arr[ i ].overlap = true; } - for ( std::size_t i = 0; i < count; ++i ) - arr[ i ].start(); + arr.start(); + arr.join(); - for ( std::size_t i = 0; i < count; ++i ) - arr[ i ].join(); - - delete[] arr; + return arr[ 0 ].set; } TEST(multi) { - HS< int > set; - set.setSize( 4 * 1024 ); - _multi( &set, 10, 1, 32 * 1024 ); + auto set = _multi( 10, 1, size ); - for ( int i = 1; i < 32 * 1024; ++i ) + for ( int i = 1; i < isize; ++i ) ASSERT( set.count( i ) ); int count = 0; std::set< int > s; for ( size_t i = 0; i != set.size(); ++i ) { - if ( set[ i ] ) { - if ( s.find( set[ i ] ) == s.end() ) - s.insert( set[ i ] ); + if ( set.valueAt( i ) ) { + if ( s.find( set.valueAt( i ) ) == s.end() ) + s.insert( set.valueAt( i ) ); ++count; } } - ASSERT_EQ( count, 32 * 1024 - 1 ); + ASSERT_EQ( count, size - 1 ); } TEST(stress) { - HS< int > set; + auto s = _par( 1, size / 2, size / 4, size ); - set.setSize( 4 * 1024 ); - _par( &set, 1, 16*1024, 8*1024, 32*1024 ); - - for ( int i = 1; i < 32*1024; ++i ) - ASSERT( set.count( i ) ); + for ( int i = 1; i < size; ++i ) + ASSERT( s.count( i ) ); } - TEST(set) { + TEST(empty) + { HS< int > set; - set.setSize( 4 * 1024 ); - for ( int i = 1; i < 32*1024; ++i ) + set.initialSize( isize ); + + for ( int i = 1; i < size; ++i ) ASSERT( !set.count( i ) ); + } - _par( &set, 1, 16*1024, 16*1024, 32*1024 ); + TEST(set) + { + auto set = _par( 1, size / 2, size / 2, size ); - for ( int i = 1; i < 32*1024; ++i ) + for ( int i = 1; i < size; ++i ) ASSERT_EQ( i, i * set.count( i ) ); - for ( int i = 32*1024; i < 64 * 1024; ++i ) + for ( int i = size; i < size * 2; ++i ) ASSERT( !set.count( i ) ); } }; @@ -1114,10 +1087,26 @@ struct test_hasher { test_hasher( X& ) { } test_hasher() = default; hash128_t hash( int t ) const { return std::make_pair( t, t ); } - bool valid( int t ) const { return t != 0; } + + template< typename X > + auto hash( const X &t ) const -> 
decltype( t.hash() ) { return t.hash(); } + template< typename X > + auto equal( const X &s, const X &t ) const -> decltype( s == t ) { return s == t; } + + template< typename X > + uint64_t operator()( const X &x ) const { return hash( x ).first; } + bool equal( int a, int b ) const { return a == b; } }; +template< typename T > +struct ptr_hasher +{ + hash128_t hash( const T *x ) const { return x->hash(); } + bool equal( const T *x, const T *y ) const { return *x == *y; } + uint64_t operator()( const T *x ) const { return hash( x ).first; } +}; + template< typename T > using CS = Compact< T, test_hasher< T > >; template< typename T > using FS = Fast< T, test_hasher< T > >; template< typename T > using ConCS = CompactConcurrent< T, test_hasher< T > >; @@ -1145,8 +1134,8 @@ template struct Parallel< ConFS >; #include #endif -namespace brick_test { -namespace hashset { +namespace brick { +namespace b_hashset { template< typename HS > struct RandomThread : shmem::Thread { @@ -1257,7 +1246,7 @@ struct Run : BenchmarkGroup template< template< typename > class, typename Self, int, typename, typename... Args > static void run( Self *, hlist::not_preferred, Args... ) { - ASSERT_UNREACHABLE( "brick_test::hashset::Run fell off the cliff" ); + UNREACHABLE( "brick::b_hashset::Run fell off the cliff" ); } template< template< typename > class RI, typename Self, int id, @@ -1579,6 +1568,4 @@ FOR_PAR(IvR_PAR) #endif // benchmarks -#endif - -// vim: syntax=cpp tabstop=4 shiftwidth=4 expandtab +// vim: syntax=cpp tabstop=4 shiftwidth=4 expandtab ft=cpp diff --git a/bricks/brick-shmem.h b/bricks/brick-shmem similarity index 78% rename from bricks/brick-shmem.h rename to bricks/brick-shmem index 9dc2a62d6..2019e26aa 100644 --- a/bricks/brick-shmem.h +++ b/bricks/brick-shmem @@ -37,18 +37,18 @@ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ -#include +#include "brick-assert" #include #include #include -#if __cplusplus >= 201103L #include #include #include -#include #include -#endif + +#include // alarm +#include #ifndef BRICK_SHMEM_H #define BRICK_SHMEM_H @@ -60,108 +60,168 @@ namespace brick { namespace shmem { -#if __cplusplus >= 201103L - -struct Thread { +template< typename T > +struct Thread : T +{ std::unique_ptr< std::thread > _thread; - std::atomic< bool > _interrupted; - virtual void main() = 0; - virtual void exception( std::exception_ptr ep ) { - try { - std::rethrow_exception( ep ); - } catch ( std::exception &ex ) { - std::cerr << "Uncaught exception" - << " of type " << typeid( ex ).name() - << ":" << std::endl; - std::cerr << ex.what() << std::endl; - std::terminate(); - } - } + bool _start_on_move; // :-( - Thread() : _interrupted( false ) {} - Thread( const Thread &other ) : _interrupted( false ) { - if( other._thread ) - throw std::logic_error( "cannot copy running thread" ); - } - Thread( Thread &&other ) : - _thread( std::move( other._thread ) ), - _interrupted( other.interrupted() ) - {} + template< typename... Args > + Thread( Args&&... args ) : T( std::forward< Args >( args )... ), _start_on_move( false ) {} + virtual ~Thread() { stop(); } - ~Thread() { stop(); } - - Thread &operator=( const Thread &other ) { - if ( _thread ) - throw std::logic_error( "cannot overwrite running thread" ); + Thread( const Thread &other ) : T( other ) + { if ( other._thread ) throw std::logic_error( "cannot copy running thread" ); - _interrupted.store( other.interrupted(), std::memory_order_relaxed ); - return *this; } - Thread &operator=( Thread &&other ) { - if ( _thread ) - throw std::logic_error( "cannot overwrite running thread" ); - _thread.swap( other._thread ); - _interrupted.store( other.interrupted(), std::memory_order_relaxed ); - return *this; + Thread( Thread &&other ) + : T( other._thread ? 
throw std::logic_error( "cannot move a running thread" ) : other ), + _thread( std::move( other._thread ) ), + _start_on_move( false ) + { + if ( other._start_on_move ) + start(); } -#ifdef __divine__ - void start() __attribute__((noinline)) { - __divine_interrupt_mask(); -#else - void start() { -#endif - _interrupted.store( false, std::memory_order_relaxed ); - _thread.reset( new std::thread( [this]() { - try { - this->main(); - } catch (...) { - this->exception( std::current_exception() ); - } - } ) ); + virtual void start() + { + _thread.reset( new std::thread( [this]() { this->main(); } ) ); } - // stop must be idempotent - void stop() { - interrupt(); + virtual void stop() + { if ( _thread && _thread->joinable() ) join(); } - void join() { - if ( _thread ) { + void join() + { + if ( _thread ) + { _thread->join(); _thread.reset(); } } - void detach() { - if ( _thread ) { + void detach() + { + if ( _thread ) + { _thread->detach(); _thread.reset(); } } - bool interrupted() const { + const Thread& operator=(const Thread& other) + { + std::cerr << "FIXME Added by us (Spot) to avoid compilation warnings\n"; + std::cerr << " Should not pass here.\n"; + return other; + } +}; + +template< typename T > +struct LoopWrapper : T +{ + std::atomic< bool > _interrupted; + + template< typename... Args > + LoopWrapper( Args&&... args ) : T( std::forward< Args >( args )... 
), _interrupted( false ) {} + + LoopWrapper( const LoopWrapper & ) = default; + LoopWrapper( LoopWrapper && ) = default; + + void main() + { + while ( !_interrupted ) this->loop(); + } + + bool interrupted() const + { return _interrupted.load( std::memory_order_relaxed ); } - void interrupt() { + void interrupt() + { _interrupted.store( true, std::memory_order_relaxed ); } }; +template< typename L > +struct LambdaWrapper +{ + L lambda; + LambdaWrapper( L l ) : lambda( l ) {} + void loop() { lambda(); } + void main() { lambda(); } +}; + +template< typename T > +struct AsyncLoop : Thread< LoopWrapper< T > > +{ + using Super = Thread< LoopWrapper< T > >; + + template< typename... Args > + AsyncLoop( Args&&... args ) : Super( std::forward< Args >( args )... ) {} + + AsyncLoop( const AsyncLoop & ) = default; + AsyncLoop( AsyncLoop && ) = default; + + virtual ~AsyncLoop() + { + stop(); /* call the correct stop(), with interrupt() */ + } + + void start() override + { + this->_interrupted.store( false, std::memory_order_relaxed ); + Super::start(); + } + + void stop() override + { + this->interrupt(); + Super::stop(); + } +}; + +template< typename L > +auto async_loop( L &&l ) +{ + AsyncLoop< LambdaWrapper< L > > al( std::forward< L >( l ) ); + al._start_on_move = true; + return std::move( al ); +} + +template< typename L > +auto thread( L &&l ) +{ + Thread< LambdaWrapper< L > > thr( std::forward< L >( l ) ); + thr._start_on_move = true; + return thr; +} + +template< typename T > +struct ThreadSet : std::vector< Thread< T > > +{ + template< typename... Args > + ThreadSet( Args&&... args ) : std::vector< Thread< T > >( std::forward< Args >( args )... ) {} + + void start() { for ( auto &t : *this ) t.start(); } + void join() { for ( auto &t : *this ) t.join(); } +}; + /** * A spinlock implementation. * * One has to wonder why this is missing from the C++0x stdlib. 
*/ struct SpinLock { - std::atomic_flag b = ATOMIC_FLAG_INIT;; + std::atomic_flag b; - SpinLock() {} + SpinLock() : b( 0 ) {} void lock() { while( b.test_and_set() ); @@ -198,53 +258,49 @@ struct ApproximateCounter { Shared( const Shared& ) = delete; }; - Shared &shared; - intptr_t local; + std::shared_ptr< Shared > _s; + intptr_t _l; - ApproximateCounter( Shared &s ) : shared( s ), local( 0 ) {} + ApproximateCounter() : _s( new Shared ), _l( 0 ) {} + ApproximateCounter( const ApproximateCounter &other ) : _s( other._s ), _l( 0 ) {} + ApproximateCounter operator=( const ApproximateCounter & ) = delete; ~ApproximateCounter() { sync(); } void sync() { - intptr_t value = shared.counter; + intptr_t value = _s->counter; - while ( local > 0 ) { - if ( value >= local ) { - if ( shared.counter.compare_exchange_weak( value, value - local ) ) - local = 0; + while ( _l > 0 ) { + if ( value >= _l ) { + if ( _s->counter.compare_exchange_weak( value, value - _l ) ) + _l = 0; } else { - if ( shared.counter.compare_exchange_weak( value, 0 ) ) - local = 0; + if ( _s->counter.compare_exchange_weak( value, 0 ) ) + _l = 0; } } } ApproximateCounter& operator++() { - if ( local == 0 ) { - shared.counter += step; - local = step; + if ( _l == 0 ) { + _s->counter += step; + _l = step; } - --local; + -- _l; return *this; } ApproximateCounter &operator--() { - ++local; + ++ _l; return *this; } - // NB. sync() must be called manually as this method is called too often - bool isZero() { - return shared.counter == 0; - } + // NB. may return false spuriously; call sync() to ensure a correct result + operator bool() { return _s->counter; } + bool operator!() { return _s->counter == 0; } - void reset() { shared.counter = 0; } - - ApproximateCounter( const ApproximateCounter &a ) - : shared( a.shared ), local( a.local ) - {} - ApproximateCounter operator=( const ApproximateCounter & ) = delete; + void reset() { _s->counter = 0; } /* fixme misleading? 
*/ }; struct StartDetector { @@ -264,22 +320,21 @@ struct StartDetector { Shared( Shared & ) = delete; }; - Shared &shared; + std::shared_ptr< Shared > _s; - StartDetector( Shared &s ) : shared( s ) {} - StartDetector( const StartDetector &s ) : shared( s.shared ) {} + StartDetector() : _s( new Shared() ) {} - void waitForAll( unsigned short peers ) { + void waitForAll( unsigned short peers ) + { + while ( _s->leaveGuard ); - while ( shared.leaveGuard ); - - if ( ++shared.counter == peers ) { - shared.leaveGuard = peers; - shared.counter = 0; + if ( ++ _s->counter == peers ) { + _s->leaveGuard = peers; + _s->counter = 0; } - while ( shared.counter ); - --shared.leaveGuard; + while ( _s->counter ); + -- _s->leaveGuard; } }; @@ -333,8 +388,6 @@ struct WeakAtomic : std::conditional< std::is_integral< T >::value && !std::is_s friend struct _impl::WeakAtomicIntegral< WeakAtomic< T >, T >; }; -#endif - #ifndef __divine__ template< typename T > constexpr int defaultNodeSize() { @@ -530,28 +583,106 @@ struct LockedQueue { LockedQueue &operator=( const LockedQueue & ) = delete; }; -} +template< template< typename > class Q, typename T > +struct Chunked +{ + using Chunk = std::deque< T >; + using ChQ = Q< Chunk >; + std::shared_ptr< ChQ > q; + unsigned chunkSize; + + Chunk outgoing; + Chunk incoming; + + void push( T t ) { + outgoing.push_back( t ); + if ( outgoing.size() >= chunkSize ) + flush(); + } + + T pop() { + if ( incoming.empty() ) + incoming = q->pop(); + if ( incoming.empty() ) + UNREACHABLE( "attempted to pop an empty queue" ); + auto x = incoming.front(); + incoming.pop_front(); + return x; + } + + void flush() { + if ( !outgoing.empty() ) { + Chunk tmp; + std::swap( outgoing, tmp ); + q->push( std::move( tmp ) ); + + /* A quickstart trick -- make first few chunks smaller. 
*/ + if ( chunkSize < 64 ) + chunkSize = std::min( 2 * chunkSize, 64u ); + } + } + + bool empty() { + if ( incoming.empty() ) /* try to get a fresh one */ + incoming = q->pop(); + return incoming.empty(); + } + + Chunked() : q( new ChQ() ), chunkSize( 2 ) {} +}; + +template< typename T > +using SharedQueue = Chunked< LockedQueue, T >; + } -#if __cplusplus >= 201103L - -#include // alarm -#include - -namespace brick_test { -namespace shmem { +namespace t_shmem { using namespace ::brick::shmem; +#ifdef __divine__ +static constexpr int size = 16; +static void timeout() {} +#else +static constexpr int size = 128 * 1024; +#endif + +#if defined( __unix ) || defined( POSIX ) +static void timeout() { alarm( 5 ); } +#else +static void timeout() { } +#endif + +struct ThreadTest +{ + TEST(async_loop) + { + timeout(); + std::atomic< int > x( 0 ); + auto t = shmem::async_loop( [&]() { x = 1; } ); + while ( !x ); + t.stop(); + } + + TEST(thread) + { + timeout(); + std::atomic< int > x( 0 ); + auto t = shmem::thread( [&]() { x = 1; } ); + while ( !x ); + t.join(); + } +}; + struct FifoTest { template< typename T > - struct Checker : Thread + struct Checker { Fifo< T > fifo; int terminate; int n; - void main() override + void main() { std::vector< int > x; x.resize( n ); @@ -572,17 +703,17 @@ struct FifoTest { } terminate = 0; for ( int i = 0; i < n; ++i ) - ASSERT_EQ( x[ i ], 128*1024 ); + ASSERT_EQ( x[ i ], size ); } Checker( int _n = 1 ) : terminate( 0 ), n( _n ) {} }; TEST(stress) { - Checker< int > c; + Thread< Checker< int > > c; for ( int j = 0; j < 5; ++j ) { c.start(); - for( int i = 0; i < 128 * 1024; ++i ) + for( int i = 0; i < size; ++i ) c.fifo.push( i ); c.terminate = true; c.join(); @@ -590,39 +721,38 @@ struct FifoTest { } }; +namespace { const int peers = 12; } + struct Utils { - static const int peers = 12; - - struct DetectorWorker : Thread { + struct DetectorWorker + { StartDetector detector; int rep; - DetectorWorker( StartDetector::Shared &sh, int repeat ) : 
+ DetectorWorker( StartDetector sh, int repeat ) : detector( sh ), rep( repeat ) {} - void main() override { + void main() { for ( int i = 0; i < rep; ++i ) detector.waitForAll( peers ); } }; - void processDetector( int repeat ) { - StartDetector::Shared sh; - std::vector< DetectorWorker > threads{ peers, DetectorWorker{ sh, repeat } }; + void processDetector( int repeat ) + { + StartDetector sh; + ThreadSet< DetectorWorker > threads( peers, DetectorWorker{ sh, repeat } ); -#if (defined( __unix ) || defined( POSIX )) && !defined( __divine__ ) // hm - alarm( 5 ); -#endif + timeout(); - for ( int i = 0; i != 4; ++i ) { - for ( auto &w : threads ) - w.start(); - for ( auto &w : threads ) - w.join(); - ASSERT_EQ( sh.counter.load(), 0 ); + for ( int i = 0; i != 4; ++i ) + { + threads.start(); + threads.join(); + ASSERT_EQ( sh._s->counter.load(), 0 ); } } @@ -634,21 +764,21 @@ struct Utils { processDetector( 4 ); } - struct CounterWorker : Thread { + struct CounterWorker + { StartDetector detector; ApproximateCounter counter; int produce; int consume; - template< typename D, typename C > - CounterWorker( D &d, C &c ) : - detector( d ), - counter( c ), + CounterWorker( StartDetector det, ApproximateCounter ctr ) : + detector( det ), + counter( ctr ), produce( 0 ), consume( 0 ) {} - void main() override { + void main() { detector.waitForAll( peers ); while ( produce-- ) @@ -664,15 +794,14 @@ struct Utils { } }; - void processCounter() { - StartDetector::Shared detectorShared; - ApproximateCounter::Shared counterShared; - std::vector< CounterWorker > threads{ peers, - CounterWorker{ detectorShared, counterShared } }; + TEST(approximateCounter) + { + StartDetector det; + ApproximateCounter ctr; -#if (defined( __unix ) || defined( POSIX )) && !defined( __divine__ ) // hm - alarm( 5 ); -#endif + ThreadSet< CounterWorker > threads( peers, CounterWorker{ det, ctr } ); + + timeout(); // set consume and produce limits to each worker int i = 1; @@ -685,20 +814,15 @@ struct Utils { 
++i; } - for ( auto &w : threads ) - w.start(); - - for ( auto &w : threads ) - w.join(); - ASSERT_EQ( counterShared.counter.load(), 0 ); + threads.start(); + threads.join(); + ASSERT_EQ( ctr._s->counter.load(), 0 ); } - TEST(approximateCounter) { - processCounter(); - }; }; } + } #ifdef BRICK_BENCHMARK_REG @@ -708,7 +832,7 @@ struct Utils { #endif #include -#include +#include namespace brick_test { namespace shmem { @@ -841,7 +965,7 @@ struct Linked { Linked() { reader = writer = new Node(); - reader->next = 0; + reader->next = nullptr; } }; @@ -882,59 +1006,6 @@ struct Shared { Shared() : q( new Q() ) {} }; -template< template< typename > class Q, typename T > -struct Chunked { - using Chunk = std::deque< T >; - using ChQ = Q< Chunk >; - std::shared_ptr< ChQ > q; - unsigned chunkSize; - - Chunk outgoing; - Chunk incoming; - - void push( T t ) { - outgoing.push_back( t ); - // std::cerr << "pushed " << outgoing.back() << std::endl; - if ( outgoing.size() >= chunkSize ) - flush(); - } - - T pop() { - // std::cerr << "pop: empty = " << incoming.empty() << std::endl; - if ( incoming.empty() ) - incoming = q->pop(); - if ( incoming.empty() ) - return T(); - // std::cerr << "pop: found " << incoming.front() << std::endl; - auto x = incoming.front(); - incoming.pop_front(); - return x; - } - - void flush() { - if ( !outgoing.empty() ) { - // std::cerr << "flushing " << outgoing.size() << " items" << std::endl; - Chunk tmp; - std::swap( outgoing, tmp ); - q->push( std::move( tmp ) ); - - /* A quickstart trick -- make first few chunks smaller. 
*/ - if ( chunkSize < 64 ) - chunkSize = std::min( 2 * chunkSize, 64u ); - } - } - - bool empty() { - if ( incoming.empty() ) { /* try to get a fresh one */ - incoming = q->pop(); - // std::cerr << "pulled in " << incoming.size() << " items" << std::endl; - } - return incoming.empty(); - } - - Chunked() : q( new ChQ() ), chunkSize( 2 ) {} -}; - template< typename Q > struct InsertThread : Thread { Q *q; @@ -952,7 +1023,7 @@ struct InsertThread : Thread { }; template< typename Q > -struct WorkThread : Thread { +struct WorkThread { Q q; std::atomic< bool > *stop; int items; @@ -1027,7 +1098,7 @@ struct ShQueue : BenchmarkGroup template< typename Q > void scale() { Q fifo; - auto *t = new WorkThread< Q >[ p ]; + auto *t = new Thread< WorkThread< Q > >[ p ]; std::atomic< bool > stop( false ); for ( int i = 0; i < p; ++i ) { @@ -1135,7 +1206,6 @@ struct FIFO : BenchmarkGroup } } -#endif #endif #endif diff --git a/bricks/brick-types.h b/bricks/brick-types similarity index 73% rename from bricks/brick-types.h rename to bricks/brick-types index 39dd03139..4e7f7b9e4 100644 --- a/bricks/brick-types.h +++ b/bricks/brick-types @@ -10,32 +10,22 @@ /* * (c) 2006, 2014 Petr Ročkai - * (c) 2013-2014 Vladimír Štill + * (c) 2013-2015 Vladimír Štill + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -/* Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
*/ - -#include +#include "brick-assert" #include #include @@ -51,7 +41,7 @@ #define CONSTEXPR #endif -#if __cplusplus > 201103L +#if __cplusplus > 201103L && __GNUC__ != 4 && __GNUC_MINOR__ != 9 #define CPP1Y_CONSTEXPR constexpr // C++1y #else #define CPP1Y_CONSTEXPR // C++11 @@ -68,34 +58,30 @@ struct Unit { struct Preferred { CONSTEXPR Preferred() { } }; struct NotPreferred { CONSTEXPR NotPreferred( Preferred ) {} }; -struct Comparable { - typedef bool IsComparable; -}; +template< typename _T > +struct Witness { using T = _T; }; + +struct Eq { typedef bool IsEq; }; template< typename T > -typename T::IsComparable operator!=( const T &a, const T &b ) { - return not( a == b ); -} +typename T::IsEq operator!=( const T &a, const T &b ) { return !(a == b); } + +struct Ord : Eq { typedef bool IsOrd; }; template< typename T > -typename T::IsComparable operator==( const T &a, const T &b ) { +typename T::IsOrd operator<( const T &a, const T &b ) { return !(b <= a); } + +template< typename T > +typename T::IsOrd operator>( const T &a, const T &b ) { return !(a <= b); } + +template< typename T > +typename T::IsOrd operator>=( const T &a, const T &b ) { return b <= a; } + +template< typename T > +typename T::IsOrd operator==( const T &a, const T &b ) { return a <= b && b <= a; } - -template< typename T > -typename T::IsComparable operator<( const T &a, const T &b ) { - return a <= b && a != b; -} - -template< typename T > -typename T::IsComparable operator>( const T &a, const T &b ) { - return b <= a && a != b; -} - -template< typename T > -typename T::IsComparable operator>=( const T &a, const T &b ) { - return b <= a; -} +using Comparable = Ord; struct Defer { template< typename F > @@ -234,7 +220,7 @@ struct Maybe : Comparable T fromMaybe( T x ) const { return isJust() ? 
value() : x; } - explicit operator bool() const { return isJust(); } + explicit operator bool() const { return isJust() && bool( value() ); } static Maybe Just( const T &t ) { return Maybe( t ); } static Maybe Nothing() { return Maybe(); } @@ -365,21 +351,6 @@ struct StrongEnumFlags { UnderlyingType store; }; -// don't catch integral types and classical enum! -template< typename Self, typename = typename - std::enable_if< is_enum_class< Self >::value >::type > -constexpr StrongEnumFlags< Self > operator|( Self a, Self b ) noexcept { - using Ret = StrongEnumFlags< Self >; - return Ret( a ) | Ret( b ); -} - -template< typename Self, typename = typename - std::enable_if< is_enum_class< Self >::value >::type > -constexpr StrongEnumFlags< Self > operator&( Self a, Self b ) noexcept { - using Ret = StrongEnumFlags< Self >; - return Ret( a ) & Ret( b ); -} - /* implementation of Union */ namespace _impl { @@ -411,9 +382,6 @@ namespace _impl { std::is_same< Needle, T >::value || In< Needle, Ts... >::value > { }; - template< typename _T > - struct Witness { using T = _T; }; - template< typename, typename... > struct _OneConversion { }; @@ -454,10 +422,10 @@ template< typename F, typename T, typename Fallback, typename Check = bool > struct _ApplyResult : Fallback {}; template< typename F, typename T, typename Fallback > -struct _ApplyResult< F, T, Fallback, decltype( std::declval< F >()( std::declval< T >() ), true ) > +struct _ApplyResult< F, T, Fallback, decltype( std::declval< F >()( std::declval< T& >() ), true ) > { using Parameter = T; - using Result = decltype( std::declval< F >()( std::declval< T >() ) ); + using Result = decltype( std::declval< F >()( std::declval< T& >() ) ); }; template< typename F, typename... Ts > struct ApplyResult; @@ -524,6 +492,11 @@ struct Union : Comparable { return *this; } + ~Union() { + if ( _discriminator ) + _destruct< 1, Types... 
>( _discriminator ); + } + template< typename T > auto operator=( const T &other ) -> typename std::enable_if< std::is_lvalue_reference< T & >::value, Union & >::type @@ -546,26 +519,26 @@ struct Union : Comparable { return *this; } - void swap( Union other ) { - typename std::aligned_storage< size, algignment >::type tmpStor; - unsigned char tmpDis; + void swap( Union &other ) { + if ( _discriminator == 0 && other._discriminator == 0 ) + return; - std::memcpy( &tmpStor, &other.storage, size ); - tmpDis = other._discriminator; - other._discriminator = 0; - std::memcpy( &other.storage, &storage, size ); - other._discriminator = _discriminator; - _discriminator = 0; - std::memcpy( &storage, &tmpStor, size ); - _discriminator = tmpDis; + if ( _discriminator == other._discriminator ) + _swapSame< 1, Types... >( other ); + else + _swapDifferent< 0, void, Types... >( other ); } - bool empty() { + bool empty() const { return _discriminator == 0; } - explicit operator bool() { - return !empty(); + explicit operator bool() const + { + auto rv = const_cast< Union* >( this )->apply( []( const auto & x ) -> bool { return !!x; } ); + if ( rv.isNothing() ) + return false; + return true; } template< typename T > @@ -595,6 +568,12 @@ struct Union : Comparable { return unsafeGet< T >(); } + template< typename T > + T *asptr() { return is< T >() ? &get< T >() : nullptr; } + + template< typename T > + const T *asptr() const { return is< T >() ? &get< T >() : nullptr; } + template< typename T > const T &getOr( const T &val ) const { if ( is< T >() ) @@ -642,7 +621,7 @@ struct Union : Comparable { // invoke the first function that can handle the currently stored value // (type-based pattern matching) template< typename R, typename F, typename... Args > - R _match( F f, Args... args ) { + R _match( F f, Args&&... args ) { auto x = apply( f ); if ( x.isNothing() ) return _match< R >( args... 
); @@ -650,25 +629,24 @@ struct Union : Comparable { return x; } + // invoke the first function that can handle the currently stored value + // (type-based pattern matching) + // * return value can be extracted from resuling Maybe value + // * auto lambdas are supported an can be called on any value! template< typename F, typename... Args > - Applied< F > match( F f, Args... args ) { + Applied< F > match( F f, Args&&... args ) { return _match< Applied< F > >( f, args... ); } bool operator==( const Union &other ) const { return _discriminator == other._discriminator - && _compare< std::equal_to >( other ); - } - - bool operator!=( const Union &other ) const { - return _discriminator != other._discriminator - || _compare< std::not_equal_to >( other ); + && (_discriminator == 0 || _compare< std::equal_to >( other )); } bool operator<( const Union &other ) const { return _discriminator < other._discriminator || (_discriminator == other._discriminator - && _compare< std::less >( other ) ); + && (_discriminator == 0 || _compare< std::less >( other )) ); } unsigned char discriminator() const { return _discriminator; } @@ -682,11 +660,10 @@ struct Union : Comparable { private: static constexpr size_t size = _impl::MaxSizeof< 1, Types... >::value; - static constexpr size_t algignment = _impl::MaxAlign< 1, Types... >::value; - typename std::aligned_storage< size, algignment >::type storage; + static constexpr size_t alignment = _impl::MaxAlign< 1, Types... >::value; + typename std::aligned_storage< size, alignment >::type storage; unsigned char _discriminator; - template< unsigned char i, typename Needle, typename T, typename... 
Ts > constexpr unsigned char _discriminatorF() const { return std::is_same< Needle, T >::value @@ -706,7 +683,7 @@ struct Union : Comparable { template< unsigned char > unsigned char _copyConstruct( unsigned char, const Union & ) - { ASSERT_UNREACHABLE( "invalid _copyConstruct" ); } + { UNREACHABLE( "invalid _copyConstruct" ); return 0; } template< unsigned char i, typename T, typename... Ts > void _moveConstruct( unsigned char d, Union &&other ) { @@ -718,7 +695,7 @@ struct Union : Comparable { template< unsigned char > unsigned char _moveConstruct( unsigned char, Union && ) - { ASSERT_UNREACHABLE( "invalid _moveConstruct" ); } + { UNREACHABLE( "invalid _moveConstruct" ); return 0; } void _copyAssignDifferent( const Union &other ) { auto tmp = _discriminator; @@ -746,7 +723,7 @@ struct Union : Comparable { } template< unsigned char > - void _copyAssignSame( const Union & ) { ASSERT_UNREACHABLE( "invalid _copyAssignSame" ); } + void _copyAssignSame( const Union & ) { UNREACHABLE( "invalid _copyAssignSame" ); } template< unsigned char i, typename T, typename... 
Ts > void _destruct( unsigned char d ) { @@ -757,7 +734,7 @@ struct Union : Comparable { } template< unsigned char > - void _destruct( unsigned char ) { ASSERT_UNREACHABLE( "invalid _destruct" ); } + void _destruct( unsigned char ) { UNREACHABLE( "invalid _destruct" ); } void _moveAssignSame( Union &&other ) { ASSERT_EQ( _discriminator, other._discriminator ); @@ -775,7 +752,7 @@ struct Union : Comparable { } template< unsigned char > - void _moveAssignSame( Union && ) { ASSERT_UNREACHABLE( "invalid _moveAssignSame" ); } + void _moveAssignSame( Union && ) { UNREACHABLE( "invalid _moveAssignSame" ); } void _moveAssignDifferent( Union &&other ) { auto tmp = _discriminator; @@ -821,7 +798,7 @@ struct Union : Comparable { } template< template< typename > class Compare, int d > - bool _compare2( const Union & ) const { ASSERT_UNREACHABLE( "invalid discriminator" ); } + bool _compare2( const Union & ) const { UNREACHABLE( "invalid discriminator" ); return false;} template< template< typename > class Compare, int d, typename T, typename... Ts > bool _compare2( const Union &other ) const { @@ -838,7 +815,7 @@ struct Union : Comparable { template< typename Target, bool anyCastPossible, int > Target _convert2( Preferred ) const { static_assert( anyCastPossible, "Cast of Union can never succeed" ); - ASSERT_UNREACHABLE( "wrong _convert2 in Union" ); + UNREACHABLE( "wrong _convert2 in Union" ); } template< typename Target, bool any, int d, typename, typename... Ts > @@ -859,8 +836,155 @@ struct Union : Comparable { return _convert2< Target, false, 1, Types... >( Preferred() ); } + template< unsigned char i, typename T, typename... Ts > + void _swapSame( Union &other ) { + if ( _discriminator == i ) + _doSwap< T >( unsafeGet< T >(), other.unsafeGet< T >(), Preferred() ); + else + _swapSame< i + 1, Ts... 
>( other ); + } + + template< unsigned char i > + void _swapSame( Union & ) { UNREACHABLE( "Invalid _swapSame" ); } + + template< typename T > + auto _doSwap( T &a, T &b, Preferred ) -> decltype( a.swap( b ) ) { + a.swap( b ); + } + + template< typename T > + auto _doSwap( T &a, T &b, NotPreferred ) -> decltype( std::swap( a, b ) ) { + std::swap( a, b ); + } + + template< unsigned char i, typename T, typename... Ts > + void _swapDifferent( Union &other ) { + if ( i == _discriminator ) + _swapDifferent2< i, T, 0, void, Types... >( other ); + else + _swapDifferent< i + 1, Ts... >( other ); + } + + template< unsigned char i > + void _swapDifferent( Union & ) { UNREACHABLE( "Invalid _swapDifferent" ); } + + template< unsigned char local, typename Local, unsigned char i, typename T, typename... Ts > + void _swapDifferent2( Union &other ) { + if ( i == other._discriminator ) + _doSwapDifferent< local, i, Local, T >( other ); + else + _swapDifferent2< local, Local, i + 1, Ts... >( other ); + } + + template< unsigned char local, typename Local, unsigned char i > + void _swapDifferent2( Union & ) { UNREACHABLE( "Invalid _swapDifferent2" ); } + + template< unsigned char l, unsigned char r, typename L, typename R > + auto _doSwapDifferent( Union &other ) -> typename std::enable_if< l != 0 && r != 0 >::type { + L lval( unsafeMoveOut< L >() ); + unsafeGet< L >().~L(); + + new ( &unsafeGet< R >() ) R( other.unsafeMoveOut< R >() ); + other.unsafeGet< R >().~R(); + + new ( &other.unsafeGet< L >() ) L( std::move( lval ) ); + std::swap( _discriminator, other._discriminator ); + } + + template< unsigned char l, unsigned char r, typename L, typename R > + auto _doSwapDifferent( Union &other ) -> typename std::enable_if< l == 0 && r != 0 >::type { + new ( &unsafeGet< R >() ) R( other.unsafeMoveOut< R >() ); + other.unsafeGet< R >().~R(); + std::swap( _discriminator, other._discriminator ); + } + + template< unsigned char l, unsigned char r, typename L, typename R > + auto 
_doSwapDifferent( Union &other ) -> typename std::enable_if< l != 0 && r == 0 >::type { + new ( &other.unsafeGet< L >() ) L( unsafeMoveOut< L >() ); + unsafeGet< L >().~L(); + std::swap( _discriminator, other._discriminator ); + } + + template< unsigned char l, unsigned char r, typename L, typename R > + auto _doSwapDifferent( Union & ) -> typename std::enable_if< l == 0 && r == 0 >::type { + UNREACHABLE( "Invalid _doSwapDifferent" ); + } }; +template< typename Left, typename Right > +struct Either : Union< Left, Right > { + + using Union< Left, Right >::Union; + + bool isLeft() const { return this->template is< Left >(); } + bool isRight() const { return this->template is< Right >(); } + + Left &left() { return this->template get< Left >(); } + Right &right() { return this->template get< Right >(); } + + const Left &left() const { return this->template get< Left >(); } + const Right &right() const { return this->template get< Right >(); } +}; + +// a pointer-like structure which can, however store values a value or a +// referrence to type T +template< typename T > +struct RefOrVal { + static_assert( !std::is_reference< T >::value, "T must not be a reference type" ); + + RefOrVal() : _store( InPlace< T >() ) { } + RefOrVal( T &&val ) : _store( std::forward< T >( val ) ) { } + RefOrVal( T *ref ) : _store( ref ) { } + RefOrVal( T &ref ) : _store( &ref ) { } + + RefOrVal &operator=( const RefOrVal & ) = default; + RefOrVal &operator=( RefOrVal && ) = default; + RefOrVal &operator=( T &v ) { _store = v; return *this; } + RefOrVal &operator=( T &&v ) { _store = std::move( v ); return *this; } + RefOrVal &operator=( T *ptr ) { _store = ptr; return *this; } + + T *ptr() { + ASSERT( !_store.empty() ); + auto *val = _store.template asptr< T >(); + return val ? val : _store.template get< T * >(); + } + const T *ptr() const { + ASSERT( !_store.empty() ); + const auto *val = _store.template asptr< T >(); + return val ? 
val : _store.template get< T * >(); + } + + T *operator->() { return ptr(); } + T &operator*() { return *ptr(); } + const T *operator->() const { return ptr(); } + const T &operator*() const { return *ptr(); } + + private: + Union< T, T * > _store; +}; + +template< typename Fn, typename R = typename std::result_of< Fn() >::type > +struct Lazy { + + Lazy( Fn &&fn ) : _fn( std::forward< Fn >( fn ) ), _val() { } + + R &get() { + if ( _val.empty() ) + _val = _fn(); + return _val.template get< R >(); + } + + R &operator*() { return get(); } + R *operator->() { return &get(); } + + private: + Fn _fn; + Union< R > _val; +}; + +template< typename Fn, typename R = typename std::result_of< Fn() >::type > +Lazy< Fn, R > lazy( Fn &&fn ) { return Lazy< Fn, R >( std::forward< Fn >( fn ) ); } + template< template< typename > class C, typename T, typename F > using FMap = C< typename std::result_of< F( T ) >::type >; @@ -955,10 +1079,25 @@ auto operator>=( const A &a, const B &b ) -> typename _OneUnion< A, B >::type } } -namespace brick_test { -namespace types { +// don't catch integral types and classical enum! 
+template< typename Self, typename = typename + std::enable_if< brick::types::is_enum_class< Self >::value >::type > +constexpr brick::types::StrongEnumFlags< Self > operator|( Self a, Self b ) noexcept { + using Ret = brick::types::StrongEnumFlags< Self >; + return Ret( a ) | Ret( b ); +} -using namespace ::brick::types; +template< typename Self, typename = typename + std::enable_if< brick::types::is_enum_class< Self >::value >::type > +constexpr brick::types::StrongEnumFlags< Self > operator&( Self a, Self b ) noexcept { + using Ret = brick::types::StrongEnumFlags< Self >; + return Ret( a ) & Ret( b ); +} + +namespace brick { +namespace t_types { + +using namespace types; struct Integer : Comparable { @@ -968,23 +1107,53 @@ public: bool operator<=( const Integer& o ) const { return val <= o.val; } }; +struct IntegerEq : Eq { + int val; +public: + IntegerEq(int val) : val(val) {} + bool operator==( const IntegerEq& o ) const { return val == o.val; } +}; + +struct IntegerEqOrd : Ord { + int val; +public: + IntegerEqOrd(int val) : val(val) {} + bool operator==( const IntegerEqOrd& o ) const { return val == o.val; } + bool operator<=( const IntegerEqOrd& o ) const { return val <= o.val; } +}; + +struct IntegerOrd : Ord { + int val; +public: + IntegerOrd(int val) : val(val) {} + bool operator<=( const IntegerOrd& o ) const { return val <= o.val; } +}; + struct Mixins { - TEST(comparable) { - Integer i10(10); - Integer i10a(10); - Integer i20(20); - - ASSERT(i10 <= i10a); - ASSERT(i10a <= i10); - ASSERT(i10 <= i20); - ASSERT(! (i20 <= i10)); + template< typename T > + void eq() { + T i10(10); + T i10a(10); + T i20(20); ASSERT(i10 != i20); ASSERT(!(i10 != i10a)); ASSERT(i10 == i10a); ASSERT(!(i10 == i20)); + } + + template< typename T > + void ord() { + T i10(10); + T i10a(10); + T i20(20); + + ASSERT(i10 <= i10a); + ASSERT(i10a <= i10); + ASSERT(i10 <= i20); + ASSERT(! 
(i20 <= i10)); ASSERT(i10 < i20); ASSERT(!(i20 < i10)); @@ -1000,6 +1169,25 @@ struct Mixins { ASSERT(! (i10 >= i20)); } + TEST(comparable) { + eq< Integer >(); + ord< Integer >(); + } + + TEST(eq) { + eq< IntegerEq >(); + } + + TEST(ord) { + eq< IntegerOrd >(); + ord< IntegerOrd >(); + } + + TEST(eqord) { + eq< IntegerEqOrd >(); + ord< IntegerEqOrd >(); + } + }; #if __cplusplus >= 201103L @@ -1024,25 +1212,24 @@ struct UnionInstances { struct UnionTest { TEST(basic) { Union< int > u( 1 ); - ASSERT( !!u ); ASSERT( !u.empty() ); ASSERT( u.is< int >() ); ASSERT_EQ( u.get< int >(), 1 ); u = 2; // move - ASSERT( !!u ); + ASSERT( !u.empty() ); ASSERT_EQ( u.get< int >(), 2 ); int i = 3; u = i; // copy - ASSERT( !!u ); + ASSERT( !u.empty() ); ASSERT_EQ( u.get< int >(), 3 ); u = types::Union< int >( 4 ); ASSERT( u.is< int >() ); ASSERT_EQ( u.get< int >(), 4 ); u = types::Union< int >(); - ASSERT( !u ); + ASSERT( u.empty() ); ASSERT( !u.is< int >() ); u = 5; - ASSERT( u ); + ASSERT( !u.empty() ); ASSERT( u.is< int >() ); ASSERT_EQ( u.get< int >(), 5 ); } @@ -1061,12 +1248,12 @@ struct UnionTest { ASSERT( wierd.empty() ); wierd = 2L; - ASSERT( !!wierd ); + ASSERT( !wierd.empty() ); ASSERT( wierd.is< long >() ); ASSERT_EQ( wierd.get< long >(), 2L ); wierd = Move(); - ASSERT( !!wierd ); + ASSERT( !wierd.empty() ); ASSERT( wierd.is< Move >() ); } @@ -1086,8 +1273,9 @@ struct UnionTest { ASSERT( ( Union< B, std::string >{ 1 }.is< B >() ) ); } - static C idC( C c ) { return c; }; - static C constC( B ) { return C( 32 ); }; + static C idC( C c ) { return c; } + static C constC( B ) { return C( 32 ); } + static C refC( C &c ) { return c; } TEST(apply) { Union< B, C > u; @@ -1104,6 +1292,9 @@ struct UnionTest { result = u.match( constC ); ASSERT( result.isNothing() ); + + result = u.match( refC ); + ASSERT_EQ( result.value().x, 12 ); } TEST(eq) { @@ -1153,6 +1344,31 @@ struct UnionTest { ASSERT( v < 2l ); ASSERT( w <= 2l ); } + + struct TrackDtor { + TrackDtor( int *cnt ) : cnt( cnt 
) { } + ~TrackDtor() { ++*cnt; } + int *cnt; + }; + + TEST(dtor) { + int cnt = 0; + { + Union< int, TrackDtor > u; + u = TrackDtor( &cnt ); + cnt = 0; + } + ASSERT_EQ( cnt, 1 ); + } + + TEST(assing_dtor) { + int cnt = 0; + Union< int, TrackDtor > u; + u = TrackDtor( &cnt ); + cnt = 0; + u = 1; + ASSERT_EQ( cnt, 1 ); + } }; enum class FA : unsigned char { X = 1, Y = 2, Z = 4 }; @@ -1202,5 +1418,6 @@ struct StrongEnumFlagsTest { } } + #endif // vim: syntax=cpp tabstop=4 shiftwidth=4 expandtab diff --git a/spot/ltsmin/ltsmin.cc b/spot/ltsmin/ltsmin.cc index f439422ca..22956b9d9 100644 --- a/spot/ltsmin/ltsmin.cc +++ b/spot/ltsmin/ltsmin.cc @@ -43,8 +43,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include @@ -1285,7 +1285,7 @@ namespace spot cspins_state s = inner->manager->alloc_setup(dst, inner->compressed_, inner->manager->size() * 2); - auto it = inner->map->insert({s}); + auto it = inner->map->insert(s); inner->succ->push_back(*it); if (!it.isnew()) inner->manager->dealloc(s); @@ -1335,7 +1335,7 @@ namespace spot cspins_state s = inner->manager->alloc_setup(dst, inner->compressed_, inner->manager->size() * 2); - auto it = inner->map->insert({s}); + auto it = inner->map->insert(s); inner->succ->push_back(*it); if (!it.isnew()) inner->manager->dealloc(s); @@ -1421,7 +1421,7 @@ namespace spot compress_(compress), cubeset_(visible_aps.size()), selfloopize_(selfloopize), aps_(visible_aps), nb_threads_(nb_threads) { - map_.setSize(2000000); + map_.initialSize(2000000); manager_ = static_cast (::operator new(sizeof(cspins_state_manager) * nb_threads)); inner_ = new inner_callback_parameters[nb_threads_]; diff --git a/spot/mc/ec.hh b/spot/mc/ec.hh index e14ddc149..3dc1db448 100644 --- a/spot/mc/ec.hh +++ b/spot/mc/ec.hh @@ -185,7 +185,7 @@ namespace spot // -example and flush the bfs queue. 
auto mark = this->twa_->trans_data(front->it_prop, this->tid_).acc_; - if (!acc.has(mark)) + if (!acc.has(mark.id)) { ctrx_element* current = front; while (current != nullptr)