// Excerpted (non-contiguous) lines from the bm::sparse_vector<Val, BV>
// member definitions shown on this page:

template<class Val, class BV>

// reference / const_iterator comparison operators
{ return bool(*this) == bool(ref); }
{ return (pos_ == it.pos_) && (sv_ == it.sv_); }
{ return pos_ < it.pos_; }
{ return pos_ <= it.pos_; }
{ return pos_ > it.pos_; }
{ return pos_ >= it.pos_; }
// back_insert_iterator assignment: flush the pending buffer, then copy state
this->flush();
sv_ = bi.sv_; bv_null_ = bi.bv_null_;
this->set_not_null_ = bi.set_not_null_;

// move-assignment variant additionally takes over the value buffer
this->flush();
sv_ = bi.sv_; bv_null_ = bi.bv_null_;
this->buffer_.swap(bi.buffer_);
this->buf_ptr_ = bi.buf_ptr_;
this->set_not_null_ = bi.set_not_null_;

bool set_not_null_ = true;
// operator[] forwards to get()
{ return this->get(idx); }

// declaration fragments with default arguments (import / extract family)
bool set_not_null = true);
bool set_not_null = true);
bool zero_mem = true) const;

// range resolution: pass the interval through and report success
*idx_from = from; *idx_to = to;
return true;

bool set_not_null = true);
template<class Val, class BV>
template<class Val, class BV>
template<class Val, class BV>
template<class Val, class BV>
template<class Val, class BV>
template<class Val, class BV>

throw std::range_error(err_msg);

template<class Val, class BV>

BV::throw_bad_alloc();

template<class Val, class BV>
template<class Val, class BV>
// import() of a signed array: convert values to their unsigned representation
// in 1024-element chunks, forwarding each chunk to import_u()
const unsigned tmp_size = 1024;
while (i < arr_size)
arr_tmp[k++] = this->s2u(arr[i++]);
import_u(arr_tmp, k, offset, set_not_null);
k = 0; offset += tmp_size;
import_u(arr_tmp, k, offset, set_not_null);

template<class Val, class BV>

// import(): validate arguments, then delegate to import_u_nocheck()
throw_range_error("sparse_vector range error (import size 0)");
if (offset < this->size_)
this->import_u_nocheck(arr, arr_size, offset, set_not_null);
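As a usage illustration (a minimal sketch, not part of the original listing; it assumes the standard BitMagic headers bm.h and bmsparsevec.h are on the include path):

#include <iostream>
#include "bm.h"
#include "bmsparsevec.h"

int main()
{
    typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
    svector_u32 sv;

    // bulk import from a C-style array is faster than element-by-element set()
    unsigned arr[5] = { 10, 20, 0, 40, 50 };
    sv.import(arr, 5);                       // import at offset 0

    std::cout << "size=" << sv.size() << " sv[3]=" << sv[3] << std::endl;
    return 0;
}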
template<class Val, class BV>

// import_u_nocheck(): bit-transposition of the input array.
// Set bits of each value are accumulated per slice in a small transposition
// matrix 'tm' and flushed into the destination slice bit-vectors in sorted batches.
const unsigned bit_capacity = sizeof(Val) * 8;
unsigned char b_list[bit_capacity];
unsigned row_len[bit_capacity] = {0, };
const unsigned transpose_window = 256;

for (i = 0; i < arr_size; ++i)
for (unsigned j = 0; j < bcnt; ++j)
unsigned p = b_list[j];
unsigned rl = row_len[p];
tm.row(p)[rl] = bit_idx;
if (rl == transpose_window)
bv = bv_slices[p] = this->get_create_slice(p);
bv->import_sorted(r, rl, false);

// flush the remaining accumulated rows into their slices
unsigned rows = tm.rows();
for (unsigned k = 0; k < rows; ++k)
if (unsigned rl = row_len[k])
bv = this->get_create_slice(k);
bv->import_sorted(row, rl, false);

if (i + offset > this->size_)
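The fragment above distributes each set bit of every input value into a per-bit slice (bit-plane). A stand-alone sketch of the same transposition idea, independent of the library internals (slice containers are simplified to std::vector<bool>, a hypothetical stand-in for the slice bit-vectors):

#include <iostream>
#include <vector>

int main()
{
    const unsigned values[4] = { 5, 2, 7, 4 };   // values to transpose
    const unsigned bit_capacity = 32;            // bits per value

    // slice[j][i] == bit j of values[i]  (bit-sliced / transposed representation)
    std::vector<std::vector<bool> > slice(bit_capacity, std::vector<bool>(4, false));
    for (unsigned i = 0; i < 4; ++i)
        for (unsigned j = 0; j < bit_capacity; ++j)
            if (values[i] & (1u << j))
                slice[j][i] = true;

    // reconstruct values[2] from the slices to verify the round trip
    unsigned v = 0;
    for (unsigned j = 0; j < bit_capacity; ++j)
        v |= (unsigned)slice[j][2] << j;
    std::cout << v << std::endl;                 // prints 7
    return 0;
}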
template<class Val, class BV>

// sync_size(): recalculate the logical size from the last set bit of the NULL vector
const bvector_type* bv_null = this->get_null_bvector();
bool found = bv_null->find_reverse(this->size_);
this->size_ += found;

template<class Val, class BV>

// import_back() of a signed array: chunked signed-to-unsigned conversion,
// forwarded to import_back_u()
const unsigned tmp_size = 1024;
while (i < arr_size)
arr_tmp[k++] = this->s2u(arr[i++]);
import_back_u(arr_tmp, k, set_not_null);
import_back_u(arr_tmp, k, set_not_null);

template<class Val, class BV>

// import_back_u(): append by importing at the current size()
this->import_u_nocheck(arr, arr_size, this->size(), set_not_null);
template<class Val, class BV>

// decode(): thin wrapper over extract()
bool zero_mem) const
return extract(arr, dec_size, idx_from, zero_mem);

template<class Val, class BV>

// gather(): detect whether the requested index block is sorted, then read
// values either block-wise from the bit-matrix or element by element
arr[0] = this->get(idx[0]);
bool sorted_block = true;
if (idx[r] < idx_prev)
sorted_block = false;
idx_prev = idx[r];
sorted_block = false;

unsigned eff_planes = this->effective_slices();
for (unsigned j = 0; j < eff_planes; ++j)
const bm::word_t* blk = this->bmatr_.get_block(j, i0, j0);
unsigned gap_value = gap_blk[gidx];
for (++k; k < r; ++k)

if constexpr (parent_type::is_signed())
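A minimal gather() usage sketch (illustrative only, assuming the standard BitMagic headers):

#include <iostream>
#include "bm.h"
#include "bmsparsevec.h"

int main()
{
    typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
    svector_u32 sv;
    for (unsigned i = 0; i < 100; ++i)
        sv.push_back(i * 2);

    // gather values at arbitrary indexes into a C-style array
    svector_u32::size_type idx[3] = { 5, 10, 50 };
    unsigned out[3];
    sv.gather(&out[0], &idx[0], 3, bm::BM_SORTED);   // indexes given in ascending order

    std::cout << out[0] << " " << out[1] << " " << out[2] << std::endl;  // 10 20 100
    return 0;
}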
template<class Val, class BV>

// extract_range(): window export by direct block access
bool zero_mem) const
if (end > this->size_)
auto planes = this->effective_slices();
for (unsigned j = 0; j < planes; ++j)
blk = this->bmatr_.get_block(j, i0, j0);
for (size_type k = start; k < end; ++k)
blk = this->bmatr_.get_block(j, i0, j0);
is_set = (blk[nword] & mask0);
if constexpr (parent_type::is_signed())

template<class Val, class BV>

// extract_planes(): window export via per-slice enumerators
bool zero_mem) const
if (end > this->size_)
for (size_type i = 0; i < parent_type::value_bits(); ++i)
typename BV::enumerator en(bv, offset);
for (; en.valid(); ++en)

template<class Val, class BV>

// extract(): visitor functor ORs each slice's bit weight into the output array
struct sv_decode_visitor_func
: arr_(varr), mask_(mask), sv_off_(off)
for (unsigned i = 0; i < bits_size; ++i)
arr_[bits[i] + base] |= m;
auto base = bv_offset - sv_off_;
arr_[i + base] |= m;

if (end > this->size_)
sv_decode_visitor_func func(arr, 0, offset);
auto planes = this->effective_slices();
if constexpr (parent_type::is_signed())
u2s_translate(arr, exported_size);
return exported_size;
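A short extract() usage sketch (not from the original listing; assumes the standard BitMagic headers):

#include <iostream>
#include "bm.h"
#include "bmsparsevec.h"

int main()
{
    typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
    svector_u32 sv;
    for (unsigned i = 0; i < 16; ++i)
        sv.push_back(i);

    // bulk export a window of 8 elements starting at offset 4
    unsigned buf[8];
    svector_u32::size_type n = sv.extract(&buf[0], 8, 4);   // zero_mem defaults to true

    std::cout << "exported " << n << " first=" << buf[0] << std::endl;  // exported 8 first=4
    return 0;
}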
template<class Val, class BV>

// u2s_translate(): in-place unsigned-to-signed conversion of an exported array
::memcpy(&uv, &arr[i], sizeof(uv));
arr[i] = parent_type::u2s(uv);

template<class Val, class BV>

// at(): bounds-checked access
if (idx >= this->size_)
throw_range_error("sparse vector range error");
return this->get(idx);

template<class Val, class BV>

// get(): unchecked access path
return get_no_check(i);

template<class Val, class BV>

// get_no_check(): convert the raw unsigned value back to the signed type if needed
if constexpr (parent_type::is_signed())
return this->u2s(uv);
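A sketch contrasting checked and unchecked element access (illustrative; assumes a default build in which at() throws on out-of-range access):

#include <iostream>
#include <stdexcept>
#include "bm.h"
#include "bmsparsevec.h"

int main()
{
    bm::sparse_vector<unsigned, bm::bvector<> > sv;
    sv.push_back(42);

    unsigned v1 = sv.get(0);      // no bounds checking
    try
    {
        unsigned v2 = sv.at(10);  // bounds-checked: expected to throw a range error
        (void)v2;
    }
    catch (std::exception& ex)
    {
        std::cout << "caught: " << ex.what() << std::endl;
    }
    std::cout << v1 << std::endl;
    return 0;
}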
template<class Val, class BV>

// get_unsigned(): gather value bits across the effective slices (4 at a time)
const unsigned eff_planes = this->effective_slices();
for (unsigned j = 0; smask && j < eff_planes; j += 4, smask >>= 4)

template<class Val, class BV>

// get_unsigned_bits(): read only the first N_bits slices
const unsigned eff_planes = this->effective_slices();
if (N_bits > eff_planes)
N_bits = eff_planes;
for (unsigned j = 0; j < N_bits; ++j)
bool b = bv->test(idx);

template<class Val, class BV>

// try_get(): NULL check before reading the value
if (this->is_null(idx))
template<class Val, class BV>

// set(): grow on an out-of-range index, then write the value
this->size_ = idx + 1;
need_clear = false;
set_value(idx, v, need_clear);

template<class Val, class BV>

// clear(): grow if needed, clear the value and update the NULL vector
this->size_ = idx + 1;
bv_null->clear_bit_no_check(idx);

template<class Val, class BV>

// push_back(): append at the current size
set_value(this->size_, v, false);

template<class Val, class BV>

// push_back_null(count): extend size without assigning values
this->size_ += count;

template<class Val, class BV>

// swap(): element exchange delegated to the bit-matrix
this->swap_elements(idx1, idx2);

template<class Val, class BV>

// insert(): append when inserting past the end, otherwise shift-insert
this->size_ = idx + 1;
set_value(idx, v, false);
insert_value(idx, v);

template<class Val, class BV>

// insert_value(): insert the value, then update the NULL vector
insert_value_no_null(idx, v);
this->insert_null(idx, true);
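A sketch of the element-level modification API (illustrative, assuming the standard BitMagic headers):

#include <iostream>
#include "bm.h"
#include "bmsparsevec.h"

int main()
{
    bm::sparse_vector<unsigned, bm::bvector<> > sv;

    sv.set(10, 7);      // auto-resize: size() becomes 11
    sv.push_back(8);    // appended at index 11
    sv.insert(0, 1);    // everything shifts right by one
    sv.erase(1);        // remove element at index 1, size shrinks back to 12

    std::cout << "size=" << sv.size()
              << " sv[0]=" << sv[0]
              << " sv[10]=" << sv[10]
              << " sv[11]=" << sv[11] << std::endl;   // size=12 sv[0]=1 sv[10]=7 sv[11]=8
    return 0;
}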
template<class Val, class BV>

// insert_value_no_null(): insert a bit into every slice up to the highest set bit
// of the value, then insert zero bits into the remaining effective slices
for (; i <= bsr; ++i)
bv->insert(idx, true);
bv->insert(idx, false);
unsigned eff_planes = this->effective_slices();
for (; i < eff_planes; ++i)
bv->insert(idx, false);

template<class Val, class BV>

// erase(): remove the column from all slices and shrink the size
if (idx >= this->size_)
this->erase_column(idx, erase_null);
this->size_ -= erase_null;

template<class Val, class BV>

// push_back_no_null(): append without touching the NULL vector
set_value_no_null(this->size_, v, false);
template<class Val, class BV>

// set_value(): write the value, then raise the not-NULL flag
set_value_no_null(idx, v, need_clear);
bv_null->set_bit_no_check(idx);

template<class Val, class BV>

// set_value_no_null(): set bits in slices up to the highest set bit of the value;
// optionally clear stale bits in the remaining slices
unsigned eff_planes = this->effective_slices();
this->bmatr_.clear_slices_range(bsr, eff_planes, idx);
for (unsigned j = 0; j <= bsr; ++j)
bv->set_bit_no_check(idx);
else if (need_clear)
if (const bm::word_t* blk = this->bmatr_.get_block(j, i0, j0))
bv->clear_bit_no_check(idx);

template<class Val, class BV>

// inc(): grow if needed, increment, keep the NULL vector in sync
if (idx >= this->size_)
this->size_ = idx + 1;
set_value_no_null(idx, 1, false);
bv_null->set_bit_no_check(idx);

template<class Val, class BV>

// inc_no_null(): for signed types re-assign, otherwise ripple the carry
// through the value slices
if constexpr (parent_type::is_signed())
set_value_no_null(idx, v, true);
for (unsigned i = 0; i < parent_type::sv_value_slices; ++i)
if (bool carry_over = bv->inc(idx); !carry_over)

template<class Val, class BV>

// inc_no_null(idx, v): read-modify-write
set_value_no_null(idx, v + v_prev, true);
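A tiny inc() usage sketch (illustrative, assuming the standard BitMagic headers):

#include <iostream>
#include "bm.h"
#include "bmsparsevec.h"

int main()
{
    bm::sparse_vector<unsigned, bm::bvector<> > sv;
    sv.resize(3);

    sv.inc(1);     // 0 -> 1
    sv.inc(1);     // 1 -> 2 (carry propagates through the bit-slices)
    sv.inc(1);     // 2 -> 3

    std::cout << sv[1] << std::endl;   // 3
    return 0;
}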
template<class Val, class BV>

// clear_all(): forwarded to the base class
parent_type::clear_all(free_mem);

template<class Val, class BV>
template<class Val, class BV>

// clear_range(): forwarded to the base class
parent_type::clear_range(left, right, set_null);

template<class Val, class BV>

// calc_stat(): aggregate per-bvector statistics
typename bvector_type::statistics stbv;
parent_type::calc_stat(&stbv);

template<class Val, class BV>

// optimize(): run the base-class optimization for all slices
typename bvector_type::optmode opt_mode,
typename bvector_type::statistics stbv;
parent_type::optimize(temp_block, opt_mode, st ? &stbv : 0);

template<class Val, class BV>

// optimize_gap_size(): per-slice GAP block tuning
unsigned stored_slices = this->stored_slices();
for (unsigned j = 0; j < stored_slices; ++j)
bv->optimize_gap_size();
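A sketch of memory optimization plus statistics collection (illustrative, assuming the standard BitMagic headers and the BM_DECLARE_TEMP_BLOCK convenience macro from the library):

#include <iostream>
#include "bm.h"
#include "bmsparsevec.h"

int main()
{
    typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
    svector_u32 sv;
    for (unsigned i = 0; i < 100000; ++i)
        sv.push_back(i % 16);

    BM_DECLARE_TEMP_BLOCK(tb)      // scratch block reused by optimize()
    sv.optimize(tb);               // compress all slices

    svector_u32::statistics st;
    sv.calc_stat(&st);
    std::cout << "memory used: " << st.memory_used << std::endl;
    return 0;
}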
template<class Val, class BV>

// join(): grow to the argument size, OR every plane, then join the NULL slice
if (this->size_ < arg_size)
unsigned planes = (unsigned)this->bmatr_.rows();
for (unsigned j = 0; j < planes; ++j)
join_null_slice(sv);

template<class Val, class BV>

// merge(): destructive OR merge of the bit-matrices
if (this->size_ < arg_size)
this->merge_matr(sv.bmatr_);
join_null_slice(sv);

template<class Val, class BV>

// join_null_slice(): mark the joined range as not-NULL
bv_null->set_range(0, arg_size - 1);

template<class Val, class BV>

// copy_range(): delegate to the slice-level copy
this->copy_range_slices(sv, left, right, slice_null);

template<class Val, class BV>

// keep_range(): clear everything outside [left..right]
this->keep_range_no_check(left, right, slice_null);

template<class Val, class BV>

// filter(): AND every slice with the mask bit-vector
unsigned slices = (unsigned)this->get_bmatrix().rows();
for (unsigned j = 0; j < slices; ++j)
bv->bit_and(bv_mask);
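A combined sketch of join(), keep_range() and filter() (illustrative, assuming the standard BitMagic headers):

#include <iostream>
#include "bm.h"
#include "bmsparsevec.h"

int main()
{
    typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
    svector_u32 sv1, sv2;

    sv1.set(0, 1);
    sv2.set(5, 10);

    sv1.join(sv2);            // OR-in sv2 (sv2 stays intact)
    sv1.keep_range(0, 5);     // clear everything outside [0..5]

    bm::bvector<> bv_mask;
    bv_mask.set(5);
    sv1.filter(bv_mask);      // keep only elements flagged in the mask

    std::cout << sv1.size() << " " << sv1[5] << std::endl;   // 6 10
    return 0;
}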
template<class Val, class BV>

// compare(): three-way comparison of an element against a value
return (sv_value > val) - (sv_value < val);

template<class Val, class BV>

// equal(): forwarded to the base class
return parent_type::equal(sv, null_able);

template<class Val, class BV>

// begin(): construct a const_iterator over this vector
return it_type(this);

template<class Val, class BV>

// set_allocator_pool(): forwarded to the bit-matrix
this->bmatr_.set_allocator_pool(pool_ptr);
template<class Val, class BV>

// const_iterator constructors
: sv_(0), pos_(bm::id_max), buf_ptr_(0)

template<class Val, class BV>

: sv_(it.sv_), pos_(it.pos_), buf_ptr_(0)

template<class Val, class BV>

: sv_(sv), buf_ptr_(0)

template<class Val, class BV>

: sv_(sv), buf_ptr_(0)

template<class Val, class BV>

// go_to(): invalid positions collapse to bm::id_max
pos_ = (!sv_ || pos >= sv_->size()) ? bm::id_max : pos;

template<class Val, class BV>

// advance(): invalidate when walking past the end
if (pos_ >= sv_->size())

template<class Val, class BV>

// value(): refill the read-ahead buffer with a bulk extract
sv_->extract(buf_ptr_, n_buf_size, pos_, true);

template<class Val, class BV>

// skip_zero_values(): scan the buffered window first, then re-check bounds
if (++buf_ptr_ < buf_end)
if (pos_ >= sv_->size())
if (buf_ptr_ >= buf_end)

template<class Val, class BV>

// is_null(): query the parent vector at the current position
return sv_->is_null(pos_);

template<class Val, class BV>
template<class Val, class BV>
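A const_iterator traversal sketch (illustrative, assuming the standard BitMagic headers):

#include <iostream>
#include "bm.h"
#include "bmsparsevec.h"

int main()
{
    typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
    svector_u32 sv(bm::use_null);
    sv.push_back(1);
    sv.push_back_null();
    sv.push_back(3);

    // buffered forward traversal
    svector_u32::const_iterator it = sv.begin();
    for (; it.valid(); ++it)
    {
        if (it.is_null())
            std::cout << "NULL ";
        else
            std::cout << *it << " ";
    }
    std::cout << std::endl;    // 1 NULL 3
    return 0;
}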
template<class Val, class BV>

// back_insert_iterator copy constructor
: sv_(bi.sv_), bv_null_(bi.bv_null_), buf_ptr_(0),
set_not_null_(bi.set_not_null_),
prev_nb_(bi.prev_nb_), opt_mode_(bi.opt_mode_)

template<class Val, class BV>

// back_insert_iterator move constructor: take over the buffer
: sv_(bi.sv_), bv_null_(bi.bv_null_), buf_ptr_(bi.buf_ptr_),
set_not_null_(bi.set_not_null_),
prev_nb_(bi.prev_nb_), opt_mode_(bi.opt_mode_)
buffer_.swap(bi.buffer_);
buf_ptr_ = bi.buf_ptr_;

template<class Val, class BV>
template<class Val, class BV>

// add(): buffer the value, then raise the not-NULL bit for the new element
this->add_value_no_null(v);
bv_null_->set_bit_no_check(sz + buf_idx);

template<class Val, class BV>
template<class Val, class BV>
template<class Val, class BV>

// add_null(count): forwarded to the parent vector
sv_->push_back_null(count);

template<class Val, class BV>
template<class Val, class BV>

// flush(): bulk-import the buffered values, then optimize the previous block
sv_->import_back_u(arr, arr_size, false);
sv_->optimize_block(prev_nb_, opt_mode_);
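A back_insert_iterator usage sketch (illustrative, assuming the standard BitMagic headers):

#include <iostream>
#include "bm.h"
#include "bmsparsevec.h"

int main()
{
    typedef bm::sparse_vector<unsigned, bm::bvector<> > svector_u32;
    svector_u32 sv(bm::use_null);

    {
        svector_u32::back_insert_iterator bit = sv.get_back_inserter();
        for (unsigned i = 0; i < 1000; ++i)
            bit = i;          // buffered push_back
        bit.add_null();       // append one NULL element
        bit.flush();          // make sure the buffer reaches the vector
    }

    std::cout << sv.size() << " " << sv[999] << std::endl;   // 1001 999
    return 0;
}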
basic bit-matrix class and utilities
#define IS_FULL_BLOCK(addr)
#define BM_ASSERT_THROW(x, xerrcode)
#define FULL_BLOCK_FAKE_ADDR
Utilities for bit transposition (internal) (experimental!)
Base class for bit-transposed(bit-sliced) sparse vector construction.
void freeze_matr()
Turn on RO mode.
void resize(size_type new_size, bool set_null)
const bmatrix_type & get_bmatrix() const noexcept
void copy_from(const base_sparse_vector< Val, BV, MAX_SIZE > &bsv)
bvector_type::allocation_policy allocation_policy_type
void sync_ro() noexcept
Sync read-only state.
bool is_nullable() const noexcept
check if container supports NULL (unassigned) values
bmatrix_type bmatr_
bit-transposed matrix
allocator_type::allocator_pool_type allocator_pool_type
static unsigned_value_type s2u(value_type v) noexcept
Convert signed value type to unsigned representation.
size_type size_
array size
std::make_unsigned< value_type >::type unsigned_value_type
void bit_sub_rows(const bvector_type &bv, bool use_null)
Set SUB (MINUS) operation on all existing bit-slices.
BV::allocator_type allocator_type
Basic dense bit-matrix class.
bool is_ro() const noexcept
return true if matrix is in read-only mode
const bvector_type * row(size_type i) const noexcept
bm::heap_matrix< unsigned char, sizeof(int), 256, typename bvector_type::allocator_type > remap_matrix_type
unused remap matrix type for compatibility with the sparse serializer
size_type get_null_idx() const noexcept
return index of the NULL vector
size_type rows() const noexcept
optmode
Optimization mode. Every next level means additional checks (better compression vs time)
allocator_type::allocator_pool_type allocator_pool_type
bvector_size_type size_type
blocks_manager_type::block_idx_type block_idx_type
unsigned char * data() noexcept
Get write access to buffer memory.
void reserve(size_t new_capacity)
reserve new capacity (buffer content preserved)
Rank-Select compressed sparse vector.
Back insert iterator implements buffered insert, faster than generic access assignment.
back_insert_iterator & operator*()
noop
void add_null(size_type count)
add a series of consecutive NULLs (no-value) to the container
bm::byte_buffer< allocator_type > buffer_type
void disable_set_null() noexcept
Reconfigure back inserter not to touch the NULL vector.
bvector_type::allocator_type::allocator_pool_type allocator_pool_type
void operator=(value_type v)
push value to the vector
bvector_type::block_idx_type block_idx_type
void operator=(const back_insert_iterator &bi)
back_insert_iterator & operator++()
noop
bvector_type * get_null_bvect() const noexcept
Get access to not-null vector.
back_insert_iterator & operator++(int)
noop
sparse_vector_type::unsigned_value_type unsigned_value_type
void add_null()
add NULL (no-value) to the container
sparse_vector_type::size_type size_type
bool flush()
flush the accumulated buffer
bvector_type * bv_null_
!< not NULL vector pointer
back_insert_iterator(const back_insert_iterator &bi)
void add(value_type v)
add value to the container
bool empty() const
return true if insertion buffer is empty
sparse_vector_type * sparse_vector_type_ptr
unsigned_value_type * buf_ptr_
!< position in the value buffer
back_insert_iterator(sparse_vector_type *sv)
bvector_type::allocator_type allocator_type
void add_value_no_null(value_type v)
add value to the buffer without changing the NULL vector
sparse_vector< Val, BV > sparse_vector_type
back_insert_iterator(back_insert_iterator &&bi) noexcept
move constructor
std::output_iterator_tag iterator_category
sparse_vector_type::value_type value_type
bm::sparse_vector< Val, BV > * sv_
!< pointer on the parent vector
sparse_vector_type::bvector_type bvector_type
Const iterator to traverse the sparse vector.
bool operator==(const const_iterator &it) const noexcept
std::input_iterator_tag iterator_category
const sparse_vector_type * sv_
!< ptr to parent
const_iterator() noexcept
void invalidate() noexcept
Invalidate current iterator.
bvector_type::allocator_type allocator_type
sparse_vector< Val, BV > sparse_vector_type
void go_to(size_type pos) noexcept
re-position to a specified position
size_type pos() const noexcept
Current position (index) in the vector.
value_type operator*() const
Get current position (value)
sparse_vector_type::bvector_type bvector_type
buffer_type buffer_
!< value buffer
bool operator!=(const const_iterator &it) const noexcept
bool is_null() const noexcept
Get NULL status.
bool advance() noexcept
advance iterator forward by one
bool operator>(const const_iterator &it) const noexcept
bool operator<=(const const_iterator &it) const noexcept
void skip_zero_values() noexcept
const_iterator operator++(int)
Advance to the next available value.
bool valid() const noexcept
Returns true if iterator is at a valid position.
bool operator>=(const const_iterator &it) const noexcept
bm::byte_buffer< allocator_type > buffer_type
value_type value() const
Get current position (value)
bool operator<(const const_iterator &it) const noexcept
sparse_vector_type::size_type size_type
const_iterator & operator++() noexcept
Advance to the next available value.
sparse_vector_type * sparse_vector_type_ptr
sparse_vector_type::value_type value_type
bvector_type::allocator_type::allocator_pool_type allocator_pool_type
value_type * buf_ptr_
!< position in the value buffer
size_type pos_
!< Position
Reference class to access elements via common [] operator.
bool is_null() const noexcept
reference & operator=(const reference &ref)
reference & operator=(value_type val)
reference(sparse_vector< Val, BV > &sv, size_type idx) noexcept
bool operator==(const reference &ref) const noexcept
sparse_vector< Val, BV > & sv_
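A minimal usage sketch of the reference / operator[] access path (not part of the original page; assumes the standard BitMagic headers):

#include <iostream>
#include "bm.h"
#include "bmsparsevec.h"

int main()
{
    bm::sparse_vector<unsigned, bm::bvector<> > sv;
    sv.resize(4);

    sv[0] = 10;         // reference::operator=(value_type) writes through
    sv[1] = sv[0];      // reference-to-reference assignment copies the value

    std::cout << sv.get(0) << " " << sv.get(1) << std::endl;   // 10 10
    return 0;
}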
sparse vector de-serializer
algorithms for sparse_vector scan/search
Serialize sparse vector into a memory buffer(s) structure.
succinct sparse vector with runtime compression using bit-slicing / transposition method
void inc(size_type idx)
increment specified element by one
unsigned_value_type get_unsigned(size_type idx) const noexcept
get raw unsigned value
static void u2s_translate(value_type *arr, size_type sz) noexcept
bool empty() const noexcept
return true if vector is empty
int compare(size_type idx, const value_type val) const noexcept
Compare vector element with argument.
bvector_type::size_type size_type
void set_value(size_type idx, value_type v, bool need_clear)
set value without checking boundaries
void set_remap() noexcept
value_type at(size_type idx) const
access specified element with bounds checking
void set_null(size_type idx)
set specified element to unassigned value (NULL)
void set_value_no_null(size_type idx, value_type v, bool need_clear)
set value without checking boundaries or support of NULL
static constexpr bool is_str() noexcept
unsigned_value_type get_unsigned_bits(size_type idx, size_type N_bits) const noexcept
Get raw unsigned value first N bits.
value_type get_no_check(size_type idx) const noexcept
get specified element without checking boundary conditions
allocator_type::allocator_pool_type allocator_pool_type
void insert_value_no_null(size_type idx, value_type v)
insert value without checking boundaries or support of NULL
const_iterator begin() const noexcept
Provide const iterator access to container content.
const value_type & const_reference
void sync_size() noexcept
recalculate size to exclude tail NULL elements. After this call size() will return the true size of the vector
void push_back(value_type v)
push value back into vector
remap_matrix_type * get_remap_matrix()
void push_back_no_null(value_type v)
push value back into vector without NULL semantics
void insert_value(size_type idx, value_type v)
insert value without checking boundaries
bvector_type * bvector_type_ptr
bool equal(const sparse_vector< Val, BV > &sv, bm::null_support null_able=bm::use_null) const noexcept
check if another sparse vector has the same content and size
void optimize_gap_size()
Optimize sizes of GAP blocks.
sparse_vector(const sparse_vector< Val, BV > &sv)
sparse_vector(sparse_vector< Val, BV > &&sv) noexcept
sparse_vector< Val, BV > & operator=(const sparse_vector< Val, BV > &sv)
value_type get(size_type idx) const noexcept
get specified element without bounds checking
bvector_type::enumerator bvector_enumerator_type
void set(size_type idx, value_type v)
set specified element with bounds checking and automatic resize
static bool find_rank(size_type rank, size_type &pos) noexcept
find position of compressed element by its rank
void resize(size_type sz)
resize vector
void clear(size_type idx, bool set_null)
clear specified element with bounds checking and automatic resize
static void throw_range_error(const char *err_msg)
throw range error
back_insert_iterator get_back_inserter()
Provide back insert iterator. Back insert iterator implements buffered insertion, which is faster,...
void resize_internal(size_type sz, bool set_null=true)
void inc_no_null(size_type idx, value_type v)
increment by v without changing NULL vector or size
void swap(size_type idx1, size_type idx2)
swap two vector elements between each other
friend back_insert_iterator
bvector_type::block_idx_type block_idx_type
void import_u_nocheck(const unsigned_value_type *arr, size_type arr_size, size_type offset, bool set_not_null)
void inc_no_null(size_type idx)
Increment element by 1 without changing NULL vector or size.
size_type gather(value_type *arr, const size_type *idx, size_type size, bm::sort_order sorted_idx) const
Gather elements to a C-style array.
const_iterator end() const noexcept
Provide const iterator access to the end.
size_t remap_size() const noexcept
unsigned char * init_remap_buffer() noexcept
void calc_stat(struct sparse_vector< Val, BV >::statistics *st) const noexcept
Calculates memory statistics.
void push_back_null(size_type count)
push back specified amount of NULL values
void clear() noexcept
resize to zero, free memory
sparse_vector< Val, BV > & merge(sparse_vector< Val, BV > &sv)
merge with another sparse vector using OR operation Merge is different from join(),...
parent_type::bmatrix_type::remap_matrix_type remap_matrix_type
unused remap matrix type for compatibility with the sparse serializer
void freeze()
Turn sparse vector into immutable mode. Read-only (immutable) vector uses less memory and allows faster...
size_type effective_size() const noexcept
size of sparse vector (may be different for RSC)
void set_null(const bvector_type &bv_idx)
Set NULL all elements set as 1 in the argument vector.
sparse_vector< Val, BV > & join(const sparse_vector< Val, BV > &sv)
join all with another sparse vector using OR operation
void set_allocator_pool(allocator_pool_type *pool_ptr) noexcept
Set allocator pool for local (non-threaded) memory cyclic (lots of alloc-free ops) operations.
~sparse_vector() noexcept
const unsigned char * get_remap_buffer() const noexcept
void insert(size_type idx, value_type v)
insert specified element into container
void import_back(const value_type *arr, size_type arr_size, bool set_not_null=true)
Import list of elements from a C-style array (pushed back)
static constexpr bool is_compressed() noexcept
various type traits
size_type extract(value_type *arr, size_type size, size_type offset=0, bool zero_mem=true) const
Bulk export list of elements to a C-style array.
base_sparse_vector< Val, BV, 1 > parent_type
size_type extract_range(value_type *arr, size_type size, size_type offset, bool zero_mem=true) const
extract small window without use of masking vector
static size_type translate_address(size_type i) noexcept
address translation for this type of container
size_type extract_planes(value_type *arr, size_type size, size_type offset, bool zero_mem=true) const
extract medium window without use of masking vector
void swap(sparse_vector< Val, BV > &sv) noexcept
content exchange
static void throw_bad_alloc()
throw bad alloc
constexpr bool is_remap() const noexcept
void keep_range(size_type left, size_type right, bm::null_support slice_null=bm::use_null)
Keep only specified interval in the sparse vector, clear all other elements.
void clear_all(bool free_mem, unsigned) noexcept
resize to zero, free memory
void import_u(const unsigned_value_type *arr, size_type arr_size, size_type offset, bool set_not_null)
Import list of elements from a C-style array.
void join_null_slice(const sparse_vector< Val, BV > &sv)
BV::allocator_type allocator_type
bool try_get(size_type idx, value_type &v) const noexcept
get specified element with NOT NULL check
sparse_vector(bm::null_support null_able=bm::no_null, allocation_policy_type ap=allocation_policy_type(), size_type bv_max_size=bm::id_max, const allocator_type &alloc=allocator_type())
Sparse vector constructor.
void push_back_null()
push back NULL value
sparse_vector< Val, BV > & clear_range(size_type left, size_type right, bool set_null=false)
clear range (assign bit 0 for all planes)
void erase(size_type idx, bool erase_null=true)
erase specified element from container
size_type decode(value_type *arr, size_type idx_from, size_type dec_size, bool zero_mem=true) const
Bulk export list of elements to a C-style array.
size_type effective_vector_max() const noexcept
Always 1 (non-matrix type)
bool is_ro() const noexcept
Returns true if vector is in read-only mode.
const_iterator get_const_iterator(size_type idx) const noexcept
Get const_iterator re-positioned to a specific element.
const remap_matrix_type * get_remap_matrix() const
bm::basic_bmatrix< BV > bmatrix_type
value_type operator[](size_type idx) const noexcept
get specified element without bounds checking
void clear(const bvector_type &bv_idx)
Set vector elements specified by the argument bit-vector to zero. Note that set to 0 elements are NOT going...
size_type size_internal() const noexcept
void copy_range(const sparse_vector< Val, BV > &sv, size_type left, size_type right, bm::null_support slice_null=bm::use_null)
copy range of values from another sparse vector
parent_type::unsigned_value_type unsigned_value_type
bvector_type::allocation_policy allocation_policy_type
void import(const value_type *arr, size_type arr_size, size_type offset=0, bool set_not_null=true)
Import list of elements from a C-style array.
void import_back_u(const unsigned_value_type *arr, size_type arr_size, bool set_not_null=true)
Import list of elements from a C-style array (pushed back)
void filter(const bvector_type &bv_mask)
Apply value filter, defined by mask vector.
const bvector_type * bvector_type_const_ptr
void sync(bool, bool)
synchronize internal structures, build fast access index
size_type size() const noexcept
return size of the vector
void optimize(bm::word_t *temp_block=0, typename bvector_type::optmode opt_mode=bvector_type::opt_compress, typename sparse_vector< Val, BV >::statistics *stat=0)
run memory optimization for all vector planes
bool resolve_range(size_type from, size_type to, size_type *idx_from, size_type *idx_to) const noexcept
void bit_block_gather_scatter(TRGW *arr, const bm::word_t *blk, const IDX *idx, SZ size, SZ start, unsigned bit_idx) noexcept
bit index to word gather-scatter algorithm
unsigned short bitscan(V w, B *bits) noexcept
Templated Bitscan with dynamic dispatch for best type.
unsigned bit_scan_reverse(T value) noexcept
sort_order
Sort order declaration.
null_support
NULL-able value support.
@ BM_UNSORTED
input set is NOT sorted
@ BM_SORTED
input set is sorted (ascending order)
@ BM_UNKNOWN
sort order unknown
@ BM_SORTED_UNIFORM
sorted and in one block (internal!)
@ use_null
support "non-assigned" or "NULL" logic
@ no_null
do not support NULL values
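A NULL-support usage sketch (illustrative, assuming the standard BitMagic headers):

#include <iostream>
#include "bm.h"
#include "bmsparsevec.h"

int main()
{
    // construct a NULL-able vector (bm::use_null enables the NULL bit-vector)
    bm::sparse_vector<unsigned, bm::bvector<> > sv(bm::use_null);

    sv.push_back(1);
    sv.push_back_null();     // element 1 stays unassigned (NULL)
    sv.push_back(3);

    for (unsigned i = 0; i < sv.size(); ++i)
        std::cout << i << ": " << (sv.is_null(i) ? "NULL" : "set") << std::endl;

    sv.set_null(0);          // later turn element 0 into NULL as well
    return 0;
}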
unsigned gap_test_unr(const T *buf, const unsigned pos) noexcept
Tests if bit = pos is true. Analog of bm::gap_test with SIMD unrolling.
void xor_swap(W &x, W &y) noexcept
XOR swap two variables.
int for_each_bit_range_no_check(const BV &bv, typename BV::size_type left, typename BV::size_type right, Func &bit_functor)
Implementation of for_each_bit_range without boilerplate checks.
const unsigned set_block_mask
void get_block_coord(BI_TYPE nb, unsigned &i, unsigned &j) noexcept
Recalc linear bvector block index into 2D matrix coordinates.
bm::id64_t idx_arr_block_lookup_u64(const bm::id64_t *idx, bm::id64_t size, bm::id64_t nb, bm::id64_t start) noexcept
block boundaries look ahead U64
unsigned idx_arr_block_lookup_u32(const unsigned *idx, unsigned size, unsigned nb, unsigned start) noexcept
block boundaries look ahead U32
const unsigned set_word_shift
unsigned gap_bfind(const T *buf, unsigned pos, unsigned *is_set) noexcept
unsigned short gap_word_t
const unsigned set_block_shift
const unsigned set_word_mask
Structure with statistical information about memory allocation footprint, serialization projection,...
Mini-matrix for bit transposition purposes.
static unsigned rows() noexcept
const T * row(unsigned row_idx) const noexcept